clang-p2996/clang/test/OpenMP/distribute_simd_codegen.cpp
Commit b5d02bbd0d by dhruvachak: [OpenMP] Increment kernel args version, used by runtime for detecting dyn_ptr. (#85363)
A kernel implicit parameter (dyn_ptr) was introduced some time back.
This patch increments the kernel args version for a compiler supporting
dyn_ptr. The version will be used by the runtime to determine whether
the implicit parameter is generated by the compiler. The versioning is
required to support use cases where code generated by an older compiler
is linked with a newer runtime.

If approved, this patch should be backported to release 18.
2024-03-19 16:40:22 -07:00
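The gist of the versioning scheme, sketched below with hypothetical names (KernelArgsSketch and compilerEmitsDynPtr are illustrations, not the actual libomptarget API): the compiler records a version number in the kernel-arguments struct it emits at each offload launch site, and the runtime branches on that field to decide whether the kernel signature carries the implicit dyn_ptr argument.

// Minimal sketch only; the names and the ">= 3" threshold are assumptions for
// illustration (the CHECK lines in this test store i32 3 into field 0 of
// %struct.__tgt_kernel_arguments).
struct KernelArgsSketch {
  unsigned Version; // written by the compiler for each launch
  unsigned NumArgs; // number of mapped arguments
  // ... pointers, sizes, map types, trip count, etc. elided ...
};

bool compilerEmitsDynPtr(const KernelArgsSketch &Args) {
  // A new-enough version implies the implicit dyn_ptr parameter is present;
  // code produced by an older compiler (lower version) is launched without it.
  return Args.Version >= 3;
}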


// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test host codegen.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - -DOMP5 | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s -DOMP5
// RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - -DOMP5 | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - -DOMP5 | FileCheck %s --check-prefix=CHECK7
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s -DOMP5
// RUN: %clang_cc1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - -DOMP5 | FileCheck %s --check-prefix=CHECK7
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - -DOMP5 | FileCheck %s --check-prefix=CHECK13
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s -DOMP5
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - -DOMP5 | FileCheck %s --check-prefix=CHECK13
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - -DOMP5 | FileCheck %s --check-prefix=CHECK15
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s -DOMP5
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - -DOMP5 | FileCheck %s --check-prefix=CHECK15
// Test target codegen - the host bc file has to be created first (there are no significant differences from the host version of the target region).
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK17
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK17
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK19
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK19
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc -DOMP5
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -DOMP5 | FileCheck %s --check-prefix=CHECK21
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s -DOMP5
// RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - -DOMP5 | FileCheck %s --check-prefix=CHECK21
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc -DOMP5
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -DOMP5 | FileCheck %s --check-prefix=CHECK23
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s -DOMP5
// RUN: %clang_cc1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - -DOMP5 | FileCheck %s --check-prefix=CHECK23
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK9
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK11
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc -DOMP5
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -DOMP5 | FileCheck %s --check-prefix=CHECK13
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s -DOMP5
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - -DOMP5 | FileCheck %s --check-prefix=CHECK13
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc -DOMP5
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -DOMP5 | FileCheck %s --check-prefix=CHECK15
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s -DOMP5
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - -DOMP5 | FileCheck %s --check-prefix=CHECK15
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
void without_schedule_clause(float *a, float *b, float *c, float *d) {
#pragma omp target
#pragma omp teams
#ifdef OMP5
#pragma omp distribute simd simdlen(8) aligned(a) if(true)
#else
#pragma omp distribute simd simdlen(8) aligned(a)
#endif // OMP5
  for (int i = 33; i < 32000000; i += 7) {
    a[i] = b[i] * c[i] * d[i];
  }
}
// ... loop body ...
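// The constants in the CHECK lines below follow from this loop's bounds: with
// i starting at 33, stepping by 7, and stopping before 32000000,
//   trip count       = ceil((32000000 - 33) / 7) = 4571424
//   .omp.ub          = trip count - 1            = 4571423
//   i after the loop = 33 + 4571424 * 7          = 32000001 (.omp.final.then)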
void static_not_chunked(float *a, float *b, float *c, float *d) {
#pragma omp target
#pragma omp teams
#ifdef OMP5
#pragma omp distribute simd dist_schedule(static) safelen(32) if(simd: true) nontemporal(a, b)
#else
#pragma omp distribute simd dist_schedule(static) safelen(32)
#endif // OMP5
  for (int i = 32000000; i > 33; i += -7) {
    a[i] = b[i] * c[i] * d[i];
  }
}
// ... loop body ...
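// The checks for this function use the same trip count as above, now derived
// from a downward-counting loop: i runs 32000000, 31999993, ... while i > 33,
//   trip count       = ceil((32000000 - 33) / 7) = 4571424
//   .omp.ub          = 4571423
//   i after the loop = 32000000 - 4571424 * 7    = 32 (.omp.final.then)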
void static_chunked(float *a, float *b, float *c, float *d) {
#pragma omp target
#pragma omp teams
#pragma omp distribute simd dist_schedule(static, 5)
  for (unsigned i = 131071; i <= 2147483647; i += 127) {
    a[i] = b[i] * c[i] * d[i];
  }
}
// ... loop body ...
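// For this unsigned loop the checks below expect:
//   trip count       = (2147483647 - 131071) / 127 + 1 = 16908289
//   .omp.ub          = 16908288
//   i after the loop = 131071 + 16908289 * 127 = 2147483774, printed as the
//                      signed i32 constant -2147483522
// and, because of dist_schedule(static, 5), the outlined region calls
// __kmpc_for_static_init_4u with a chunk of 5 and iterates through the
// omp.dispatch blocks instead of a single static chunk.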
void test_precond() {
  char a = 0; char i;
#pragma omp target
#pragma omp teams
#ifdef OMP5
#pragma omp distribute simd linear(i) if(a) nontemporal(i)
#else
#pragma omp distribute simd linear(i)
#endif // OMP5
  for (i = a; i < 10; ++i);
}
// a is passed as a parameter to the outlined functions
// ..many loads of %0..
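// The lower bound here is the runtime value of 'a', so the trip count is not a
// compile-time constant: it is 10 - a when a < 10 and 0 otherwise (hence the
// "precond" in the test name), and both 'i' and 'a' are forwarded to the
// offload region, as noted above.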
// no templates for now, as these require special handling in target regions and/or declare target
template <typename T>
T ftemplate() {
  short aa = 0;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd dist_schedule(static, aa)
  for (int i = 0; i < 100; i++) {
  }
  return T();
}
int fint(void) { return ftemplate<int>(); }
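// For the instantiation used by fint(), the loop runs i = 0..99: trip count
// 100, normalized upper bound 99. Unlike static_chunked above, the chunk for
// dist_schedule(static, aa) is the runtime value of 'aa' rather than an
// immediate.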
#endif
// CHECK1-LABEL: define {{[^@]+}}@_Z23without_schedule_clausePfS_S_S_
// CHECK1-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store ptr null, ptr [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 3, ptr [[TMP18]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 4, ptr [[TMP19]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP22]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store ptr null, ptr [[TMP24]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 4571424, ptr [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK1-NEXT: store i64 0, ptr [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP28]], align 4
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP29]], align 4
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK1-NEXT: store i32 0, ptr [[TMP30]], align 4
// CHECK1-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.region_id, ptr [[KERNEL_ARGS]])
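// Note on the kernel-argument stores above: the i32 3 written into field 0 of
// %struct.__tgt_kernel_arguments appears to be the kernel-args version referred
// to in the commit description at the top of this page, the i32 4 in field 1
// matches the four mapped pointers, and the i64 4571424 in field 8 matches the
// trip count derived for this loop.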
// CHECK1-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK1-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70
// CHECK1-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[B:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[C:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
// CHECK1-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i64 16) ]
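// (The llvm.assume with the "align"(ptr, i64 16) operand bundle above comes
// from the aligned(a) clause on the directive; no alignment was specified, so
// an implementation-chosen default, 16 on this target, is asserted.)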
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]]
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM]]
// CHECK1-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP17]] to i64
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[IDXPROM2]]
// CHECK1-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[MUL4:%.*]] = fmul float [[TMP15]], [[TMP18]]
// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP20]] to i64
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i64 [[IDXPROM5]]
// CHECK1-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP21]]
// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP23]] to i64
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 [[IDXPROM8]]
// CHECK1-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], 1
// CHECK1-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]])
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK1-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 32000001, ptr [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK1-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store ptr null, ptr [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 3, ptr [[TMP18]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 4, ptr [[TMP19]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store ptr @.offload_sizes.1, ptr [[TMP22]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP23]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store ptr null, ptr [[TMP24]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 4571424, ptr [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK1-NEXT: store i64 0, ptr [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP28]], align 4
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP29]], align 4
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK1-NEXT: store i32 0, ptr [[TMP30]], align 4
// CHECK1-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.region_id, ptr [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK1-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86
// CHECK1-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[B:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[C:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK1-NEXT: store i32 [[SUB]], ptr [[I]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM]]
// CHECK1-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4
// CHECK1-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM2]]
// CHECK1-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4
// CHECK1-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]]
// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4
// CHECK1-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM5]]
// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4
// CHECK1-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]]
// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4
// CHECK1-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM8]]
// CHECK1-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1
// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
// CHECK1-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 32, ptr [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z14static_chunkedPfS_S_S_
// CHECK1-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store ptr null, ptr [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 3, ptr [[TMP18]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 4, ptr [[TMP19]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store ptr @.offload_sizes.3, ptr [[TMP22]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP23]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store ptr null, ptr [[TMP24]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 16908289, ptr [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK1-NEXT: store i64 0, ptr [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP28]], align 4
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP29]], align 4
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK1-NEXT: store i32 0, ptr [[TMP30]], align 4
// CHECK1-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.region_id, ptr [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK1-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103
// CHECK1-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[B:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[C:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5)
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1: omp.dispatch.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1: omp.dispatch.body:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]]
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127
// CHECK1-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM]]
// CHECK1-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM3]]
// CHECK1-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]]
// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM6]]
// CHECK1-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]]
// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64
// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM9]]
// CHECK1-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1
// CHECK1-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1: omp.dispatch.inc:
// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]]
// CHECK1-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]]
// CHECK1-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1: omp.dispatch.end:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
// CHECK1-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 -2147483522, ptr [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z12test_precondv
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[I_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: store i8 0, ptr [[A]], align 1
// CHECK1-NEXT: [[TMP0:%.*]] = load i8, ptr [[I]], align 1
// CHECK1-NEXT: store i8 [[TMP0]], ptr [[I_CASTED]], align 1
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[I_CASTED]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
// CHECK1-NEXT: store i8 [[TMP2]], ptr [[A_CASTED]], align 1
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP5]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP7]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store ptr null, ptr [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP12:%.*]] = load i8, ptr [[A]], align 1
// CHECK1-NEXT: store i8 [[TMP12]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32
// CHECK1-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK1-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK1-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK1-NEXT: [[TMP15:%.*]] = zext i32 [[ADD4]] to i64
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 3, ptr [[TMP16]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 2, ptr [[TMP17]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store ptr [[TMP10]], ptr [[TMP18]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store ptr [[TMP11]], ptr [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store ptr @.offload_sizes.5, ptr [[TMP20]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP21]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 [[TMP15]], ptr [[TMP24]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK1-NEXT: store i64 0, ptr [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP27]], align 4
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK1-NEXT: store i32 0, ptr [[TMP28]], align 4
// CHECK1-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.region_id, ptr [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK1-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115(i64 [[TMP1]], i64 [[TMP3]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115
// CHECK1-SAME: (i64 noundef [[I:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[I]], ptr [[I_ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined, ptr [[I_ADDR]], ptr [[A_ADDR]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[I:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I4:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I6:%.*]] = alloca i8, align 1
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1
// CHECK1-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32
// CHECK1-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK1-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK1-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1
// CHECK1-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1: omp.precond.then:
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]]
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK1-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP16:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP20]]
// CHECK1-NEXT: [[CONV9:%.*]] = sext i8 [[TMP16]] to i32
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]]
// CHECK1-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8
// CHECK1-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group [[ACC_GRP20]]
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK1-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK1-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP20]])
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
// CHECK1-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP23]] to i32
// CHECK1-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[CONV14:%.*]] = sext i8 [[TMP24]] to i32
// CHECK1-NEXT: [[SUB15:%.*]] = sub i32 10, [[CONV14]]
// CHECK1-NEXT: [[SUB16:%.*]] = sub i32 [[SUB15]], 1
// CHECK1-NEXT: [[ADD17:%.*]] = add i32 [[SUB16]], 1
// CHECK1-NEXT: [[DIV18:%.*]] = udiv i32 [[ADD17]], 1
// CHECK1-NEXT: [[MUL19:%.*]] = mul nsw i32 [[DIV18]], 1
// CHECK1-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV13]], [[MUL19]]
// CHECK1-NEXT: [[CONV21:%.*]] = trunc i32 [[ADD20]] to i8
// CHECK1-NEXT: store i8 [[CONV21]], ptr [[TMP0]], align 1
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z4fintv
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_v()
// CHECK1-NEXT: ret i32 [[CALL]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_v
// CHECK1-SAME: () #[[ATTR0]] comdat {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: store i16 0, ptr [[AA]], align 2
// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2
// CHECK1-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 3, ptr [[TMP7]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store ptr @.offload_sizes.7, ptr [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 100, ptr [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK1-NEXT: store i64 0, ptr [[TMP16]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP17]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP18]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK1-NEXT: store i32 0, ptr [[TMP19]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.region_id, ptr [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK1-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135(i64 [[TMP1]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135
// CHECK1-SAME: (i64 noundef [[AA:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined, ptr [[AA_ADDR]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]])
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1: omp.dispatch.cond:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1: omp.dispatch.body:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]]
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP23]]
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1: omp.dispatch.inc:
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1: omp.dispatch.end:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
// CHECK1-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z23without_schedule_clausePfS_S_S_
// CHECK3-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store ptr null, ptr [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 3, ptr [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 4, ptr [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP22]], align 4
// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store ptr null, ptr [[TMP24]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 4571424, ptr [[TMP26]], align 8
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK3-NEXT: store i64 0, ptr [[TMP27]], align 8
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP28]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP29]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK3-NEXT: store i32 0, ptr [[TMP30]], align 4
// CHECK3-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.region_id, ptr [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK3-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3:[0-9]+]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70
// CHECK3-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 4
// CHECK3-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i32 16) ]
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]]
// CHECK3-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 [[TMP17]]
// CHECK3-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[MUL3:%.*]] = fmul float [[TMP15]], [[TMP18]]
// CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i32 [[TMP20]]
// CHECK3-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP21]]
// CHECK3-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i32 [[TMP23]]
// CHECK3-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP24]], 1
// CHECK3-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]])
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK3-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: store i32 32000001, ptr [[I]], align 4
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK3-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store ptr null, ptr [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 3, ptr [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 4, ptr [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store ptr @.offload_sizes.1, ptr [[TMP22]], align 4
// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP23]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store ptr null, ptr [[TMP24]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 4571424, ptr [[TMP26]], align 8
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK3-NEXT: store i64 0, ptr [[TMP27]], align 8
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP28]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP29]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK3-NEXT: store i32 0, ptr [[TMP30]], align 4
// CHECK3-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.region_id, ptr [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK3-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86
// CHECK3-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK3-NEXT: store i32 [[SUB]], ptr [[I]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 [[TMP13]]
// CHECK3-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4
// CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP16]]
// CHECK3-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
// CHECK3-NEXT: [[MUL3:%.*]] = fmul float [[TMP14]], [[TMP17]]
// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4
// CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[TMP19]]
// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
// CHECK3-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP20]]
// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4
// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP22]]
// CHECK3-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1
// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
// CHECK3-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: store i32 32, ptr [[I]], align 4
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z14static_chunkedPfS_S_S_
// CHECK3-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store ptr null, ptr [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 3, ptr [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 4, ptr [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store ptr @.offload_sizes.3, ptr [[TMP22]], align 4
// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP23]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store ptr null, ptr [[TMP24]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 16908289, ptr [[TMP26]], align 8
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK3-NEXT: store i64 0, ptr [[TMP27]], align 8
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP28]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP29]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK3-NEXT: store i32 0, ptr [[TMP30]], align 4
// CHECK3-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.region_id, ptr [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK3-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103
// CHECK3-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5)
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]]
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127
// CHECK3-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP15]]
// CHECK3-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 [[TMP18]]
// CHECK3-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[MUL4:%.*]] = fmul float [[TMP16]], [[TMP19]]
// CHECK3-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP21]]
// CHECK3-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[MUL6:%.*]] = fmul float [[MUL4]], [[TMP22]]
// CHECK3-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[TMP24]]
// CHECK3-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: [[ADD8:%.*]] = add i32 [[TMP25]], 1
// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3: omp.dispatch.inc:
// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD9:%.*]] = add i32 [[TMP26]], [[TMP27]]
// CHECK3-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD10:%.*]] = add i32 [[TMP28]], [[TMP29]]
// CHECK3-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3: omp.dispatch.end:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
// CHECK3-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: store i32 -2147483522, ptr [[I]], align 4
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z12test_precondv
// CHECK3-SAME: () #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[I_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: store i8 0, ptr [[A]], align 1
// CHECK3-NEXT: [[TMP0:%.*]] = load i8, ptr [[I]], align 1
// CHECK3-NEXT: store i8 [[TMP0]], ptr [[I_CASTED]], align 1
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[I_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
// CHECK3-NEXT: store i8 [[TMP2]], ptr [[A_CASTED]], align 1
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP5]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP7]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store ptr null, ptr [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP12:%.*]] = load i8, ptr [[A]], align 1
// CHECK3-NEXT: store i8 [[TMP12]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32
// CHECK3-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK3-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK3-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK3-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK3-NEXT: [[TMP15:%.*]] = zext i32 [[ADD4]] to i64
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 3, ptr [[TMP16]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 2, ptr [[TMP17]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store ptr [[TMP10]], ptr [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[TMP11]], ptr [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store ptr @.offload_sizes.5, ptr [[TMP20]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP21]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 4
// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store ptr null, ptr [[TMP23]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 [[TMP15]], ptr [[TMP24]], align 8
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK3-NEXT: store i64 0, ptr [[TMP25]], align 8
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP27]], align 4
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK3-NEXT: store i32 0, ptr [[TMP28]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.region_id, ptr [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK3-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115(i32 [[TMP1]], i32 [[TMP3]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115
// CHECK3-SAME: (i32 noundef [[I:%.*]], i32 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[I]], ptr [[I_ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined, ptr [[I_ADDR]], ptr [[A_ADDR]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[I:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I4:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I6:%.*]] = alloca i8, align 1
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 4
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1
// CHECK3-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32
// CHECK3-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK3-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK3-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK3-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1
// CHECK3-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32
// CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK3: omp.precond.then:
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK3-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]]
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK3-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP16:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP21]]
// CHECK3-NEXT: [[CONV9:%.*]] = sext i8 [[TMP16]] to i32
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK3-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]]
// CHECK3-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8
// CHECK3-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group [[ACC_GRP21]]
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK3-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK3-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP20]])
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
// CHECK3-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: [[TMP23:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[CONV13:%.*]] = sext i8 [[TMP23]] to i32
// CHECK3-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[CONV14:%.*]] = sext i8 [[TMP24]] to i32
// CHECK3-NEXT: [[SUB15:%.*]] = sub i32 10, [[CONV14]]
// CHECK3-NEXT: [[SUB16:%.*]] = sub i32 [[SUB15]], 1
// CHECK3-NEXT: [[ADD17:%.*]] = add i32 [[SUB16]], 1
// CHECK3-NEXT: [[DIV18:%.*]] = udiv i32 [[ADD17]], 1
// CHECK3-NEXT: [[MUL19:%.*]] = mul nsw i32 [[DIV18]], 1
// CHECK3-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV13]], [[MUL19]]
// CHECK3-NEXT: [[CONV21:%.*]] = trunc i32 [[ADD20]] to i8
// CHECK3-NEXT: store i8 [[CONV21]], ptr [[TMP0]], align 1
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.end:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z4fintv
// CHECK3-SAME: () #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z9ftemplateIiET_v()
// CHECK3-NEXT: ret i32 [[CALL]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_v
// CHECK3-SAME: () #[[ATTR0]] comdat {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: store i16 0, ptr [[AA]], align 2
// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2
// CHECK3-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP3]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 3, ptr [[TMP7]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store ptr @.offload_sizes.7, ptr [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP12]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 100, ptr [[TMP15]], align 8
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK3-NEXT: store i64 0, ptr [[TMP16]], align 8
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP17]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK3-NEXT: store i32 0, ptr [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.region_id, ptr [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK3-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135(i32 [[TMP1]]) #[[ATTR3]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: ret i32 0
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135
// CHECK3-SAME: (i32 noundef [[AA:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined, ptr [[AA_ADDR]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]])
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3: omp.dispatch.inc:
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// CHECK3-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3: omp.dispatch.end:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
// CHECK3-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: store i32 100, ptr [[I]], align 4
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z23without_schedule_clausePfS_S_S_
// CHECK5-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 8
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8
// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK5-NEXT: store ptr null, ptr [[TMP9]], align 8
// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 8
// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK5-NEXT: store ptr null, ptr [[TMP12]], align 8
// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8
// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8
// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK5-NEXT: store ptr null, ptr [[TMP15]], align 8
// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK5-NEXT: store i32 3, ptr [[TMP18]], align 4
// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK5-NEXT: store i32 4, ptr [[TMP19]], align 4
// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK5-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8
// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK5-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8
// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK5-NEXT: store ptr @.offload_sizes, ptr [[TMP22]], align 8
// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK5-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 8
// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK5-NEXT: store ptr null, ptr [[TMP24]], align 8
// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK5-NEXT: store ptr null, ptr [[TMP25]], align 8
// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK5-NEXT: store i64 4571424, ptr [[TMP26]], align 8
// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK5-NEXT: store i64 0, ptr [[TMP27]], align 8
// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP28]], align 4
// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK5-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP29]], align 4
// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK5-NEXT: store i32 0, ptr [[TMP30]], align 4
// CHECK5-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.region_id, ptr [[KERNEL_ARGS]])
// CHECK5-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK5-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5: omp_offload.failed:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3:[0-9]+]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK5: omp_offload.cont:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70
// CHECK5-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[B:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[C:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
// CHECK5-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i64 16) ]
// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]]
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM]]
// CHECK5-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP17]] to i64
// CHECK5-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[IDXPROM2]]
// CHECK5-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[MUL4:%.*]] = fmul float [[TMP15]], [[TMP18]]
// CHECK5-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP20]] to i64
// CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i64 [[IDXPROM5]]
// CHECK5-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP21]]
// CHECK5-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP23]] to i64
// CHECK5-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 [[IDXPROM8]]
// CHECK5-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], 1
// CHECK5-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]]
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5: omp.loop.exit:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]])
// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK5-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5: .omp.final.then:
// CHECK5-NEXT: store i32 32000001, ptr [[I]], align 4
// CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK5: .omp.final.done:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK5-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 8
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8
// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK5-NEXT: store ptr null, ptr [[TMP9]], align 8
// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 8
// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK5-NEXT: store ptr null, ptr [[TMP12]], align 8
// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8
// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8
// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK5-NEXT: store ptr null, ptr [[TMP15]], align 8
// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK5-NEXT: store i32 3, ptr [[TMP18]], align 4
// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK5-NEXT: store i32 4, ptr [[TMP19]], align 4
// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK5-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8
// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK5-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8
// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK5-NEXT: store ptr @.offload_sizes.1, ptr [[TMP22]], align 8
// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK5-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP23]], align 8
// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK5-NEXT: store ptr null, ptr [[TMP24]], align 8
// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK5-NEXT: store ptr null, ptr [[TMP25]], align 8
// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK5-NEXT: store i64 4571424, ptr [[TMP26]], align 8
// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK5-NEXT: store i64 0, ptr [[TMP27]], align 8
// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP28]], align 4
// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK5-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP29]], align 4
// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK5-NEXT: store i32 0, ptr [[TMP30]], align 4
// CHECK5-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.region_id, ptr [[KERNEL_ARGS]])
// CHECK5-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK5-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5: omp_offload.failed:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK5: omp_offload.cont:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86
// CHECK5-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[B:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[C:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
// CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK5-NEXT: store i32 [[SUB]], ptr [[I]], align 4
// CHECK5-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8, !nontemporal [[META15:![0-9]+]]
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM]]
// CHECK5-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8
// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4
// CHECK5-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64
// CHECK5-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM2]]
// CHECK5-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4
// CHECK5-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]]
// CHECK5-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8
// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4
// CHECK5-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64
// CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM5]]
// CHECK5-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4
// CHECK5-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]]
// CHECK5-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8, !nontemporal [[META15]]
// CHECK5-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4
// CHECK5-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64
// CHECK5-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM8]]
// CHECK5-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1
// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5: omp.loop.exit:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
// CHECK5-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5: .omp.final.then:
// CHECK5-NEXT: store i32 32, ptr [[I]], align 4
// CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK5: .omp.final.done:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z14static_chunkedPfS_S_S_
// CHECK5-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 8
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8
// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK5-NEXT: store ptr null, ptr [[TMP9]], align 8
// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 8
// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK5-NEXT: store ptr null, ptr [[TMP12]], align 8
// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8
// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8
// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK5-NEXT: store ptr null, ptr [[TMP15]], align 8
// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK5-NEXT: store i32 3, ptr [[TMP18]], align 4
// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK5-NEXT: store i32 4, ptr [[TMP19]], align 4
// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK5-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8
// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK5-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8
// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK5-NEXT: store ptr @.offload_sizes.3, ptr [[TMP22]], align 8
// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK5-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP23]], align 8
// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK5-NEXT: store ptr null, ptr [[TMP24]], align 8
// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK5-NEXT: store ptr null, ptr [[TMP25]], align 8
// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK5-NEXT: store i64 16908289, ptr [[TMP26]], align 8
// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK5-NEXT: store i64 0, ptr [[TMP27]], align 8
// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP28]], align 4
// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK5-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP29]], align 4
// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK5-NEXT: store i32 0, ptr [[TMP30]], align 4
// CHECK5-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.region_id, ptr [[KERNEL_ARGS]])
// CHECK5-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK5-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5: omp_offload.failed:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK5: omp_offload.cont:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103
// CHECK5-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[B:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[C:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5)
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK5: omp.dispatch.cond:
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]]
// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK5: omp.dispatch.body:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]]
// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127
// CHECK5-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM]]
// CHECK5-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64
// CHECK5-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM3]]
// CHECK5-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]]
// CHECK5-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64
// CHECK5-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM6]]
// CHECK5-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]]
// CHECK5-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64
// CHECK5-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM9]]
// CHECK5-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1
// CHECK5-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK5: omp.dispatch.inc:
// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]]
// CHECK5-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]]
// CHECK5-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK5: omp.dispatch.end:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
// CHECK5-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5: .omp.final.then:
// CHECK5-NEXT: store i32 -2147483522, ptr [[I]], align 4
// CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK5: .omp.final.done:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z12test_precondv
// CHECK5-SAME: () #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[I_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8
// CHECK5-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK5-NEXT: store i8 0, ptr [[A]], align 1
// CHECK5-NEXT: [[TMP0:%.*]] = load i8, ptr [[I]], align 1
// CHECK5-NEXT: store i8 [[TMP0]], ptr [[I_CASTED]], align 1
// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[I_CASTED]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
// CHECK5-NEXT: store i8 [[TMP2]], ptr [[A_CASTED]], align 1
// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: store i64 [[TMP1]], ptr [[TMP5]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8
// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK5-NEXT: store i64 [[TMP3]], ptr [[TMP7]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK5-NEXT: store i64 [[TMP3]], ptr [[TMP8]], align 8
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK5-NEXT: store ptr null, ptr [[TMP9]], align 8
// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP12:%.*]] = load i8, ptr [[A]], align 1
// CHECK5-NEXT: store i8 [[TMP12]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK5-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK5-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32
// CHECK5-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK5-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK5-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK5-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK5-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK5-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK5-NEXT: [[TMP15:%.*]] = zext i32 [[ADD4]] to i64
// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK5-NEXT: store i32 3, ptr [[TMP16]], align 4
// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK5-NEXT: store i32 2, ptr [[TMP17]], align 4
// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK5-NEXT: store ptr [[TMP10]], ptr [[TMP18]], align 8
// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK5-NEXT: store ptr [[TMP11]], ptr [[TMP19]], align 8
// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK5-NEXT: store ptr @.offload_sizes.5, ptr [[TMP20]], align 8
// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK5-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP21]], align 8
// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8
// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK5-NEXT: store ptr null, ptr [[TMP23]], align 8
// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK5-NEXT: store i64 [[TMP15]], ptr [[TMP24]], align 8
// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK5-NEXT: store i64 0, ptr [[TMP25]], align 8
// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK5-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP27]], align 4
// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK5-NEXT: store i32 0, ptr [[TMP28]], align 4
// CHECK5-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.region_id, ptr [[KERNEL_ARGS]])
// CHECK5-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK5-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5: omp_offload.failed:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115(i64 [[TMP1]], i64 [[TMP3]]) #[[ATTR3]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK5: omp_offload.cont:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115
// CHECK5-SAME: (i64 noundef [[I:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[I_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i64 [[I]], ptr [[I_ADDR]], align 8
// CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined, ptr [[I_ADDR]], ptr [[A_ADDR]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[I:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I4:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I6:%.*]] = alloca i8, align 1
// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1
// CHECK5-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK5-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK5-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32
// CHECK5-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK5-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK5-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK5-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK5-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK5-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK5-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1
// CHECK5-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK5-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32
// CHECK5-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10
// CHECK5-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK5: omp.precond.then:
// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK5-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK5-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP1]], align 1
// CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP14]], 0
// CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK5: omp_if.then:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]]
// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK5-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK5-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP21]]
// CHECK5-NEXT: [[CONV9:%.*]] = sext i8 [[TMP17]] to i32
// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK5-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]]
// CHECK5-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8
// CHECK5-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal [[META15]], !llvm.access.group [[ACC_GRP21]]
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK5-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK5-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK5: omp_if.else:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]]
// CHECK5: omp.inner.for.cond13:
// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP14:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
// CHECK5-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END23:%.*]]
// CHECK5: omp.inner.for.body15:
// CHECK5-NEXT: [[TMP22:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK5-NEXT: [[CONV16:%.*]] = sext i8 [[TMP22]] to i32
// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[MUL17:%.*]] = mul nsw i32 [[TMP23]], 1
// CHECK5-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV16]], [[MUL17]]
// CHECK5-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
// CHECK5-NEXT: store i8 [[CONV19]], ptr [[I6]], align 1
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE20:%.*]]
// CHECK5: omp.body.continue20:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC21:%.*]]
// CHECK5: omp.inner.for.inc21:
// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[ADD22:%.*]] = add nsw i32 [[TMP24]], 1
// CHECK5-NEXT: store i32 [[ADD22]], ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP24:![0-9]+]]
// CHECK5: omp.inner.for.end23:
// CHECK5-NEXT: br label [[OMP_IF_END]]
// CHECK5: omp_if.end:
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5: omp.loop.exit:
// CHECK5-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]])
// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
// CHECK5-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5: .omp.final.then:
// CHECK5-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK5-NEXT: [[CONV24:%.*]] = sext i8 [[TMP29]] to i32
// CHECK5-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK5-NEXT: [[CONV25:%.*]] = sext i8 [[TMP30]] to i32
// CHECK5-NEXT: [[SUB26:%.*]] = sub i32 10, [[CONV25]]
// CHECK5-NEXT: [[SUB27:%.*]] = sub i32 [[SUB26]], 1
// CHECK5-NEXT: [[ADD28:%.*]] = add i32 [[SUB27]], 1
// CHECK5-NEXT: [[DIV29:%.*]] = udiv i32 [[ADD28]], 1
// CHECK5-NEXT: [[MUL30:%.*]] = mul nsw i32 [[DIV29]], 1
// CHECK5-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV24]], [[MUL30]]
// CHECK5-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8
// CHECK5-NEXT: store i8 [[CONV32]], ptr [[TMP0]], align 1
// CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK5: .omp.final.done:
// CHECK5-NEXT: br label [[OMP_PRECOND_END]]
// CHECK5: omp.precond.end:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z4fintv
// CHECK5-SAME: () #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_v()
// CHECK5-NEXT: ret i32 [[CALL]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_v
// CHECK5-SAME: () #[[ATTR0]] comdat {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK5-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK5-NEXT: store i16 0, ptr [[AA]], align 2
// CHECK5-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2
// CHECK5-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2
// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: store i64 [[TMP1]], ptr [[TMP3]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK5-NEXT: store i32 3, ptr [[TMP7]], align 4
// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK5-NEXT: store i32 1, ptr [[TMP8]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK5-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8
// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK5-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK5-NEXT: store ptr @.offload_sizes.7, ptr [[TMP11]], align 8
// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK5-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP12]], align 8
// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK5-NEXT: store ptr null, ptr [[TMP13]], align 8
// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK5-NEXT: store ptr null, ptr [[TMP14]], align 8
// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK5-NEXT: store i64 100, ptr [[TMP15]], align 8
// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK5-NEXT: store i64 0, ptr [[TMP16]], align 8
// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP17]], align 4
// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK5-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP18]], align 4
// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK5-NEXT: store i32 0, ptr [[TMP19]], align 4
// CHECK5-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.region_id, ptr [[KERNEL_ARGS]])
// CHECK5-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK5-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5: omp_offload.failed:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135(i64 [[TMP1]]) #[[ATTR3]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK5: omp_offload.cont:
// CHECK5-NEXT: ret i32 0
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135
// CHECK5-SAME: (i64 noundef [[AA:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8
// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined, ptr [[AA_ADDR]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
// CHECK5-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]])
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK5: omp.dispatch.cond:
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK5: omp.dispatch.body:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]]
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]]
// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]]
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK5: omp.dispatch.inc:
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK5-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// CHECK5-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK5: omp.dispatch.end:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
// CHECK5-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5: .omp.final.then:
// CHECK5-NEXT: store i32 100, ptr [[I]], align 4
// CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK5: .omp.final.done:
// CHECK5-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z23without_schedule_clausePfS_S_S_
// CHECK7-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK7-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4
// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK7-NEXT: store ptr null, ptr [[TMP6]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK7-NEXT: store ptr null, ptr [[TMP9]], align 4
// CHECK7-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4
// CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK7-NEXT: store ptr null, ptr [[TMP12]], align 4
// CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4
// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4
// CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK7-NEXT: store ptr null, ptr [[TMP15]], align 4
// CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK7-NEXT: store i32 3, ptr [[TMP18]], align 4
// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK7-NEXT: store i32 4, ptr [[TMP19]], align 4
// CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK7-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4
// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK7-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4
// CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK7-NEXT: store ptr @.offload_sizes, ptr [[TMP22]], align 4
// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK7-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 4
// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK7-NEXT: store ptr null, ptr [[TMP24]], align 4
// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK7-NEXT: store ptr null, ptr [[TMP25]], align 4
// CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK7-NEXT: store i64 4571424, ptr [[TMP26]], align 8
// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK7-NEXT: store i64 0, ptr [[TMP27]], align 8
// CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK7-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP28]], align 4
// CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK7-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP29]], align 4
// CHECK7-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK7-NEXT: store i32 0, ptr [[TMP30]], align 4
// CHECK7-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.region_id, ptr [[KERNEL_ARGS]])
// CHECK7-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK7-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK7: omp_offload.failed:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3:[0-9]+]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK7: omp_offload.cont:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70
// CHECK7-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK7-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined
// CHECK7-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 4
// CHECK7-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i32 16) ]
// CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423
// CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK7: cond.true:
// CHECK7-NEXT: br label [[COND_END:%.*]]
// CHECK7: cond.false:
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: br label [[COND_END]]
// CHECK7: cond.end:
// CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]]
// CHECK7-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 [[TMP17]]
// CHECK7-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[MUL3:%.*]] = fmul float [[TMP15]], [[TMP18]]
// CHECK7-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i32 [[TMP20]]
// CHECK7-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP21]]
// CHECK7-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i32 [[TMP23]]
// CHECK7-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7: omp.body.continue:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP24]], 1
// CHECK7-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK7: omp.loop.exit:
// CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]])
// CHECK7-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK7-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK7: .omp.final.then:
// CHECK7-NEXT: store i32 32000001, ptr [[I]], align 4
// CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK7: .omp.final.done:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK7-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK7-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4
// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK7-NEXT: store ptr null, ptr [[TMP6]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK7-NEXT: store ptr null, ptr [[TMP9]], align 4
// CHECK7-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4
// CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK7-NEXT: store ptr null, ptr [[TMP12]], align 4
// CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4
// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4
// CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK7-NEXT: store ptr null, ptr [[TMP15]], align 4
// CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK7-NEXT: store i32 3, ptr [[TMP18]], align 4
// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK7-NEXT: store i32 4, ptr [[TMP19]], align 4
// CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK7-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4
// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK7-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4
// CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK7-NEXT: store ptr @.offload_sizes.1, ptr [[TMP22]], align 4
// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK7-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP23]], align 4
// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK7-NEXT: store ptr null, ptr [[TMP24]], align 4
// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK7-NEXT: store ptr null, ptr [[TMP25]], align 4
// CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK7-NEXT: store i64 4571424, ptr [[TMP26]], align 8
// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK7-NEXT: store i64 0, ptr [[TMP27]], align 8
// CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK7-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP28]], align 4
// CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK7-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP29]], align 4
// CHECK7-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK7-NEXT: store i32 0, ptr [[TMP30]], align 4
// CHECK7-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.region_id, ptr [[KERNEL_ARGS]])
// CHECK7-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK7-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK7: omp_offload.failed:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK7: omp_offload.cont:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86
// CHECK7-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK7-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined
// CHECK7-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
// CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK7: cond.true:
// CHECK7-NEXT: br label [[COND_END:%.*]]
// CHECK7: cond.false:
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: br label [[COND_END]]
// CHECK7: cond.end:
// CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
// CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK7-NEXT: store i32 [[SUB]], ptr [[I]], align 4
// CHECK7-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4, !nontemporal [[META16:![0-9]+]]
// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4
// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 [[TMP13]]
// CHECK7-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK7-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4
// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4
// CHECK7-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP16]]
// CHECK7-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
// CHECK7-NEXT: [[MUL3:%.*]] = fmul float [[TMP14]], [[TMP17]]
// CHECK7-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4
// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4
// CHECK7-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[TMP19]]
// CHECK7-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
// CHECK7-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP20]]
// CHECK7-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4, !nontemporal [[META16]]
// CHECK7-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4
// CHECK7-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP22]]
// CHECK7-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4
// CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7: omp.body.continue:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1
// CHECK7-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK7: omp.loop.exit:
// CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
// CHECK7-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK7: .omp.final.then:
// CHECK7-NEXT: store i32 32, ptr [[I]], align 4
// CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK7: .omp.final.done:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z14static_chunkedPfS_S_S_
// CHECK7-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK7-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4
// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK7-NEXT: store ptr null, ptr [[TMP6]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK7-NEXT: store ptr null, ptr [[TMP9]], align 4
// CHECK7-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4
// CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK7-NEXT: store ptr null, ptr [[TMP12]], align 4
// CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4
// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4
// CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK7-NEXT: store ptr null, ptr [[TMP15]], align 4
// CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK7-NEXT: store i32 3, ptr [[TMP18]], align 4
// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK7-NEXT: store i32 4, ptr [[TMP19]], align 4
// CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK7-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4
// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK7-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4
// CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK7-NEXT: store ptr @.offload_sizes.3, ptr [[TMP22]], align 4
// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK7-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP23]], align 4
// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK7-NEXT: store ptr null, ptr [[TMP24]], align 4
// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK7-NEXT: store ptr null, ptr [[TMP25]], align 4
// CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK7-NEXT: store i64 16908289, ptr [[TMP26]], align 8
// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK7-NEXT: store i64 0, ptr [[TMP27]], align 8
// CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK7-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP28]], align 4
// CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK7-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP29]], align 4
// CHECK7-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK7-NEXT: store i32 0, ptr [[TMP30]], align 4
// CHECK7-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.region_id, ptr [[KERNEL_ARGS]])
// CHECK7-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK7-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK7: omp_offload.failed:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK7: omp_offload.cont:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103
// CHECK7-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK7-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined
// CHECK7-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5)
// CHECK7-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK7: omp.dispatch.cond:
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288
// CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK7: cond.true:
// CHECK7-NEXT: br label [[COND_END:%.*]]
// CHECK7: cond.false:
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: br label [[COND_END]]
// CHECK7: cond.end:
// CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]]
// CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK7: omp.dispatch.body:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]]
// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]]
// CHECK7-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127
// CHECK7-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP15]]
// CHECK7-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 [[TMP18]]
// CHECK7-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[MUL4:%.*]] = fmul float [[TMP16]], [[TMP19]]
// CHECK7-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP21]]
// CHECK7-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[MUL6:%.*]] = fmul float [[MUL4]], [[TMP22]]
// CHECK7-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[TMP24]]
// CHECK7-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7: omp.body.continue:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: [[ADD8:%.*]] = add i32 [[TMP25]], 1
// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK7: omp.dispatch.inc:
// CHECK7-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: [[ADD9:%.*]] = add i32 [[TMP26]], [[TMP27]]
// CHECK7-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_LB]], align 4
// CHECK7-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: [[ADD10:%.*]] = add i32 [[TMP28]], [[TMP29]]
// CHECK7-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK7: omp.dispatch.end:
// CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK7-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
// CHECK7-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK7: .omp.final.then:
// CHECK7-NEXT: store i32 -2147483522, ptr [[I]], align 4
// CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK7: .omp.final.done:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z12test_precondv
// CHECK7-SAME: () #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[A:%.*]] = alloca i8, align 1
// CHECK7-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK7-NEXT: [[I_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK7-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK7-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK7-NEXT: store i8 0, ptr [[A]], align 1
// CHECK7-NEXT: [[TMP0:%.*]] = load i8, ptr [[I]], align 1
// CHECK7-NEXT: store i8 [[TMP0]], ptr [[I_CASTED]], align 1
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[I_CASTED]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
// CHECK7-NEXT: store i8 [[TMP2]], ptr [[A_CASTED]], align 1
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: store i32 [[TMP1]], ptr [[TMP5]], align 4
// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK7-NEXT: store ptr null, ptr [[TMP6]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK7-NEXT: store i32 [[TMP3]], ptr [[TMP7]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK7-NEXT: store i32 [[TMP3]], ptr [[TMP8]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK7-NEXT: store ptr null, ptr [[TMP9]], align 4
// CHECK7-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP12:%.*]] = load i8, ptr [[A]], align 1
// CHECK7-NEXT: store i8 [[TMP12]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK7-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK7-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32
// CHECK7-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK7-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK7-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK7-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK7-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK7-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK7-NEXT: [[TMP15:%.*]] = zext i32 [[ADD4]] to i64
// CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK7-NEXT: store i32 3, ptr [[TMP16]], align 4
// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK7-NEXT: store i32 2, ptr [[TMP17]], align 4
// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK7-NEXT: store ptr [[TMP10]], ptr [[TMP18]], align 4
// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK7-NEXT: store ptr [[TMP11]], ptr [[TMP19]], align 4
// CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK7-NEXT: store ptr @.offload_sizes.5, ptr [[TMP20]], align 4
// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK7-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP21]], align 4
// CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK7-NEXT: store ptr null, ptr [[TMP22]], align 4
// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK7-NEXT: store ptr null, ptr [[TMP23]], align 4
// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK7-NEXT: store i64 [[TMP15]], ptr [[TMP24]], align 8
// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK7-NEXT: store i64 0, ptr [[TMP25]], align 8
// CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK7-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK7-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP27]], align 4
// CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK7-NEXT: store i32 0, ptr [[TMP28]], align 4
// CHECK7-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.region_id, ptr [[KERNEL_ARGS]])
// CHECK7-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK7-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK7: omp_offload.failed:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115(i32 [[TMP1]], i32 [[TMP3]]) #[[ATTR3]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK7: omp_offload.cont:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115
// CHECK7-SAME: (i32 noundef [[I:%.*]], i32 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32 [[I]], ptr [[I_ADDR]], align 4
// CHECK7-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK7-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined, ptr [[I_ADDR]], ptr [[A_ADDR]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined
// CHECK7-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[I:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK7-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK7-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I4:%.*]] = alloca i8, align 1
// CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I6:%.*]] = alloca i8, align 1
// CHECK7-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 4
// CHECK7-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1
// CHECK7-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK7-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK7-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32
// CHECK7-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK7-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK7-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK7-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK7-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK7-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK7-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1
// CHECK7-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK7-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32
// CHECK7-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10
// CHECK7-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK7: omp.precond.then:
// CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK7-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK7-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK7-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK7: cond.true:
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK7-NEXT: br label [[COND_END:%.*]]
// CHECK7: cond.false:
// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: br label [[COND_END]]
// CHECK7: cond.end:
// CHECK7-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP1]], align 1
// CHECK7-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP14]], 0
// CHECK7-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK7: omp_if.then:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK7-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK7-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP22]]
// CHECK7-NEXT: [[CONV9:%.*]] = sext i8 [[TMP17]] to i32
// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK7-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]]
// CHECK7-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8
// CHECK7-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal [[META16]], !llvm.access.group [[ACC_GRP22]]
// CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7: omp.body.continue:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK7-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK7-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK7: omp_if.else:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]]
// CHECK7: omp.inner.for.cond13:
// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP14:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
// CHECK7-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END23:%.*]]
// CHECK7: omp.inner.for.body15:
// CHECK7-NEXT: [[TMP22:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK7-NEXT: [[CONV16:%.*]] = sext i8 [[TMP22]] to i32
// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[MUL17:%.*]] = mul nsw i32 [[TMP23]], 1
// CHECK7-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV16]], [[MUL17]]
// CHECK7-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
// CHECK7-NEXT: store i8 [[CONV19]], ptr [[I6]], align 1
// CHECK7-NEXT: br label [[OMP_BODY_CONTINUE20:%.*]]
// CHECK7: omp.body.continue20:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC21:%.*]]
// CHECK7: omp.inner.for.inc21:
// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[ADD22:%.*]] = add nsw i32 [[TMP24]], 1
// CHECK7-NEXT: store i32 [[ADD22]], ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK7: omp.inner.for.end23:
// CHECK7-NEXT: br label [[OMP_IF_END]]
// CHECK7: omp_if.end:
// CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK7: omp.loop.exit:
// CHECK7-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]])
// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
// CHECK7-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK7: .omp.final.then:
// CHECK7-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK7-NEXT: [[CONV24:%.*]] = sext i8 [[TMP29]] to i32
// CHECK7-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK7-NEXT: [[CONV25:%.*]] = sext i8 [[TMP30]] to i32
// CHECK7-NEXT: [[SUB26:%.*]] = sub i32 10, [[CONV25]]
// CHECK7-NEXT: [[SUB27:%.*]] = sub i32 [[SUB26]], 1
// CHECK7-NEXT: [[ADD28:%.*]] = add i32 [[SUB27]], 1
// CHECK7-NEXT: [[DIV29:%.*]] = udiv i32 [[ADD28]], 1
// CHECK7-NEXT: [[MUL30:%.*]] = mul nsw i32 [[DIV29]], 1
// CHECK7-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV24]], [[MUL30]]
// CHECK7-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8
// CHECK7-NEXT: store i8 [[CONV32]], ptr [[TMP0]], align 1
// CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK7: .omp.final.done:
// CHECK7-NEXT: br label [[OMP_PRECOND_END]]
// CHECK7: omp.precond.end:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z4fintv
// CHECK7-SAME: () #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[CALL:%.*]] = call noundef i32 @_Z9ftemplateIiET_v()
// CHECK7-NEXT: ret i32 [[CALL]]
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_v
// CHECK7-SAME: () #[[ATTR0]] comdat {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK7-NEXT: store i16 0, ptr [[AA]], align 2
// CHECK7-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2
// CHECK7-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: store i32 [[TMP1]], ptr [[TMP2]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: store i32 [[TMP1]], ptr [[TMP3]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK7-NEXT: store ptr null, ptr [[TMP4]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK7-NEXT: store i32 3, ptr [[TMP7]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK7-NEXT: store i32 1, ptr [[TMP8]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK7-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4
// CHECK7-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK7-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK7-NEXT: store ptr @.offload_sizes.7, ptr [[TMP11]], align 4
// CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK7-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP12]], align 4
// CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK7-NEXT: store ptr null, ptr [[TMP13]], align 4
// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK7-NEXT: store ptr null, ptr [[TMP14]], align 4
// CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK7-NEXT: store i64 100, ptr [[TMP15]], align 8
// CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK7-NEXT: store i64 0, ptr [[TMP16]], align 8
// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK7-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP17]], align 4
// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK7-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP18]], align 4
// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK7-NEXT: store i32 0, ptr [[TMP19]], align 4
// CHECK7-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.region_id, ptr [[KERNEL_ARGS]])
// CHECK7-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK7-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK7: omp_offload.failed:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135(i32 [[TMP1]]) #[[ATTR3]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK7: omp_offload.cont:
// CHECK7-NEXT: ret i32 0
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135
// CHECK7-SAME: (i32 noundef [[AA:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4
// CHECK7-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined, ptr [[AA_ADDR]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined
// CHECK7-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
// CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
// CHECK7-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]])
// CHECK7-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK7: omp.dispatch.cond:
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
// CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK7: cond.true:
// CHECK7-NEXT: br label [[COND_END:%.*]]
// CHECK7: cond.false:
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: br label [[COND_END]]
// CHECK7: cond.end:
// CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK7: omp.dispatch.body:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]]
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]]
// CHECK7-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK7-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]]
// CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7: omp.body.continue:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK7-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK7: omp.dispatch.inc:
// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK7-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// CHECK7-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
// CHECK7-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK7: omp.dispatch.end:
// CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
// CHECK7-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK7: .omp.final.then:
// CHECK7-NEXT: store i32 100, ptr [[I]], align 4
// CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK7: .omp.final.done:
// CHECK7-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z23without_schedule_clausePfS_S_S_
// CHECK9-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK9-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK9-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK9-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP1]], i64 16) ]
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 7
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64
// CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i64 [[IDXPROM]]
// CHECK9-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP9]] to i64
// CHECK9-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[IDXPROM1]]
// CHECK9-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[MUL3:%.*]] = fmul float [[TMP7]], [[TMP10]]
// CHECK9-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP12]] to i64
// CHECK9-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[IDXPROM4]]
// CHECK9-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP13]]
// CHECK9-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP15]] to i64
// CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM7]]
// CHECK9-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK9-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: store i32 32000001, ptr [[I]], align 4
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK9-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK9-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK9-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 7
// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK9-NEXT: store i32 [[SUB]], ptr [[I]], align 4
// CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64
// CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[IDXPROM]]
// CHECK9-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP8]] to i64
// CHECK9-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 [[IDXPROM1]]
// CHECK9-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
// CHECK9-NEXT: [[MUL3:%.*]] = fmul float [[TMP6]], [[TMP9]]
// CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP11]] to i64
// CHECK9-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[IDXPROM4]]
// CHECK9-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX5]], align 4
// CHECK9-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP12]]
// CHECK9-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP14]] to i64
// CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM7]]
// CHECK9-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: store i32 32, ptr [[I]], align 4
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z14static_chunkedPfS_S_S_
// CHECK9-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK9-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK9-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[CMP:%.*]] = icmp ule i32 [[TMP1]], [[TMP2]]
// CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[MUL:%.*]] = mul i32 [[TMP3]], 127
// CHECK9-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP5]] to i64
// CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[IDXPROM]]
// CHECK9-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP8]] to i64
// CHECK9-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 [[IDXPROM1]]
// CHECK9-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[MUL3:%.*]] = fmul float [[TMP6]], [[TMP9]]
// CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[IDXPROM4:%.*]] = zext i32 [[TMP11]] to i64
// CHECK9-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[IDXPROM4]]
// CHECK9-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP12]]
// CHECK9-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[IDXPROM7:%.*]] = zext i32 [[TMP14]] to i64
// CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM7]]
// CHECK9-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[ADD9:%.*]] = add i32 [[TMP15]], 1
// CHECK9-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: store i32 -2147483522, ptr [[I]], align 4
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z12test_precondv
// CHECK9-SAME: () #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A:%.*]] = alloca i8, align 1
// CHECK9-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK9-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I4:%.*]] = alloca i8, align 1
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTLINEAR_START:%.*]] = alloca i8, align 1
// CHECK9-NEXT: [[I6:%.*]] = alloca i8, align 1
// CHECK9-NEXT: [[I7:%.*]] = alloca i8, align 1
// CHECK9-NEXT: store i8 0, ptr [[A]], align 1
// CHECK9-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1
// CHECK9-NEXT: store i8 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK9-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK9-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
// CHECK9-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK9-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK9-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK9-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK9-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK9-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK9-NEXT: store i8 [[TMP3]], ptr [[I4]], align 1
// CHECK9-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK9-NEXT: [[CONV5:%.*]] = sext i8 [[TMP4]] to i32
// CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10
// CHECK9-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK9: simd.if.then:
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP6:%.*]] = load i8, ptr [[I]], align 1
// CHECK9-NEXT: store i8 [[TMP6]], ptr [[DOTLINEAR_START]], align 1
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK9-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: [[CONV9:%.*]] = sext i8 [[TMP9]] to i32
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK9-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]]
// CHECK9-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8
// CHECK9-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK9-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK9-NEXT: [[CONV13:%.*]] = sext i8 [[TMP12]] to i32
// CHECK9-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK9-NEXT: [[CONV14:%.*]] = sext i8 [[TMP13]] to i32
// CHECK9-NEXT: [[SUB15:%.*]] = sub i32 10, [[CONV14]]
// CHECK9-NEXT: [[SUB16:%.*]] = sub i32 [[SUB15]], 1
// CHECK9-NEXT: [[ADD17:%.*]] = add i32 [[SUB16]], 1
// CHECK9-NEXT: [[DIV18:%.*]] = udiv i32 [[ADD17]], 1
// CHECK9-NEXT: [[MUL19:%.*]] = mul nsw i32 [[DIV18]], 1
// CHECK9-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV13]], [[MUL19]]
// CHECK9-NEXT: [[CONV21:%.*]] = trunc i32 [[ADD20]] to i8
// CHECK9-NEXT: store i8 [[CONV21]], ptr [[I]], align 1
// CHECK9-NEXT: br label [[SIMD_IF_END]]
// CHECK9: simd.if.end:
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z4fintv
// CHECK9-SAME: () #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_v()
// CHECK9-NEXT: ret i32 [[CALL]]
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_v
// CHECK9-SAME: () #[[ATTR0]] comdat {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: store i16 0, ptr [[AA]], align 2
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]]
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK9-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: store i32 100, ptr [[I]], align 4
// CHECK9-NEXT: ret i32 0
//
//
// CHECK11-LABEL: define {{[^@]+}}@_Z23without_schedule_clausePfS_S_S_
// CHECK11-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK11-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK11-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK11-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK11-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK11-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK11-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP1]], i32 16) ]
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK11: omp.inner.for.cond:
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]]
// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK11: omp.inner.for.body:
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 7
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i32 [[TMP6]]
// CHECK11-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 [[TMP9]]
// CHECK11-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[MUL2:%.*]] = fmul float [[TMP7]], [[TMP10]]
// CHECK11-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i32 [[TMP12]]
// CHECK11-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[MUL4:%.*]] = fmul float [[MUL2]], [[TMP13]]
// CHECK11-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP15]]
// CHECK11-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK11: omp.body.continue:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK11: omp.inner.for.inc:
// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK11-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK11: omp.inner.for.end:
// CHECK11-NEXT: store i32 32000001, ptr [[I]], align 4
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK11-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK11-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK11-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK11-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK11-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK11-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK11: omp.inner.for.cond:
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK11: omp.inner.for.body:
// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 7
// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK11-NEXT: store i32 [[SUB]], ptr [[I]], align 4
// CHECK11-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4
// CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 [[TMP5]]
// CHECK11-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4
// CHECK11-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 [[TMP8]]
// CHECK11-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4
// CHECK11-NEXT: [[MUL2:%.*]] = fmul float [[TMP6]], [[TMP9]]
// CHECK11-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
// CHECK11-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i32 [[TMP11]]
// CHECK11-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX3]], align 4
// CHECK11-NEXT: [[MUL4:%.*]] = fmul float [[MUL2]], [[TMP12]]
// CHECK11-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4
// CHECK11-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]]
// CHECK11-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK11: omp.body.continue:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK11: omp.inner.for.inc:
// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK11: omp.inner.for.end:
// CHECK11-NEXT: store i32 32, ptr [[I]], align 4
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@_Z14static_chunkedPfS_S_S_
// CHECK11-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK11-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK11-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK11-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK11-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK11-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK11: omp.inner.for.cond:
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]]
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[CMP:%.*]] = icmp ule i32 [[TMP1]], [[TMP2]]
// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK11: omp.inner.for.body:
// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[MUL:%.*]] = mul i32 [[TMP3]], 127
// CHECK11-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 [[TMP5]]
// CHECK11-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 [[TMP8]]
// CHECK11-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[MUL2:%.*]] = fmul float [[TMP6]], [[TMP9]]
// CHECK11-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i32 [[TMP11]]
// CHECK11-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[MUL4:%.*]] = fmul float [[MUL2]], [[TMP12]]
// CHECK11-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]]
// CHECK11-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK11: omp.body.continue:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK11: omp.inner.for.inc:
// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[ADD6:%.*]] = add i32 [[TMP15]], 1
// CHECK11-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK11: omp.inner.for.end:
// CHECK11-NEXT: store i32 -2147483522, ptr [[I]], align 4
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@_Z12test_precondv
// CHECK11-SAME: () #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[A:%.*]] = alloca i8, align 1
// CHECK11-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK11-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[I4:%.*]] = alloca i8, align 1
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTLINEAR_START:%.*]] = alloca i8, align 1
// CHECK11-NEXT: [[I6:%.*]] = alloca i8, align 1
// CHECK11-NEXT: [[I7:%.*]] = alloca i8, align 1
// CHECK11-NEXT: store i8 0, ptr [[A]], align 1
// CHECK11-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1
// CHECK11-NEXT: store i8 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK11-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK11-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
// CHECK11-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK11-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK11-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK11-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK11-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK11-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK11-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK11-NEXT: store i8 [[TMP3]], ptr [[I4]], align 1
// CHECK11-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK11-NEXT: [[CONV5:%.*]] = sext i8 [[TMP4]] to i32
// CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10
// CHECK11-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK11: simd.if.then:
// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK11-NEXT: [[TMP6:%.*]] = load i8, ptr [[I]], align 1
// CHECK11-NEXT: store i8 [[TMP6]], ptr [[DOTLINEAR_START]], align 1
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK11: omp.inner.for.cond:
// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK11-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK11: omp.inner.for.body:
// CHECK11-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: [[CONV9:%.*]] = sext i8 [[TMP9]] to i32
// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK11-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]]
// CHECK11-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8
// CHECK11-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK11: omp.body.continue:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK11: omp.inner.for.inc:
// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK11-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK11: omp.inner.for.end:
// CHECK11-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK11-NEXT: [[CONV13:%.*]] = sext i8 [[TMP12]] to i32
// CHECK11-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK11-NEXT: [[CONV14:%.*]] = sext i8 [[TMP13]] to i32
// CHECK11-NEXT: [[SUB15:%.*]] = sub i32 10, [[CONV14]]
// CHECK11-NEXT: [[SUB16:%.*]] = sub i32 [[SUB15]], 1
// CHECK11-NEXT: [[ADD17:%.*]] = add i32 [[SUB16]], 1
// CHECK11-NEXT: [[DIV18:%.*]] = udiv i32 [[ADD17]], 1
// CHECK11-NEXT: [[MUL19:%.*]] = mul nsw i32 [[DIV18]], 1
// CHECK11-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV13]], [[MUL19]]
// CHECK11-NEXT: [[CONV21:%.*]] = trunc i32 [[ADD20]] to i8
// CHECK11-NEXT: store i8 [[CONV21]], ptr [[I]], align 1
// CHECK11-NEXT: br label [[SIMD_IF_END]]
// CHECK11: simd.if.end:
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@_Z4fintv
// CHECK11-SAME: () #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z9ftemplateIiET_v()
// CHECK11-NEXT: ret i32 [[CALL]]
//
//
// CHECK11-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_v
// CHECK11-SAME: () #[[ATTR0]] comdat {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store i16 0, ptr [[AA]], align 2
// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK11: omp.inner.for.cond:
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]]
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK11: omp.inner.for.body:
// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK11: omp.body.continue:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK11: omp.inner.for.inc:
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK11-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK11: omp.inner.for.end:
// CHECK11-NEXT: store i32 100, ptr [[I]], align 4
// CHECK11-NEXT: ret i32 0
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z23without_schedule_clausePfS_S_S_
// CHECK13-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK13-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK13-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK13-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK13-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK13-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK13-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK13-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP1]], i64 16) ]
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 7
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i64 [[IDXPROM]]
// CHECK13-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP9]] to i64
// CHECK13-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[IDXPROM1]]
// CHECK13-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[MUL3:%.*]] = fmul float [[TMP7]], [[TMP10]]
// CHECK13-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP12]] to i64
// CHECK13-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[IDXPROM4]]
// CHECK13-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP13]]
// CHECK13-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP15]] to i64
// CHECK13-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM7]]
// CHECK13-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: store i32 32000001, ptr [[I]], align 4
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK13-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK13-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK13-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK13-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK13-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK13-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK13-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 7
// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK13-NEXT: store i32 [[SUB]], ptr [[I]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nontemporal [[META7:![0-9]+]]
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4
// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[IDXPROM]]
// CHECK13-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4
// CHECK13-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP8]] to i64
// CHECK13-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 [[IDXPROM1]]
// CHECK13-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
// CHECK13-NEXT: [[MUL3:%.*]] = fmul float [[TMP6]], [[TMP9]]
// CHECK13-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
// CHECK13-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP11]] to i64
// CHECK13-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[IDXPROM4]]
// CHECK13-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX5]], align 4
// CHECK13-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP12]]
// CHECK13-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nontemporal [[META7]]
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4
// CHECK13-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP14]] to i64
// CHECK13-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM7]]
// CHECK13-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: store i32 32, ptr [[I]], align 4
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z14static_chunkedPfS_S_S_
// CHECK13-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK13-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK13-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK13-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK13-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK13-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK13-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]]
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[CMP:%.*]] = icmp ule i32 [[TMP1]], [[TMP2]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[MUL:%.*]] = mul i32 [[TMP3]], 127
// CHECK13-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP5]] to i64
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[IDXPROM]]
// CHECK13-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP8]] to i64
// CHECK13-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 [[IDXPROM1]]
// CHECK13-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[MUL3:%.*]] = fmul float [[TMP6]], [[TMP9]]
// CHECK13-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[IDXPROM4:%.*]] = zext i32 [[TMP11]] to i64
// CHECK13-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[IDXPROM4]]
// CHECK13-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP12]]
// CHECK13-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[IDXPROM7:%.*]] = zext i32 [[TMP14]] to i64
// CHECK13-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM7]]
// CHECK13-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: [[ADD9:%.*]] = add i32 [[TMP15]], 1
// CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: store i32 -2147483522, ptr [[I]], align 4
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z12test_precondv
// CHECK13-SAME: () #[[ATTR0]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[A:%.*]] = alloca i8, align 1
// CHECK13-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK13-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I4:%.*]] = alloca i8, align 1
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTLINEAR_START:%.*]] = alloca i8, align 1
// CHECK13-NEXT: [[I6:%.*]] = alloca i8, align 1
// CHECK13-NEXT: [[I7:%.*]] = alloca i8, align 1
// CHECK13-NEXT: store i8 0, ptr [[A]], align 1
// CHECK13-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1
// CHECK13-NEXT: store i8 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK13-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK13-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
// CHECK13-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK13-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK13-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK13-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK13-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK13-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK13-NEXT: store i8 [[TMP3]], ptr [[I4]], align 1
// CHECK13-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK13-NEXT: [[CONV5:%.*]] = sext i8 [[TMP4]] to i32
// CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10
// CHECK13-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK13: simd.if.then:
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i8, ptr [[I]], align 1
// CHECK13-NEXT: store i8 [[TMP6]], ptr [[DOTLINEAR_START]], align 1
// CHECK13-NEXT: [[TMP7:%.*]] = load i8, ptr [[A]], align 1
// CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP7]], 0
// CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK13: omp_if.then:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK13-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK13-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP13]]
// CHECK13-NEXT: [[CONV9:%.*]] = sext i8 [[TMP10]] to i32
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK13-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]]
// CHECK13-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8
// CHECK13-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal [[META7]], !llvm.access.group [[ACC_GRP13]]
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK13-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK13-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK13: omp_if.else:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]]
// CHECK13: omp.inner.for.cond13:
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[CMP14:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK13-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END23:%.*]]
// CHECK13: omp.inner.for.body15:
// CHECK13-NEXT: [[TMP15:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK13-NEXT: [[CONV16:%.*]] = sext i8 [[TMP15]] to i32
// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[MUL17:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK13-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV16]], [[MUL17]]
// CHECK13-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
// CHECK13-NEXT: store i8 [[CONV19]], ptr [[I6]], align 1
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE20:%.*]]
// CHECK13: omp.body.continue20:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC21:%.*]]
// CHECK13: omp.inner.for.inc21:
// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[ADD22:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK13-NEXT: store i32 [[ADD22]], ptr [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK13: omp.inner.for.end23:
// CHECK13-NEXT: br label [[OMP_IF_END]]
// CHECK13: omp_if.end:
// CHECK13-NEXT: [[TMP18:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK13-NEXT: [[CONV24:%.*]] = sext i8 [[TMP18]] to i32
// CHECK13-NEXT: [[TMP19:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK13-NEXT: [[CONV25:%.*]] = sext i8 [[TMP19]] to i32
// CHECK13-NEXT: [[SUB26:%.*]] = sub i32 10, [[CONV25]]
// CHECK13-NEXT: [[SUB27:%.*]] = sub i32 [[SUB26]], 1
// CHECK13-NEXT: [[ADD28:%.*]] = add i32 [[SUB27]], 1
// CHECK13-NEXT: [[DIV29:%.*]] = udiv i32 [[ADD28]], 1
// CHECK13-NEXT: [[MUL30:%.*]] = mul nsw i32 [[DIV29]], 1
// CHECK13-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV24]], [[MUL30]]
// CHECK13-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8
// CHECK13-NEXT: store i8 [[CONV32]], ptr [[I]], align 1
// CHECK13-NEXT: br label [[SIMD_IF_END]]
// CHECK13: simd.if.end:
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z4fintv
// CHECK13-SAME: () #[[ATTR0]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_v()
// CHECK13-NEXT: ret i32 [[CALL]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_v
// CHECK13-SAME: () #[[ATTR0]] comdat {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i16 0, ptr [[AA]], align 2
// CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK13-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK13-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: store i32 100, ptr [[I]], align 4
// CHECK13-NEXT: ret i32 0
//
//
// CHECK15-LABEL: define {{[^@]+}}@_Z23without_schedule_clausePfS_S_S_
// CHECK15-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK15-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK15-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK15-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK15-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK15-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK15-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK15-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP1]], i32 16) ]
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]]
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 7
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i32 [[TMP6]]
// CHECK15-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 [[TMP9]]
// CHECK15-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[MUL2:%.*]] = fmul float [[TMP7]], [[TMP10]]
// CHECK15-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i32 [[TMP12]]
// CHECK15-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[MUL4:%.*]] = fmul float [[MUL2]], [[TMP13]]
// CHECK15-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP15]]
// CHECK15-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK15-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: store i32 32000001, ptr [[I]], align 4
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK15-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK15-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK15-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK15-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK15-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK15-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK15-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 7
// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK15-NEXT: store i32 [[SUB]], ptr [[I]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nontemporal [[META8:![0-9]+]]
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 [[TMP5]]
// CHECK15-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4
// CHECK15-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 [[TMP8]]
// CHECK15-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4
// CHECK15-NEXT: [[MUL2:%.*]] = fmul float [[TMP6]], [[TMP9]]
// CHECK15-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
// CHECK15-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i32 [[TMP11]]
// CHECK15-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX3]], align 4
// CHECK15-NEXT: [[MUL4:%.*]] = fmul float [[MUL2]], [[TMP12]]
// CHECK15-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nontemporal [[META8]]
// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4
// CHECK15-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]]
// CHECK15-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK15-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: store i32 32, ptr [[I]], align 4
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@_Z14static_chunkedPfS_S_S_
// CHECK15-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK15-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK15-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK15-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK15-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK15-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK15-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]]
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[CMP:%.*]] = icmp ule i32 [[TMP1]], [[TMP2]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[MUL:%.*]] = mul i32 [[TMP3]], 127
// CHECK15-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 [[TMP5]]
// CHECK15-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 [[TMP8]]
// CHECK15-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[MUL2:%.*]] = fmul float [[TMP6]], [[TMP9]]
// CHECK15-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i32 [[TMP11]]
// CHECK15-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[MUL4:%.*]] = fmul float [[MUL2]], [[TMP12]]
// CHECK15-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]]
// CHECK15-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: [[ADD6:%.*]] = add i32 [[TMP15]], 1
// CHECK15-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: store i32 -2147483522, ptr [[I]], align 4
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@_Z12test_precondv
// CHECK15-SAME: () #[[ATTR0]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[A:%.*]] = alloca i8, align 1
// CHECK15-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK15-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I4:%.*]] = alloca i8, align 1
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTLINEAR_START:%.*]] = alloca i8, align 1
// CHECK15-NEXT: [[I6:%.*]] = alloca i8, align 1
// CHECK15-NEXT: [[I7:%.*]] = alloca i8, align 1
// CHECK15-NEXT: store i8 0, ptr [[A]], align 1
// CHECK15-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1
// CHECK15-NEXT: store i8 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK15-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK15-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
// CHECK15-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK15-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK15-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK15-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK15-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK15-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK15-NEXT: store i8 [[TMP3]], ptr [[I4]], align 1
// CHECK15-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK15-NEXT: [[CONV5:%.*]] = sext i8 [[TMP4]] to i32
// CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10
// CHECK15-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK15: simd.if.then:
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP6:%.*]] = load i8, ptr [[I]], align 1
// CHECK15-NEXT: store i8 [[TMP6]], ptr [[DOTLINEAR_START]], align 1
// CHECK15-NEXT: [[TMP7:%.*]] = load i8, ptr [[A]], align 1
// CHECK15-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP7]], 0
// CHECK15-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK15: omp_if.then:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]]
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]]
// CHECK15-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK15-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP14]]
// CHECK15-NEXT: [[CONV9:%.*]] = sext i8 [[TMP10]] to i32
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]]
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK15-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]]
// CHECK15-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8
// CHECK15-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal [[META8]], !llvm.access.group [[ACC_GRP14]]
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]]
// CHECK15-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK15-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]]
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK15: omp_if.else:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]]
// CHECK15: omp.inner.for.cond13:
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[CMP14:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK15-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END23:%.*]]
// CHECK15: omp.inner.for.body15:
// CHECK15-NEXT: [[TMP15:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK15-NEXT: [[CONV16:%.*]] = sext i8 [[TMP15]] to i32
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[MUL17:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK15-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV16]], [[MUL17]]
// CHECK15-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
// CHECK15-NEXT: store i8 [[CONV19]], ptr [[I6]], align 1
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE20:%.*]]
// CHECK15: omp.body.continue20:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC21:%.*]]
// CHECK15: omp.inner.for.inc21:
// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[ADD22:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK15-NEXT: store i32 [[ADD22]], ptr [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK15: omp.inner.for.end23:
// CHECK15-NEXT: br label [[OMP_IF_END]]
// CHECK15: omp_if.end:
// CHECK15-NEXT: [[TMP18:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK15-NEXT: [[CONV24:%.*]] = sext i8 [[TMP18]] to i32
// CHECK15-NEXT: [[TMP19:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK15-NEXT: [[CONV25:%.*]] = sext i8 [[TMP19]] to i32
// CHECK15-NEXT: [[SUB26:%.*]] = sub i32 10, [[CONV25]]
// CHECK15-NEXT: [[SUB27:%.*]] = sub i32 [[SUB26]], 1
// CHECK15-NEXT: [[ADD28:%.*]] = add i32 [[SUB27]], 1
// CHECK15-NEXT: [[DIV29:%.*]] = udiv i32 [[ADD28]], 1
// CHECK15-NEXT: [[MUL30:%.*]] = mul nsw i32 [[DIV29]], 1
// CHECK15-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV24]], [[MUL30]]
// CHECK15-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8
// CHECK15-NEXT: store i8 [[CONV32]], ptr [[I]], align 1
// CHECK15-NEXT: br label [[SIMD_IF_END]]
// CHECK15: simd.if.end:
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@_Z4fintv
// CHECK15-SAME: () #[[ATTR0]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z9ftemplateIiET_v()
// CHECK15-NEXT: ret i32 [[CALL]]
//
//
// CHECK15-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_v
// CHECK15-SAME: () #[[ATTR0]] comdat {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i16 0, ptr [[AA]], align 2
// CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]]
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK15-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: store i32 100, ptr [[I]], align 4
// CHECK15-NEXT: ret i32 0
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70
// CHECK17-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK17-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK17-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK17-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2:[0-9]+]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined
// CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[B:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[C:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK17-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK17-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK17-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
// CHECK17-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i64 16) ]
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM]]
// CHECK17-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP17]] to i64
// CHECK17-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[IDXPROM2]]
// CHECK17-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[MUL4:%.*]] = fmul float [[TMP15]], [[TMP18]]
// CHECK17-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP20]] to i64
// CHECK17-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i64 [[IDXPROM5]]
// CHECK17-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP21]]
// CHECK17-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP23]] to i64
// CHECK17-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 [[IDXPROM8]]
// CHECK17-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], 1
// CHECK17-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]])
// CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK17-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17: .omp.final.then:
// CHECK17-NEXT: store i32 32000001, ptr [[I]], align 4
// CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK17: .omp.final.done:
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86
// CHECK17-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK17-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK17-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK17-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined
// CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[B:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[C:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK17-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK17-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK17-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK17-NEXT: store i32 [[SUB]], ptr [[I]], align 4
// CHECK17-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4
// CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM]]
// CHECK17-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK17-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8
// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4
// CHECK17-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64
// CHECK17-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM2]]
// CHECK17-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4
// CHECK17-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]]
// CHECK17-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4
// CHECK17-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64
// CHECK17-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM5]]
// CHECK17-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4
// CHECK17-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]]
// CHECK17-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8
// CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4
// CHECK17-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64
// CHECK17-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM8]]
// CHECK17-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1
// CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
// CHECK17-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17: .omp.final.then:
// CHECK17-NEXT: store i32 32, ptr [[I]], align 4
// CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK17: .omp.final.done:
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103
// CHECK17-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK17-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK17-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK17-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined
// CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[B:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[C:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK17-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK17-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK17-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5)
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK17: omp.dispatch.cond:
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK17: omp.dispatch.body:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]]
// CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127
// CHECK17-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM]]
// CHECK17-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64
// CHECK17-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM3]]
// CHECK17-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]]
// CHECK17-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64
// CHECK17-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM6]]
// CHECK17-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]]
// CHECK17-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64
// CHECK17-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM9]]
// CHECK17-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1
// CHECK17-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK17: omp.dispatch.inc:
// CHECK17-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]]
// CHECK17-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]]
// CHECK17-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK17: omp.dispatch.end:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK17-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
// CHECK17-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17: .omp.final.then:
// CHECK17-NEXT: store i32 -2147483522, ptr [[I]], align 4
// CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK17: .omp.final.done:
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115
// CHECK17-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[I:%.*]], i64 noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[I_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK17-NEXT: store i64 [[I]], ptr [[I_ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined, ptr [[I_ADDR]], ptr [[A_ADDR]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined
// CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[I:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I4:%.*]] = alloca i8, align 1
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I6:%.*]] = alloca i8, align 1
// CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1
// CHECK17-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK17-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK17-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32
// CHECK17-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK17-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK17-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK17-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK17-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK17-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1
// CHECK17-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK17-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32
// CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK17: omp.precond.then:
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK17-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]]
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK17-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK17-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP16:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP21]]
// CHECK17-NEXT: [[CONV9:%.*]] = sext i8 [[TMP16]] to i32
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK17-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]]
// CHECK17-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8
// CHECK17-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group [[ACC_GRP21]]
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK17-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK17-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP20]])
// CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
// CHECK17-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17: .omp.final.then:
// CHECK17-NEXT: [[TMP23:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK17-NEXT: [[CONV13:%.*]] = sext i8 [[TMP23]] to i32
// CHECK17-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK17-NEXT: [[CONV14:%.*]] = sext i8 [[TMP24]] to i32
// CHECK17-NEXT: [[SUB15:%.*]] = sub i32 10, [[CONV14]]
// CHECK17-NEXT: [[SUB16:%.*]] = sub i32 [[SUB15]], 1
// CHECK17-NEXT: [[ADD17:%.*]] = add i32 [[SUB16]], 1
// CHECK17-NEXT: [[DIV18:%.*]] = udiv i32 [[ADD17]], 1
// CHECK17-NEXT: [[MUL19:%.*]] = mul nsw i32 [[DIV18]], 1
// CHECK17-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV13]], [[MUL19]]
// CHECK17-NEXT: [[CONV21:%.*]] = trunc i32 [[ADD20]] to i8
// CHECK17-NEXT: store i8 [[CONV21]], ptr [[TMP0]], align 1
// CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK17: .omp.final.done:
// CHECK17-NEXT: br label [[OMP_PRECOND_END]]
// CHECK17: omp.precond.end:
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135
// CHECK17-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8
// CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined, ptr [[AA_ADDR]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined
// CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
// CHECK17-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]])
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK17: omp.dispatch.cond:
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK17: omp.dispatch.body:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK17-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK17-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK17: omp.dispatch.inc:
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK17-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// CHECK17-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK17: omp.dispatch.end:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
// CHECK17-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17: .omp.final.then:
// CHECK17-NEXT: store i32 100, ptr [[I]], align 4
// CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK17: .omp.final.done:
// CHECK17-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70
// CHECK19-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK19-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK19-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK19-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2:[0-9]+]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined
// CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK19-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK19-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK19-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 4
// CHECK19-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i32 16) ]
// CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]]
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]]
// CHECK19-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 [[TMP17]]
// CHECK19-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[MUL3:%.*]] = fmul float [[TMP15]], [[TMP18]]
// CHECK19-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i32 [[TMP20]]
// CHECK19-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP21]]
// CHECK19-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i32 [[TMP23]]
// CHECK19-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP24]], 1
// CHECK19-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]])
// CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK19-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK19: .omp.final.then:
// CHECK19-NEXT: store i32 32000001, ptr [[I]], align 4
// CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK19: .omp.final.done:
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86
// CHECK19-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK19-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK19-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK19-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined
// CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK19-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK19-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK19-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK19-NEXT: store i32 [[SUB]], ptr [[I]], align 4
// CHECK19-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 [[TMP13]]
// CHECK19-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK19-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4
// CHECK19-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP16]]
// CHECK19-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
// CHECK19-NEXT: [[MUL3:%.*]] = fmul float [[TMP14]], [[TMP17]]
// CHECK19-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4
// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4
// CHECK19-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[TMP19]]
// CHECK19-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
// CHECK19-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP20]]
// CHECK19-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4
// CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4
// CHECK19-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP22]]
// CHECK19-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1
// CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
// CHECK19-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK19: .omp.final.then:
// CHECK19-NEXT: store i32 32, ptr [[I]], align 4
// CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK19: .omp.final.done:
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103
// CHECK19-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK19-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK19-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK19-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined
// CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK19-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK19-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK19-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5)
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK19: omp.dispatch.cond:
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK19: omp.dispatch.body:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]]
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]]
// CHECK19-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127
// CHECK19-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP15]]
// CHECK19-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 [[TMP18]]
// CHECK19-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[MUL4:%.*]] = fmul float [[TMP16]], [[TMP19]]
// CHECK19-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP21]]
// CHECK19-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[MUL6:%.*]] = fmul float [[MUL4]], [[TMP22]]
// CHECK19-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[TMP24]]
// CHECK19-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: [[ADD8:%.*]] = add i32 [[TMP25]], 1
// CHECK19-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK19: omp.dispatch.inc:
// CHECK19-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD9:%.*]] = add i32 [[TMP26]], [[TMP27]]
// CHECK19-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD10:%.*]] = add i32 [[TMP28]], [[TMP29]]
// CHECK19-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK19: omp.dispatch.end:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK19-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
// CHECK19-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK19: .omp.final.then:
// CHECK19-NEXT: store i32 -2147483522, ptr [[I]], align 4
// CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK19: .omp.final.done:
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115
// CHECK19-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[I:%.*]], i32 noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK19-NEXT: store i32 [[I]], ptr [[I_ADDR]], align 4
// CHECK19-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined, ptr [[I_ADDR]], ptr [[A_ADDR]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined
// CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[I:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR0]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I4:%.*]] = alloca i8, align 1
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I6:%.*]] = alloca i8, align 1
// CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 4
// CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1
// CHECK19-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK19-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK19-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32
// CHECK19-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK19-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK19-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK19-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK19-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK19-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1
// CHECK19-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK19-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32
// CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK19: omp.precond.then:
// CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK19-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK19-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK19-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP16:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP22]]
// CHECK19-NEXT: [[CONV9:%.*]] = sext i8 [[TMP16]] to i32
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK19-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]]
// CHECK19-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8
// CHECK19-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group [[ACC_GRP22]]
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK19-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK19-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP20]])
// CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
// CHECK19-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK19: .omp.final.then:
// CHECK19-NEXT: [[TMP23:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK19-NEXT: [[CONV13:%.*]] = sext i8 [[TMP23]] to i32
// CHECK19-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK19-NEXT: [[CONV14:%.*]] = sext i8 [[TMP24]] to i32
// CHECK19-NEXT: [[SUB15:%.*]] = sub i32 10, [[CONV14]]
// CHECK19-NEXT: [[SUB16:%.*]] = sub i32 [[SUB15]], 1
// CHECK19-NEXT: [[ADD17:%.*]] = add i32 [[SUB16]], 1
// CHECK19-NEXT: [[DIV18:%.*]] = udiv i32 [[ADD17]], 1
// CHECK19-NEXT: [[MUL19:%.*]] = mul nsw i32 [[DIV18]], 1
// CHECK19-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV13]], [[MUL19]]
// CHECK19-NEXT: [[CONV21:%.*]] = trunc i32 [[ADD20]] to i8
// CHECK19-NEXT: store i8 [[CONV21]], ptr [[TMP0]], align 1
// CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK19: .omp.final.done:
// CHECK19-NEXT: br label [[OMP_PRECOND_END]]
// CHECK19: omp.precond.end:
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135
// CHECK19-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR0]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4
// CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined, ptr [[AA_ADDR]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined
// CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
// CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
// CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]])
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK19: omp.dispatch.cond:
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK19: omp.dispatch.body:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]]
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]]
// CHECK19-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK19-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]]
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
// CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK19-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK19: omp.dispatch.inc:
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK19-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// CHECK19-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK19: omp.dispatch.end:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
// CHECK19-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK19: .omp.final.then:
// CHECK19-NEXT: store i32 100, ptr [[I]], align 4
// CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK19: .omp.final.done:
// CHECK19-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70
// CHECK21-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK21-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK21-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK21-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK21-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK21-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2:[0-9]+]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined
// CHECK21-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[B:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[C:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK21-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK21-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK21-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK21-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK21-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK21-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK21-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK21-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK21-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
// CHECK21-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i64 16) ]
// CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK21-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423
// CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK21: cond.true:
// CHECK21-NEXT: br label [[COND_END:%.*]]
// CHECK21: cond.false:
// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: br label [[COND_END]]
// CHECK21: cond.end:
// CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK21-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK21: omp.inner.for.cond:
// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK21: omp.inner.for.body:
// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7
// CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
// CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM]]
// CHECK21-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP17]] to i64
// CHECK21-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[IDXPROM2]]
// CHECK21-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[MUL4:%.*]] = fmul float [[TMP15]], [[TMP18]]
// CHECK21-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP20]] to i64
// CHECK21-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i64 [[IDXPROM5]]
// CHECK21-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP21]]
// CHECK21-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP23]] to i64
// CHECK21-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 [[IDXPROM8]]
// CHECK21-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK21: omp.body.continue:
// CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK21: omp.inner.for.inc:
// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], 1
// CHECK21-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK21: omp.inner.for.end:
// CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK21: omp.loop.exit:
// CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]])
// CHECK21-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK21-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK21: .omp.final.then:
// CHECK21-NEXT: store i32 32000001, ptr [[I]], align 4
// CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK21: .omp.final.done:
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86
// CHECK21-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK21-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK21-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK21-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK21-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK21-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined
// CHECK21-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[B:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[C:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK21-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK21-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK21-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK21-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK21-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK21-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK21-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK21-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK21-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
// CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK21: cond.true:
// CHECK21-NEXT: br label [[COND_END:%.*]]
// CHECK21: cond.false:
// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: br label [[COND_END]]
// CHECK21: cond.end:
// CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK21-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK21: omp.inner.for.cond:
// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK21: omp.inner.for.body:
// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
// CHECK21-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK21-NEXT: store i32 [[SUB]], ptr [[I]], align 4
// CHECK21-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8, !nontemporal [[META16:![0-9]+]]
// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4
// CHECK21-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
// CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM]]
// CHECK21-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK21-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8
// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4
// CHECK21-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64
// CHECK21-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM2]]
// CHECK21-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4
// CHECK21-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]]
// CHECK21-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8
// CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4
// CHECK21-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64
// CHECK21-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM5]]
// CHECK21-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4
// CHECK21-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]]
// CHECK21-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8, !nontemporal [[META16]]
// CHECK21-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4
// CHECK21-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64
// CHECK21-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM8]]
// CHECK21-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4
// CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK21: omp.body.continue:
// CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK21: omp.inner.for.inc:
// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1
// CHECK21-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK21: omp.inner.for.end:
// CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK21: omp.loop.exit:
// CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
// CHECK21-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK21: .omp.final.then:
// CHECK21-NEXT: store i32 32, ptr [[I]], align 4
// CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK21: .omp.final.done:
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103
// CHECK21-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK21-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK21-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK21-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK21-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK21-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined
// CHECK21-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[B:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[C:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK21-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK21-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK21-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK21-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK21-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK21-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK21-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK21-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK21-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK21-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5)
// CHECK21-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK21: omp.dispatch.cond:
// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288
// CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK21: cond.true:
// CHECK21-NEXT: br label [[COND_END:%.*]]
// CHECK21: cond.false:
// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: br label [[COND_END]]
// CHECK21: cond.end:
// CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK21-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]]
// CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK21: omp.dispatch.body:
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK21: omp.inner.for.cond:
// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]]
// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]]
// CHECK21-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK21: omp.inner.for.body:
// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127
// CHECK21-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64
// CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM]]
// CHECK21-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64
// CHECK21-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM3]]
// CHECK21-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]]
// CHECK21-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64
// CHECK21-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM6]]
// CHECK21-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]]
// CHECK21-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64
// CHECK21-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM9]]
// CHECK21-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK21: omp.body.continue:
// CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK21: omp.inner.for.inc:
// CHECK21-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1
// CHECK21-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK21: omp.inner.for.end:
// CHECK21-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK21: omp.dispatch.inc:
// CHECK21-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK21-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]]
// CHECK21-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_LB]], align 4
// CHECK21-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]]
// CHECK21-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK21: omp.dispatch.end:
// CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK21-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
// CHECK21-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK21: .omp.final.then:
// CHECK21-NEXT: store i32 -2147483522, ptr [[I]], align 4
// CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK21: .omp.final.done:
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115
// CHECK21-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[I:%.*]], i64 noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[I_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK21-NEXT: store i64 [[I]], ptr [[I_ADDR]], align 8
// CHECK21-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK21-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined, ptr [[I_ADDR]], ptr [[A_ADDR]])
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined
// CHECK21-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[I:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK21-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK21-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[I4:%.*]] = alloca i8, align 1
// CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[I6:%.*]] = alloca i8, align 1
// CHECK21-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK21-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8
// CHECK21-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8
// CHECK21-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK21-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1
// CHECK21-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK21-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK21-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32
// CHECK21-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK21-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK21-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK21-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK21-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK21-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK21-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK21-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1
// CHECK21-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK21-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32
// CHECK21-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10
// CHECK21-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK21: omp.precond.then:
// CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK21-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK21-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK21-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK21: cond.true:
// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK21-NEXT: br label [[COND_END:%.*]]
// CHECK21: cond.false:
// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: br label [[COND_END]]
// CHECK21: cond.end:
// CHECK21-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK21-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP1]], align 1
// CHECK21-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP14]], 0
// CHECK21-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK21: omp_if.then:
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK21: omp.inner.for.cond:
// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK21-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK21-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK21: omp.inner.for.body:
// CHECK21-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP22]]
// CHECK21-NEXT: [[CONV9:%.*]] = sext i8 [[TMP17]] to i32
// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK21-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]]
// CHECK21-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8
// CHECK21-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal [[META16]], !llvm.access.group [[ACC_GRP22]]
// CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK21: omp.body.continue:
// CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK21: omp.inner.for.inc:
// CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK21-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK21-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK21: omp.inner.for.end:
// CHECK21-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK21: omp_if.else:
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]]
// CHECK21: omp.inner.for.cond13:
// CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[CMP14:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
// CHECK21-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END23:%.*]]
// CHECK21: omp.inner.for.body15:
// CHECK21-NEXT: [[TMP22:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK21-NEXT: [[CONV16:%.*]] = sext i8 [[TMP22]] to i32
// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: [[MUL17:%.*]] = mul nsw i32 [[TMP23]], 1
// CHECK21-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV16]], [[MUL17]]
// CHECK21-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
// CHECK21-NEXT: store i8 [[CONV19]], ptr [[I6]], align 1
// CHECK21-NEXT: br label [[OMP_BODY_CONTINUE20:%.*]]
// CHECK21: omp.body.continue20:
// CHECK21-NEXT: br label [[OMP_INNER_FOR_INC21:%.*]]
// CHECK21: omp.inner.for.inc21:
// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: [[ADD22:%.*]] = add nsw i32 [[TMP24]], 1
// CHECK21-NEXT: store i32 [[ADD22]], ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK21: omp.inner.for.end23:
// CHECK21-NEXT: br label [[OMP_IF_END]]
// CHECK21: omp_if.end:
// CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK21: omp.loop.exit:
// CHECK21-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
// CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]])
// CHECK21-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
// CHECK21-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK21: .omp.final.then:
// CHECK21-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK21-NEXT: [[CONV24:%.*]] = sext i8 [[TMP29]] to i32
// CHECK21-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK21-NEXT: [[CONV25:%.*]] = sext i8 [[TMP30]] to i32
// CHECK21-NEXT: [[SUB26:%.*]] = sub i32 10, [[CONV25]]
// CHECK21-NEXT: [[SUB27:%.*]] = sub i32 [[SUB26]], 1
// CHECK21-NEXT: [[ADD28:%.*]] = add i32 [[SUB27]], 1
// CHECK21-NEXT: [[DIV29:%.*]] = udiv i32 [[ADD28]], 1
// CHECK21-NEXT: [[MUL30:%.*]] = mul nsw i32 [[DIV29]], 1
// CHECK21-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV24]], [[MUL30]]
// CHECK21-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8
// CHECK21-NEXT: store i8 [[CONV32]], ptr [[TMP0]], align 1
// CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK21: .omp.final.done:
// CHECK21-NEXT: br label [[OMP_PRECOND_END]]
// CHECK21: omp.precond.end:
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135
// CHECK21-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK21-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8
// CHECK21-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined, ptr [[AA_ADDR]])
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined
// CHECK21-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
// CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK21-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK21-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
// CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK21-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
// CHECK21-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK21-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]])
// CHECK21-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK21: omp.dispatch.cond:
// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
// CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK21: cond.true:
// CHECK21-NEXT: br label [[COND_END:%.*]]
// CHECK21: cond.false:
// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: br label [[COND_END]]
// CHECK21: cond.end:
// CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK21-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK21: omp.dispatch.body:
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK21: omp.inner.for.cond:
// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]]
// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]]
// CHECK21-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK21-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK21: omp.inner.for.body:
// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
// CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]]
// CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK21: omp.body.continue:
// CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK21: omp.inner.for.inc:
// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
// CHECK21-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK21-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
// CHECK21: omp.inner.for.end:
// CHECK21-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK21: omp.dispatch.inc:
// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK21-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// CHECK21-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
// CHECK21-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK21: omp.dispatch.end:
// CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
// CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
// CHECK21-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK21: .omp.final.then:
// CHECK21-NEXT: store i32 100, ptr [[I]], align 4
// CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK21: .omp.final.done:
// CHECK21-NEXT: ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70
// CHECK23-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK23-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK23-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK23-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK23-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK23-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2:[0-9]+]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK23-NEXT: ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.omp_outlined
// CHECK23-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK23-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK23-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK23-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK23-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK23-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK23-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK23-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK23-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 4
// CHECK23-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i32 16) ]
// CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK23-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423
// CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK23: cond.true:
// CHECK23-NEXT: br label [[COND_END:%.*]]
// CHECK23: cond.false:
// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: br label [[COND_END]]
// CHECK23: cond.end:
// CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK23-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK23: omp.inner.for.cond:
// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]]
// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK23: omp.inner.for.body:
// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7
// CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]]
// CHECK23-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 [[TMP17]]
// CHECK23-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[MUL3:%.*]] = fmul float [[TMP15]], [[TMP18]]
// CHECK23-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i32 [[TMP20]]
// CHECK23-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP21]]
// CHECK23-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i32 [[TMP23]]
// CHECK23-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK23: omp.body.continue:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK23: omp.inner.for.inc:
// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP24]], 1
// CHECK23-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK23: omp.inner.for.end:
// CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK23: omp.loop.exit:
// CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]])
// CHECK23-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK23-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK23: .omp.final.then:
// CHECK23-NEXT: store i32 32000001, ptr [[I]], align 4
// CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK23: .omp.final.done:
// CHECK23-NEXT: ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86
// CHECK23-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK23-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK23-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK23-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK23-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK23-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK23-NEXT: ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.omp_outlined
// CHECK23-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK23-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK23-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK23-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK23-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK23-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK23-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK23-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK23-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
// CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK23: cond.true:
// CHECK23-NEXT: br label [[COND_END:%.*]]
// CHECK23: cond.false:
// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: br label [[COND_END]]
// CHECK23: cond.end:
// CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK23-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK23: omp.inner.for.cond:
// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK23: omp.inner.for.body:
// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
// CHECK23-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK23-NEXT: store i32 [[SUB]], ptr [[I]], align 4
// CHECK23-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4, !nontemporal [[META17:![0-9]+]]
// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4
// CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 [[TMP13]]
// CHECK23-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK23-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4
// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4
// CHECK23-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP16]]
// CHECK23-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
// CHECK23-NEXT: [[MUL3:%.*]] = fmul float [[TMP14]], [[TMP17]]
// CHECK23-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4
// CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4
// CHECK23-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[TMP19]]
// CHECK23-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
// CHECK23-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP20]]
// CHECK23-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4, !nontemporal [[META17]]
// CHECK23-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4
// CHECK23-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP22]]
// CHECK23-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4
// CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK23: omp.body.continue:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK23: omp.inner.for.inc:
// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1
// CHECK23-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
// CHECK23: omp.inner.for.end:
// CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK23: omp.loop.exit:
// CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
// CHECK23-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK23: .omp.final.then:
// CHECK23-NEXT: store i32 32, ptr [[I]], align 4
// CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK23: .omp.final.done:
// CHECK23-NEXT: ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103
// CHECK23-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK23-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK23-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK23-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK23-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK23-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[C_ADDR]], ptr [[D_ADDR]])
// CHECK23-NEXT: ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.omp_outlined
// CHECK23-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK23-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK23-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK23-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK23-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK23-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK23-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK23-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK23-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK23-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5)
// CHECK23-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK23: omp.dispatch.cond:
// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288
// CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK23: cond.true:
// CHECK23-NEXT: br label [[COND_END:%.*]]
// CHECK23: cond.false:
// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: br label [[COND_END]]
// CHECK23: cond.end:
// CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK23-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]]
// CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK23: omp.dispatch.body:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK23: omp.inner.for.cond:
// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]]
// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]]
// CHECK23-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK23: omp.inner.for.body:
// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127
// CHECK23-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP15]]
// CHECK23-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 [[TMP18]]
// CHECK23-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[MUL4:%.*]] = fmul float [[TMP16]], [[TMP19]]
// CHECK23-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP21]]
// CHECK23-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[MUL6:%.*]] = fmul float [[MUL4]], [[TMP22]]
// CHECK23-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[TMP24]]
// CHECK23-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK23: omp.body.continue:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK23: omp.inner.for.inc:
// CHECK23-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: [[ADD8:%.*]] = add i32 [[TMP25]], 1
// CHECK23-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
// CHECK23: omp.inner.for.end:
// CHECK23-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK23: omp.dispatch.inc:
// CHECK23-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK23-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK23-NEXT: [[ADD9:%.*]] = add i32 [[TMP26]], [[TMP27]]
// CHECK23-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_LB]], align 4
// CHECK23-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK23-NEXT: [[ADD10:%.*]] = add i32 [[TMP28]], [[TMP29]]
// CHECK23-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK23: omp.dispatch.end:
// CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK23-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
// CHECK23-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK23: .omp.final.then:
// CHECK23-NEXT: store i32 -2147483522, ptr [[I]], align 4
// CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK23: .omp.final.done:
// CHECK23-NEXT: ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115
// CHECK23-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[I:%.*]], i32 noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK23-NEXT: store i32 [[I]], ptr [[I_ADDR]], align 4
// CHECK23-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK23-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined, ptr [[I_ADDR]], ptr [[A_ADDR]])
// CHECK23-NEXT: ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.omp_outlined
// CHECK23-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[I:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK23-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK23-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[I4:%.*]] = alloca i8, align 1
// CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[I6:%.*]] = alloca i8, align 1
// CHECK23-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK23-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 4
// CHECK23-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK23-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1
// CHECK23-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK23-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK23-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32
// CHECK23-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]]
// CHECK23-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1
// CHECK23-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1
// CHECK23-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK23-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK23-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK23-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK23-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1
// CHECK23-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK23-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32
// CHECK23-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10
// CHECK23-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK23: omp.precond.then:
// CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK23-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK23-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK23-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK23: cond.true:
// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK23-NEXT: br label [[COND_END:%.*]]
// CHECK23: cond.false:
// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: br label [[COND_END]]
// CHECK23: cond.end:
// CHECK23-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK23-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP1]], align 1
// CHECK23-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP14]], 0
// CHECK23-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK23: omp_if.then:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK23: omp.inner.for.cond:
// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]]
// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
// CHECK23-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK23-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK23: omp.inner.for.body:
// CHECK23-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP23]]
// CHECK23-NEXT: [[CONV9:%.*]] = sext i8 [[TMP17]] to i32
// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
// CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK23-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]]
// CHECK23-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8
// CHECK23-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal [[META17]], !llvm.access.group [[ACC_GRP23]]
// CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK23: omp.body.continue:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK23: omp.inner.for.inc:
// CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
// CHECK23-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK23-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
// CHECK23: omp.inner.for.end:
// CHECK23-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK23: omp_if.else:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]]
// CHECK23: omp.inner.for.cond13:
// CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[CMP14:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
// CHECK23-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END23:%.*]]
// CHECK23: omp.inner.for.body15:
// CHECK23-NEXT: [[TMP22:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK23-NEXT: [[CONV16:%.*]] = sext i8 [[TMP22]] to i32
// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: [[MUL17:%.*]] = mul nsw i32 [[TMP23]], 1
// CHECK23-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV16]], [[MUL17]]
// CHECK23-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
// CHECK23-NEXT: store i8 [[CONV19]], ptr [[I6]], align 1
// CHECK23-NEXT: br label [[OMP_BODY_CONTINUE20:%.*]]
// CHECK23: omp.body.continue20:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_INC21:%.*]]
// CHECK23: omp.inner.for.inc21:
// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: [[ADD22:%.*]] = add nsw i32 [[TMP24]], 1
// CHECK23-NEXT: store i32 [[ADD22]], ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP26:![0-9]+]]
// CHECK23: omp.inner.for.end23:
// CHECK23-NEXT: br label [[OMP_IF_END]]
// CHECK23: omp_if.end:
// CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK23: omp.loop.exit:
// CHECK23-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
// CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]])
// CHECK23-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
// CHECK23-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK23: .omp.final.then:
// CHECK23-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK23-NEXT: [[CONV24:%.*]] = sext i8 [[TMP29]] to i32
// CHECK23-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK23-NEXT: [[CONV25:%.*]] = sext i8 [[TMP30]] to i32
// CHECK23-NEXT: [[SUB26:%.*]] = sub i32 10, [[CONV25]]
// CHECK23-NEXT: [[SUB27:%.*]] = sub i32 [[SUB26]], 1
// CHECK23-NEXT: [[ADD28:%.*]] = add i32 [[SUB27]], 1
// CHECK23-NEXT: [[DIV29:%.*]] = udiv i32 [[ADD28]], 1
// CHECK23-NEXT: [[MUL30:%.*]] = mul nsw i32 [[DIV29]], 1
// CHECK23-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV24]], [[MUL30]]
// CHECK23-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8
// CHECK23-NEXT: store i8 [[CONV32]], ptr [[TMP0]], align 1
// CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK23: .omp.final.done:
// CHECK23-NEXT: br label [[OMP_PRECOND_END]]
// CHECK23: omp.precond.end:
// CHECK23-NEXT: ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135
// CHECK23-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK23-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4
// CHECK23-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined, ptr [[AA_ADDR]])
// CHECK23-NEXT: ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.omp_outlined
// CHECK23-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
// CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK23-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK23-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
// CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK23-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
// CHECK23-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK23-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]])
// CHECK23-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK23: omp.dispatch.cond:
// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
// CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK23: cond.true:
// CHECK23-NEXT: br label [[COND_END:%.*]]
// CHECK23: cond.false:
// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: br label [[COND_END]]
// CHECK23: cond.end:
// CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK23-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK23: omp.dispatch.body:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK23: omp.inner.for.cond:
// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK23-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK23-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK23: omp.inner.for.body:
// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK23: omp.body.continue:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK23: omp.inner.for.inc:
// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK23-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK23-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
// CHECK23: omp.inner.for.end:
// CHECK23-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK23: omp.dispatch.inc:
// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK23-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK23-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK23-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// CHECK23-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
// CHECK23-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK23: omp.dispatch.end:
// CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
// CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
// CHECK23-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK23: .omp.final.then:
// CHECK23-NEXT: store i32 100, ptr [[I]], align 4
// CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK23: .omp.final.done:
// CHECK23-NEXT: ret void
//