clang-p2996/clang/test/OpenMP/target_parallel_generic_loop_codegen-1.cpp
dhruvachak b5d02bbd0d [OpenMP] Increment kernel args version, used by runtime for detecting dyn_ptr. (#85363)
An implicit kernel parameter (dyn_ptr) was introduced some time back.
This patch increments the kernel args version for compilers that support
dyn_ptr. The runtime will use the version to determine whether the
compiler generated the implicit parameter. The versioning is required to
support use cases where code generated by an older compiler is linked
with a newer runtime.

If approved, this patch should be backported to release 18.
2024-03-19 16:40:22 -07:00
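To make the compatibility argument concrete, the sketch below shows how a runtime could gate on the kernel-args version before assuming the implicit dyn_ptr parameter is present. It is a minimal illustration only: the type, function, and version constant (KernelArgsDescriptor, buildLaunchArgs, FirstVersionWithDynPtr) are hypothetical stand-ins rather than the actual libomptarget definitions, and the sketch is not part of the test file that follows.

#include <cstdint>
#include <vector>

// Hypothetical stand-in for the kernel-argument descriptor filled in by the compiler.
struct KernelArgsDescriptor {
  uint32_t Version;         // bumped by the compiler when the kernel-argument ABI changes
  std::vector<void *> Args; // explicit kernel arguments
};

// Illustrative threshold only: the first version whose producers emit the implicit dyn_ptr.
constexpr uint32_t FirstVersionWithDynPtr = 3;

// Build the final launch argument list, appending the implicit dyn_ptr slot only
// when the producing compiler was new enough to have generated it.
std::vector<void *> buildLaunchArgs(const KernelArgsDescriptor &KA, void *DynPtr) {
  std::vector<void *> Launch = KA.Args;
  if (KA.Version >= FirstVersionWithDynPtr)
    Launch.push_back(DynPtr); // older producers never emitted this parameter
  return Launch;
}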


// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test host codegen.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// Test target parallel loop codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s -check-prefix=TCHECK
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s -check-prefix=TCHECK
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s -check-prefix=TCHECK
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s -check-prefix=TCHECK
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// Check that no target code is emitted if no -fopenmp-targets option was provided.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-NTARGET
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY2 %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix OMP-DEFAULT
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix OMP-DEFAULT
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix OMP-DEfAULT
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix OMP-DEFAULT
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// Test target parallel loop codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s -check-prefix=TCHECK
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s -check-prefix=TCHECK
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s -check-prefix=TCHECK
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s -check-prefix=TCHECK
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// Check that no target code is emitted if no -fopenmp-targets option was provided.
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-NTARGET-OMP-DEFAULT
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY2 %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// We have 7 target regions
// We have 4 kinds of initializers: one for the 500 priority, another one for 501, more for the default priority, and the last one for the offloading registration function.
extern int *R;
struct SA {
  int arr[4];
  void foo() {
    int a = *R;
    a += 1;
    *R = a;
  }
  SA() {
    int a = *R;
    a += 2;
    *R = a;
  }
  ~SA() {
    int a = *R;
    a += 3;
    *R = a;
  }
};
struct SB {
  int arr[8];
  void foo() {
    int a = *R;
    #pragma omp target parallel loop
    for (int i = 0; i < 10; ++i)
      a += 4;
    *R = a;
  }
  SB() {
    int a = *R;
    a += 5;
    *R = a;
  }
  ~SB() {
    int a = *R;
    a += 6;
    *R = a;
  }
};
struct SC {
  int arr[16];
  void foo() {
    int a = *R;
    a += 7;
    *R = a;
  }
  SC() {
    int a = *R;
    #pragma omp target parallel loop
    for (int i = 0; i < 10; ++i)
      a += 8;
    *R = a;
  }
  ~SC() {
    int a = *R;
    a += 9;
    *R = a;
  }
};
struct SD {
  int arr[32];
  void foo() {
    int a = *R;
    a += 10;
    *R = a;
  }
  SD() {
    int a = *R;
    a += 11;
    *R = a;
  }
  ~SD() {
    int a = *R;
    #pragma omp target parallel loop
    for (int i = 0; i < 10; ++i)
      a += 12;
    *R = a;
  }
};
struct SE {
  int arr[64];
  void foo() {
    int a = *R;
    #pragma omp target parallel loop if(target: 0)
    for (int i = 0; i < 10; ++i)
      a += 13;
    *R = a;
  }
  SE() {
    int a = *R;
    #pragma omp target parallel loop
    for (int i = 0; i < 10; ++i)
      a += 14;
    *R = a;
  }
  ~SE() {
    int a = *R;
    #pragma omp target parallel loop
    for (int i = 0; i < 10; ++i)
      a += 15;
    *R = a;
  }
};
template <int x>
struct ST {
  int arr[128 + x];
  void foo() {
    int a = *R;
    #pragma omp target parallel loop
    for (int i = 0; i < 10; ++i)
      a += 16 + x;
    *R = a;
  }
  ST() {
    int a = *R;
    #pragma omp target parallel loop
    for (int i = 0; i < 10; ++i)
      a += 17 + x;
    *R = a;
  }
  ~ST() {
    int a = *R;
    #pragma omp target parallel loop
    for (int i = 0; i < 10; ++i)
      a += 18 + x;
    *R = a;
  }
};
// We have to make sure we use all the target regions:
// We have 2 initializers with priority 500
// We have 1 initializer with priority 501
// We have 6 initializers with default priority
static __attribute__((init_priority(500))) SA a1;
SA a2;
SB __attribute__((init_priority(500))) b1;
SB __attribute__((init_priority(501))) b2;
static SC c1;
SD d1;
SE e1;
ST<100> t1;
ST<1000> t2;
int bar(int a){
  int r = a;
  a1.foo();
  a2.foo();
  b1.foo();
  b2.foo();
  c1.foo();
  d1.foo();
  e1.foo();
  t1.foo();
  t2.foo();
  #pragma omp target parallel loop
  for (int i = 0; i < 10; ++i)
    ++r;
  return r + *R;
}
// Check metadata is properly generated:
#endif
// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init
// CHECK-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @_ZL2a1)
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @_ZL2a1, ptr @__dso_handle) #[[ATTR2:[0-9]+]]
// CHECK-NEXT: ret void
//
//
//
//
//
//
// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
// CHECK-SAME: () #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @a2)
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @a2, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
// CHECK-SAME: () #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b1)
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NEXT: ret void
//
//
//
//
//
//
// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
// CHECK-SAME: () #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b2)
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b2, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
// CHECK-SAME: () #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @_ZN2SCC1Ev(ptr noundef nonnull align 4 dereferenceable(64) @_ZL2c1)
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SCD1Ev, ptr @_ZL2c1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NEXT: ret void
//
//
//
//
//
//
//
//
// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.7
// CHECK-SAME: () #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @_ZN2SDC1Ev(ptr noundef nonnull align 4 dereferenceable(128) @d1)
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SDD1Ev, ptr @d1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NEXT: ret void
//
//
//
//
//
//
//
//
// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.10
// CHECK-SAME: () #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @_ZN2SEC1Ev(ptr noundef nonnull align 4 dereferenceable(256) @e1)
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SED1Ev, ptr @e1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NEXT: ret void
//
//
//
//
//
//
//
//
//
//
// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.15
// CHECK-SAME: () #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @_ZN2STILi100EEC1Ev(ptr noundef nonnull align 4 dereferenceable(912) @t1)
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi100EED1Ev, ptr @t1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NEXT: ret void
//
//
//
//
//
//
//
//
//
//
// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.20
// CHECK-SAME: () #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @_ZN2STILi1000EEC1Ev(ptr noundef nonnull align 4 dereferenceable(4512) @t2)
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi1000EED1Ev, ptr @t2, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NEXT: ret void
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
// CHECK-LABEL: define {{[^@]+}}@_GLOBAL__I_000500
// CHECK-SAME: () #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @__cxx_global_var_init()
// CHECK-NEXT: call void @__cxx_global_var_init.2()
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@_GLOBAL__I_000501
// CHECK-SAME: () #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @__cxx_global_var_init.3()
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_parallel_generic_loop_codegen_1.cpp
// CHECK-SAME: () #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @__cxx_global_var_init.1()
// CHECK-NEXT: call void @__cxx_global_var_init.4()
// CHECK-NEXT: call void @__cxx_global_var_init.7()
// CHECK-NEXT: call void @__cxx_global_var_init.10()
// CHECK-NEXT: call void @__cxx_global_var_init.15()
// CHECK-NEXT: call void @__cxx_global_var_init.20()
// CHECK-NEXT: ret void
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init
// SIMD-ONLY0-SAME: () #[[ATTR0:[0-9]+]] {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @_ZL2a1)
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @_ZL2a1, ptr @__dso_handle) #[[ATTR2:[0-9]+]]
// SIMD-ONLY0-NEXT: ret void
//
//
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @a2)
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @a2, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY0-NEXT: ret void
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b1)
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY0-NEXT: ret void
//
//
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b2)
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b2, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY0-NEXT: ret void
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: call void @_ZN2SCC1Ev(ptr noundef nonnull align 4 dereferenceable(64) @_ZL2c1)
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SCD1Ev, ptr @_ZL2c1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY0-NEXT: ret void
//
//
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.5
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: call void @_ZN2SDC1Ev(ptr noundef nonnull align 4 dereferenceable(128) @d1)
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SDD1Ev, ptr @d1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY0-NEXT: ret void
//
//
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.6
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: call void @_ZN2SEC1Ev(ptr noundef nonnull align 4 dereferenceable(256) @e1)
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SED1Ev, ptr @e1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY0-NEXT: ret void
//
//
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.7
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: call void @_ZN2STILi100EEC1Ev(ptr noundef nonnull align 4 dereferenceable(912) @t1)
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi100EED1Ev, ptr @t1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY0-NEXT: ret void
//
//
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.8
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: call void @_ZN2STILi1000EEC1Ev(ptr noundef nonnull align 4 dereferenceable(4512) @t2)
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi1000EED1Ev, ptr @t2, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY0-NEXT: ret void
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_GLOBAL__I_000500
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: call void @__cxx_global_var_init()
// SIMD-ONLY0-NEXT: call void @__cxx_global_var_init.2()
// SIMD-ONLY0-NEXT: ret void
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_GLOBAL__I_000501
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: call void @__cxx_global_var_init.3()
// SIMD-ONLY0-NEXT: ret void
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_parallel_generic_loop_codegen_1.cpp
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: call void @__cxx_global_var_init.1()
// SIMD-ONLY0-NEXT: call void @__cxx_global_var_init.4()
// SIMD-ONLY0-NEXT: call void @__cxx_global_var_init.5()
// SIMD-ONLY0-NEXT: call void @__cxx_global_var_init.6()
// SIMD-ONLY0-NEXT: call void @__cxx_global_var_init.7()
// SIMD-ONLY0-NEXT: call void @__cxx_global_var_init.8()
// SIMD-ONLY0-NEXT: ret void
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
// SIMD-ONLY1-LABEL: define {{[^@]+}}@__cxx_global_var_init
// SIMD-ONLY1-SAME: () #[[ATTR0:[0-9]+]] {
// SIMD-ONLY1-NEXT: entry:
// SIMD-ONLY1-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @_ZL2a1)
// SIMD-ONLY1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @_ZL2a1, ptr @__dso_handle) #[[ATTR2:[0-9]+]]
// SIMD-ONLY1-NEXT: ret void
//
//
//
//
// SIMD-ONLY1-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
// SIMD-ONLY1-SAME: () #[[ATTR0]] {
// SIMD-ONLY1-NEXT: entry:
// SIMD-ONLY1-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @a2)
// SIMD-ONLY1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @a2, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY1-NEXT: ret void
//
//
// SIMD-ONLY1-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
// SIMD-ONLY1-SAME: () #[[ATTR0]] {
// SIMD-ONLY1-NEXT: entry:
// SIMD-ONLY1-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b1)
// SIMD-ONLY1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY1-NEXT: ret void
//
//
//
//
// SIMD-ONLY1-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
// SIMD-ONLY1-SAME: () #[[ATTR0]] {
// SIMD-ONLY1-NEXT: entry:
// SIMD-ONLY1-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b2)
// SIMD-ONLY1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b2, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY1-NEXT: ret void
//
//
// SIMD-ONLY1-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
// SIMD-ONLY1-SAME: () #[[ATTR0]] {
// SIMD-ONLY1-NEXT: entry:
// SIMD-ONLY1-NEXT: call void @_ZN2SCC1Ev(ptr noundef nonnull align 4 dereferenceable(64) @_ZL2c1)
// SIMD-ONLY1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SCD1Ev, ptr @_ZL2c1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY1-NEXT: ret void
//
//
//
//
// SIMD-ONLY1-LABEL: define {{[^@]+}}@__cxx_global_var_init.5
// SIMD-ONLY1-SAME: () #[[ATTR0]] {
// SIMD-ONLY1-NEXT: entry:
// SIMD-ONLY1-NEXT: call void @_ZN2SDC1Ev(ptr noundef nonnull align 4 dereferenceable(128) @d1)
// SIMD-ONLY1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SDD1Ev, ptr @d1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY1-NEXT: ret void
//
//
//
//
// SIMD-ONLY1-LABEL: define {{[^@]+}}@__cxx_global_var_init.6
// SIMD-ONLY1-SAME: () #[[ATTR0]] {
// SIMD-ONLY1-NEXT: entry:
// SIMD-ONLY1-NEXT: call void @_ZN2SEC1Ev(ptr noundef nonnull align 4 dereferenceable(256) @e1)
// SIMD-ONLY1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SED1Ev, ptr @e1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY1-NEXT: ret void
//
//
//
//
// SIMD-ONLY1-LABEL: define {{[^@]+}}@__cxx_global_var_init.7
// SIMD-ONLY1-SAME: () #[[ATTR0]] {
// SIMD-ONLY1-NEXT: entry:
// SIMD-ONLY1-NEXT: call void @_ZN2STILi100EEC1Ev(ptr noundef nonnull align 4 dereferenceable(912) @t1)
// SIMD-ONLY1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi100EED1Ev, ptr @t1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY1-NEXT: ret void
//
//
//
//
// SIMD-ONLY1-LABEL: define {{[^@]+}}@__cxx_global_var_init.8
// SIMD-ONLY1-SAME: () #[[ATTR0]] {
// SIMD-ONLY1-NEXT: entry:
// SIMD-ONLY1-NEXT: call void @_ZN2STILi1000EEC1Ev(ptr noundef nonnull align 4 dereferenceable(4512) @t2)
// SIMD-ONLY1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi1000EED1Ev, ptr @t2, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY1-NEXT: ret void
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
// SIMD-ONLY1-LABEL: define {{[^@]+}}@_GLOBAL__I_000500
// SIMD-ONLY1-SAME: () #[[ATTR0]] {
// SIMD-ONLY1-NEXT: entry:
// SIMD-ONLY1-NEXT: call void @__cxx_global_var_init()
// SIMD-ONLY1-NEXT: call void @__cxx_global_var_init.2()
// SIMD-ONLY1-NEXT: ret void
//
//
// SIMD-ONLY1-LABEL: define {{[^@]+}}@_GLOBAL__I_000501
// SIMD-ONLY1-SAME: () #[[ATTR0]] {
// SIMD-ONLY1-NEXT: entry:
// SIMD-ONLY1-NEXT: call void @__cxx_global_var_init.3()
// SIMD-ONLY1-NEXT: ret void
//
//
// SIMD-ONLY1-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_parallel_generic_loop_codegen_1.cpp
// SIMD-ONLY1-SAME: () #[[ATTR0]] {
// SIMD-ONLY1-NEXT: entry:
// SIMD-ONLY1-NEXT: call void @__cxx_global_var_init.1()
// SIMD-ONLY1-NEXT: call void @__cxx_global_var_init.4()
// SIMD-ONLY1-NEXT: call void @__cxx_global_var_init.5()
// SIMD-ONLY1-NEXT: call void @__cxx_global_var_init.6()
// SIMD-ONLY1-NEXT: call void @__cxx_global_var_init.7()
// SIMD-ONLY1-NEXT: call void @__cxx_global_var_init.8()
// SIMD-ONLY1-NEXT: ret void
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_Z3bari
// CHECK-NTARGET-SAME: (i32 noundef signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[R:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[R_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP0]], ptr [[R]], align 4
// CHECK-NTARGET-NEXT: call void @_ZN2SA3fooEv(ptr noundef nonnull align 4 dereferenceable(16) @_ZL2a1)
// CHECK-NTARGET-NEXT: call void @_ZN2SA3fooEv(ptr noundef nonnull align 4 dereferenceable(16) @a2)
// CHECK-NTARGET-NEXT: call void @_ZN2SB3fooEv(ptr noundef nonnull align 4 dereferenceable(32) @b1)
// CHECK-NTARGET-NEXT: call void @_ZN2SB3fooEv(ptr noundef nonnull align 4 dereferenceable(32) @b2)
// CHECK-NTARGET-NEXT: call void @_ZN2SC3fooEv(ptr noundef nonnull align 4 dereferenceable(64) @_ZL2c1)
// CHECK-NTARGET-NEXT: call void @_ZN2SD3fooEv(ptr noundef nonnull align 4 dereferenceable(128) @d1)
// CHECK-NTARGET-NEXT: call void @_ZN2SE3fooEv(ptr noundef nonnull align 4 dereferenceable(256) @e1)
// CHECK-NTARGET-NEXT: call void @_ZN2STILi100EE3fooEv(ptr noundef nonnull align 4 dereferenceable(912) @t1)
// CHECK-NTARGET-NEXT: call void @_ZN2STILi1000EE3fooEv(ptr noundef nonnull align 4 dereferenceable(4512) @t2)
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[R]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[R_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i64, ptr [[R_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l267(i64 [[TMP2]]) #[[ATTR2:[0-9]+]]
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[R]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[TMP5]]
// CHECK-NTARGET-NEXT: ret i32 [[ADD]]
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SA3fooEv
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SB3fooEv
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SB3fooEv_l122(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SC3fooEv
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 7
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SD3fooEv
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 10
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SE3fooEv
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SE3fooEv_l185(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2STILi100EE3fooEv
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EE3fooEv_l211(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2STILi1000EE3fooEv
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EE3fooEv_l211(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l267
// CHECK-NTARGET-SAME: (i64 noundef [[R:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[R_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[R_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store i64 [[R]], ptr [[R_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[R_ADDR]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP0]], ptr [[R_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[R_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2:[0-9]+]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l267.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l267.omp_outlined
// CHECK-NTARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[R:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[R_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store i64 [[R]], ptr [[R_ADDR]], align 8
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET: cond.true:
// CHECK-NTARGET-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET: cond.false:
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: br label [[COND_END]]
// CHECK-NTARGET: cond.end:
// CHECK-NTARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET: omp.inner.for.cond:
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET: omp.inner.for.body:
// CHECK-NTARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[R_ADDR]], align 4
// CHECK-NTARGET-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK-NTARGET-NEXT: store i32 [[INC]], ptr [[R_ADDR]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET: omp.body.continue:
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET: omp.inner.for.inc:
// CHECK-NTARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET: omp.inner.for.end:
// CHECK-NTARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET: omp.loop.exit:
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@__cxx_global_var_init
// CHECK-NTARGET-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @_ZL2a1)
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @_ZL2a1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SAC1Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: call void @_ZN2SAC2Ev(ptr noundef nonnull align 4 dereferenceable(16) [[THIS1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SAD1Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: call void @_ZN2SAD2Ev(ptr noundef nonnull align 4 dereferenceable(16) [[THIS1]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SAC2Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 2
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SAD2Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 3
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
// CHECK-NTARGET-SAME: () #[[ATTR3]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @a2)
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @a2, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
// CHECK-NTARGET-SAME: () #[[ATTR3]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b1)
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SBC1Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: call void @_ZN2SBC2Ev(ptr noundef nonnull align 4 dereferenceable(32) [[THIS1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SBD1Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: call void @_ZN2SBD2Ev(ptr noundef nonnull align 4 dereferenceable(32) [[THIS1]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SBC2Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 5
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SBD2Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 6
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
// CHECK-NTARGET-SAME: () #[[ATTR3]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b2)
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b2, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
// CHECK-NTARGET-SAME: () #[[ATTR3]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: call void @_ZN2SCC1Ev(ptr noundef nonnull align 4 dereferenceable(64) @_ZL2c1)
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SCD1Ev, ptr @_ZL2c1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SCC1Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: call void @_ZN2SCC2Ev(ptr noundef nonnull align 4 dereferenceable(64) [[THIS1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SCD1Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: call void @_ZN2SCD2Ev(ptr noundef nonnull align 4 dereferenceable(64) [[THIS1]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SCC2Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SCC1Ev_l148(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SCC1Ev_l148
// CHECK-NTARGET-SAME: (i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SCC1Ev_l148.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SCC1Ev_l148.omp_outlined
// CHECK-NTARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET: cond.true:
// CHECK-NTARGET-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET: cond.false:
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: br label [[COND_END]]
// CHECK-NTARGET: cond.end:
// CHECK-NTARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET: omp.inner.for.cond:
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET: omp.inner.for.body:
// CHECK-NTARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 8
// CHECK-NTARGET-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET: omp.body.continue:
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET: omp.inner.for.inc:
// CHECK-NTARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET: omp.inner.for.end:
// CHECK-NTARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET: omp.loop.exit:
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SCD2Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 9
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@__cxx_global_var_init.5
// CHECK-NTARGET-SAME: () #[[ATTR3]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: call void @_ZN2SDC1Ev(ptr noundef nonnull align 4 dereferenceable(128) @d1)
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SDD1Ev, ptr @d1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SDC1Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: call void @_ZN2SDC2Ev(ptr noundef nonnull align 4 dereferenceable(128) [[THIS1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SDD1Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: call void @_ZN2SDD2Ev(ptr noundef nonnull align 4 dereferenceable(128) [[THIS1]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SDC2Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 11
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SDD2Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SDD1Ev_l174(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SDD1Ev_l174
// CHECK-NTARGET-SAME: (i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SDD1Ev_l174.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SDD1Ev_l174.omp_outlined
// CHECK-NTARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET: cond.true:
// CHECK-NTARGET-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET: cond.false:
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: br label [[COND_END]]
// CHECK-NTARGET: cond.end:
// CHECK-NTARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET: omp.inner.for.cond:
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET: omp.inner.for.body:
// CHECK-NTARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 12
// CHECK-NTARGET-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET: omp.body.continue:
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET: omp.inner.for.inc:
// CHECK-NTARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET: omp.inner.for.end:
// CHECK-NTARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET: omp.loop.exit:
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@__cxx_global_var_init.6
// CHECK-NTARGET-SAME: () #[[ATTR3]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: call void @_ZN2SEC1Ev(ptr noundef nonnull align 4 dereferenceable(256) @e1)
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SED1Ev, ptr @e1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SEC1Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: call void @_ZN2SEC2Ev(ptr noundef nonnull align 4 dereferenceable(256) [[THIS1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SED1Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: call void @_ZN2SED2Ev(ptr noundef nonnull align 4 dereferenceable(256) [[THIS1]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SEC2Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SEC1Ev_l192(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SEC1Ev_l192
// CHECK-NTARGET-SAME: (i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SEC1Ev_l192.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SEC1Ev_l192.omp_outlined
// CHECK-NTARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET: cond.true:
// CHECK-NTARGET-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET: cond.false:
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: br label [[COND_END]]
// CHECK-NTARGET: cond.end:
// CHECK-NTARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET: omp.inner.for.cond:
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET: omp.inner.for.body:
// CHECK-NTARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 14
// CHECK-NTARGET-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET: omp.body.continue:
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET: omp.inner.for.inc:
// CHECK-NTARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET: omp.inner.for.end:
// CHECK-NTARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET: omp.loop.exit:
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2SED2Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SED1Ev_l199(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SED1Ev_l199
// CHECK-NTARGET-SAME: (i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SED1Ev_l199.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SED1Ev_l199.omp_outlined
// CHECK-NTARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET: cond.true:
// CHECK-NTARGET-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET: cond.false:
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: br label [[COND_END]]
// CHECK-NTARGET: cond.end:
// CHECK-NTARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET: omp.inner.for.cond:
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET: omp.inner.for.body:
// CHECK-NTARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 15
// CHECK-NTARGET-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET: omp.body.continue:
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET: omp.inner.for.inc:
// CHECK-NTARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET: omp.inner.for.end:
// CHECK-NTARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET: omp.loop.exit:
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@__cxx_global_var_init.7
// CHECK-NTARGET-SAME: () #[[ATTR3]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: call void @_ZN2STILi100EEC1Ev(ptr noundef nonnull align 4 dereferenceable(912) @t1)
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi100EED1Ev, ptr @t1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2STILi100EEC1Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: call void @_ZN2STILi100EEC2Ev(ptr noundef nonnull align 4 dereferenceable(912) [[THIS1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2STILi100EED1Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: call void @_ZN2STILi100EED2Ev(ptr noundef nonnull align 4 dereferenceable(912) [[THIS1]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2STILi100EEC2Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EEC1Ev_l218(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EEC1Ev_l218
// CHECK-NTARGET-SAME: (i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EEC1Ev_l218.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EEC1Ev_l218.omp_outlined
// CHECK-NTARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET: cond.true:
// CHECK-NTARGET-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET: cond.false:
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: br label [[COND_END]]
// CHECK-NTARGET: cond.end:
// CHECK-NTARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET: omp.inner.for.cond:
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET: omp.inner.for.body:
// CHECK-NTARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 117
// CHECK-NTARGET-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET: omp.body.continue:
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET: omp.inner.for.inc:
// CHECK-NTARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET: omp.inner.for.end:
// CHECK-NTARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET: omp.loop.exit:
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2STILi100EED2Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EED1Ev_l225(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EED1Ev_l225
// CHECK-NTARGET-SAME: (i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EED1Ev_l225.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EED1Ev_l225.omp_outlined
// CHECK-NTARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET: cond.true:
// CHECK-NTARGET-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET: cond.false:
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: br label [[COND_END]]
// CHECK-NTARGET: cond.end:
// CHECK-NTARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET: omp.inner.for.cond:
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET: omp.inner.for.body:
// CHECK-NTARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 118
// CHECK-NTARGET-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET: omp.body.continue:
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET: omp.inner.for.inc:
// CHECK-NTARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET: omp.inner.for.end:
// CHECK-NTARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET: omp.loop.exit:
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@__cxx_global_var_init.8
// CHECK-NTARGET-SAME: () #[[ATTR3]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: call void @_ZN2STILi1000EEC1Ev(ptr noundef nonnull align 4 dereferenceable(4512) @t2)
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi1000EED1Ev, ptr @t2, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2STILi1000EEC1Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: call void @_ZN2STILi1000EEC2Ev(ptr noundef nonnull align 4 dereferenceable(4512) [[THIS1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2STILi1000EED1Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: call void @_ZN2STILi1000EED2Ev(ptr noundef nonnull align 4 dereferenceable(4512) [[THIS1]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2STILi1000EEC2Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EEC1Ev_l218(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EEC1Ev_l218
// CHECK-NTARGET-SAME: (i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EEC1Ev_l218.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EEC1Ev_l218.omp_outlined
// CHECK-NTARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET: cond.true:
// CHECK-NTARGET-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET: cond.false:
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: br label [[COND_END]]
// CHECK-NTARGET: cond.end:
// CHECK-NTARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET: omp.inner.for.cond:
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET: omp.inner.for.body:
// CHECK-NTARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1017
// CHECK-NTARGET-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET: omp.body.continue:
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET: omp.inner.for.inc:
// CHECK-NTARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET: omp.inner.for.end:
// CHECK-NTARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET: omp.loop.exit:
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_ZN2STILi1000EED2Ev
// CHECK-NTARGET-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EED1Ev_l225(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EED1Ev_l225
// CHECK-NTARGET-SAME: (i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EED1Ev_l225.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EED1Ev_l225.omp_outlined
// CHECK-NTARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET: cond.true:
// CHECK-NTARGET-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET: cond.false:
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: br label [[COND_END]]
// CHECK-NTARGET: cond.end:
// CHECK-NTARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET: omp.inner.for.cond:
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET: omp.inner.for.body:
// CHECK-NTARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1018
// CHECK-NTARGET-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET: omp.body.continue:
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET: omp.inner.for.inc:
// CHECK-NTARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET: omp.inner.for.end:
// CHECK-NTARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET: omp.loop.exit:
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SB3fooEv_l122
// CHECK-NTARGET-SAME: (i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SB3fooEv_l122.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SB3fooEv_l122.omp_outlined
// CHECK-NTARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET: cond.true:
// CHECK-NTARGET-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET: cond.false:
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: br label [[COND_END]]
// CHECK-NTARGET: cond.end:
// CHECK-NTARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET: omp.inner.for.cond:
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET: omp.inner.for.body:
// CHECK-NTARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 4
// CHECK-NTARGET-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET: omp.body.continue:
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET: omp.inner.for.inc:
// CHECK-NTARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET: omp.inner.for.end:
// CHECK-NTARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET: omp.loop.exit:
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SE3fooEv_l185
// CHECK-NTARGET-SAME: (i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SE3fooEv_l185.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SE3fooEv_l185.omp_outlined
// CHECK-NTARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET: cond.true:
// CHECK-NTARGET-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET: cond.false:
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: br label [[COND_END]]
// CHECK-NTARGET: cond.end:
// CHECK-NTARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET: omp.inner.for.cond:
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET: omp.inner.for.body:
// CHECK-NTARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 13
// CHECK-NTARGET-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET: omp.body.continue:
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET: omp.inner.for.inc:
// CHECK-NTARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET: omp.inner.for.end:
// CHECK-NTARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET: omp.loop.exit:
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EE3fooEv_l211
// CHECK-NTARGET-SAME: (i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EE3fooEv_l211.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EE3fooEv_l211.omp_outlined
// CHECK-NTARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET: cond.true:
// CHECK-NTARGET-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET: cond.false:
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: br label [[COND_END]]
// CHECK-NTARGET: cond.end:
// CHECK-NTARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET: omp.inner.for.cond:
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET: omp.inner.for.body:
// CHECK-NTARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 116
// CHECK-NTARGET-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET: omp.body.continue:
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET: omp.inner.for.inc:
// CHECK-NTARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET: omp.inner.for.end:
// CHECK-NTARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET: omp.loop.exit:
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EE3fooEv_l211
// CHECK-NTARGET-SAME: (i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EE3fooEv_l211.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EE3fooEv_l211.omp_outlined
// CHECK-NTARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET: cond.true:
// CHECK-NTARGET-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET: cond.false:
// CHECK-NTARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: br label [[COND_END]]
// CHECK-NTARGET: cond.end:
// CHECK-NTARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET: omp.inner.for.cond:
// CHECK-NTARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET: omp.inner.for.body:
// CHECK-NTARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1016
// CHECK-NTARGET-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET: omp.body.continue:
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET: omp.inner.for.inc:
// CHECK-NTARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET: omp.inner.for.end:
// CHECK-NTARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET: omp.loop.exit:
// CHECK-NTARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_GLOBAL__I_000500
// CHECK-NTARGET-SAME: () #[[ATTR3]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: call void @__cxx_global_var_init()
// CHECK-NTARGET-NEXT: call void @__cxx_global_var_init.2()
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_GLOBAL__I_000501
// CHECK-NTARGET-SAME: () #[[ATTR3]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: call void @__cxx_global_var_init.3()
// CHECK-NTARGET-NEXT: ret void
//
//
// CHECK-NTARGET-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_parallel_generic_loop_codegen_1.cpp
// CHECK-NTARGET-SAME: () #[[ATTR3]] {
// CHECK-NTARGET-NEXT: entry:
// CHECK-NTARGET-NEXT: call void @__cxx_global_var_init.1()
// CHECK-NTARGET-NEXT: call void @__cxx_global_var_init.4()
// CHECK-NTARGET-NEXT: call void @__cxx_global_var_init.5()
// CHECK-NTARGET-NEXT: call void @__cxx_global_var_init.6()
// CHECK-NTARGET-NEXT: call void @__cxx_global_var_init.7()
// CHECK-NTARGET-NEXT: call void @__cxx_global_var_init.8()
// CHECK-NTARGET-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@__cxx_global_var_init
// SIMD-ONLY2-SAME: () #[[ATTR0:[0-9]+]] {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @_ZL2a1)
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @_ZL2a1, ptr @__dso_handle) #[[ATTR2:[0-9]+]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SAC1Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: call void @_ZN2SAC2Ev(ptr noundef nonnull align 4 dereferenceable(16) [[THIS1]])
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SAD1Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: call void @_ZN2SAD2Ev(ptr noundef nonnull align 4 dereferenceable(16) [[THIS1]]) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
// SIMD-ONLY2-SAME: () #[[ATTR0]] {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @a2)
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @a2, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
// SIMD-ONLY2-SAME: () #[[ATTR0]] {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b1)
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SBC1Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: call void @_ZN2SBC2Ev(ptr noundef nonnull align 4 dereferenceable(32) [[THIS1]])
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SBD1Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: call void @_ZN2SBD2Ev(ptr noundef nonnull align 4 dereferenceable(32) [[THIS1]]) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
// SIMD-ONLY2-SAME: () #[[ATTR0]] {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b2)
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b2, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
// SIMD-ONLY2-SAME: () #[[ATTR0]] {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: call void @_ZN2SCC1Ev(ptr noundef nonnull align 4 dereferenceable(64) @_ZL2c1)
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SCD1Ev, ptr @_ZL2c1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SCC1Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: call void @_ZN2SCC2Ev(ptr noundef nonnull align 4 dereferenceable(64) [[THIS1]])
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SCD1Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: call void @_ZN2SCD2Ev(ptr noundef nonnull align 4 dereferenceable(64) [[THIS1]]) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@__cxx_global_var_init.5
// SIMD-ONLY2-SAME: () #[[ATTR0]] {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: call void @_ZN2SDC1Ev(ptr noundef nonnull align 4 dereferenceable(128) @d1)
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SDD1Ev, ptr @d1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SDC1Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: call void @_ZN2SDC2Ev(ptr noundef nonnull align 4 dereferenceable(128) [[THIS1]])
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SDD1Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: call void @_ZN2SDD2Ev(ptr noundef nonnull align 4 dereferenceable(128) [[THIS1]]) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@__cxx_global_var_init.6
// SIMD-ONLY2-SAME: () #[[ATTR0]] {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: call void @_ZN2SEC1Ev(ptr noundef nonnull align 4 dereferenceable(256) @e1)
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SED1Ev, ptr @e1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SEC1Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: call void @_ZN2SEC2Ev(ptr noundef nonnull align 4 dereferenceable(256) [[THIS1]])
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SED1Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: call void @_ZN2SED2Ev(ptr noundef nonnull align 4 dereferenceable(256) [[THIS1]]) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@__cxx_global_var_init.7
// SIMD-ONLY2-SAME: () #[[ATTR0]] {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: call void @_ZN2STILi100EEC1Ev(ptr noundef nonnull align 4 dereferenceable(912) @t1)
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi100EED1Ev, ptr @t1, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2STILi100EEC1Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: call void @_ZN2STILi100EEC2Ev(ptr noundef nonnull align 4 dereferenceable(912) [[THIS1]])
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2STILi100EED1Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: call void @_ZN2STILi100EED2Ev(ptr noundef nonnull align 4 dereferenceable(912) [[THIS1]]) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@__cxx_global_var_init.8
// SIMD-ONLY2-SAME: () #[[ATTR0]] {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: call void @_ZN2STILi1000EEC1Ev(ptr noundef nonnull align 4 dereferenceable(4512) @t2)
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi1000EED1Ev, ptr @t2, ptr @__dso_handle) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2STILi1000EEC1Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: call void @_ZN2STILi1000EEC2Ev(ptr noundef nonnull align 4 dereferenceable(4512) [[THIS1]])
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2STILi1000EED1Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: call void @_ZN2STILi1000EED2Ev(ptr noundef nonnull align 4 dereferenceable(4512) [[THIS1]]) #[[ATTR2]]
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_Z3bari
// SIMD-ONLY2-SAME: (i32 noundef signext [[A:%.*]]) #[[ATTR1]] {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: [[R:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP0]], ptr [[R]], align 4
// SIMD-ONLY2-NEXT: call void @_ZN2SA3fooEv(ptr noundef nonnull align 4 dereferenceable(16) @_ZL2a1)
// SIMD-ONLY2-NEXT: call void @_ZN2SA3fooEv(ptr noundef nonnull align 4 dereferenceable(16) @a2)
// SIMD-ONLY2-NEXT: call void @_ZN2SB3fooEv(ptr noundef nonnull align 4 dereferenceable(32) @b1)
// SIMD-ONLY2-NEXT: call void @_ZN2SB3fooEv(ptr noundef nonnull align 4 dereferenceable(32) @b2)
// SIMD-ONLY2-NEXT: call void @_ZN2SC3fooEv(ptr noundef nonnull align 4 dereferenceable(64) @_ZL2c1)
// SIMD-ONLY2-NEXT: call void @_ZN2SD3fooEv(ptr noundef nonnull align 4 dereferenceable(128) @d1)
// SIMD-ONLY2-NEXT: call void @_ZN2SE3fooEv(ptr noundef nonnull align 4 dereferenceable(256) @e1)
// SIMD-ONLY2-NEXT: call void @_ZN2STILi100EE3fooEv(ptr noundef nonnull align 4 dereferenceable(912) @t1)
// SIMD-ONLY2-NEXT: call void @_ZN2STILi1000EE3fooEv(ptr noundef nonnull align 4 dereferenceable(4512) @t2)
// SIMD-ONLY2-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY2: for.cond:
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 10
// SIMD-ONLY2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY2: for.body:
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[R]], align 4
// SIMD-ONLY2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1
// SIMD-ONLY2-NEXT: store i32 [[INC]], ptr [[R]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY2: for.inc:
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[INC1:%.*]] = add nsw i32 [[TMP3]], 1
// SIMD-ONLY2-NEXT: store i32 [[INC1]], ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]]
// SIMD-ONLY2: for.end:
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load i32, ptr [[R]], align 4
// SIMD-ONLY2-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP6]]
// SIMD-ONLY2-NEXT: ret i32 [[ADD]]
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SA3fooEv
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 1
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SB3fooEv
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY2: for.cond:
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// SIMD-ONLY2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY2: for.body:
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 4
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY2: for.inc:
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// SIMD-ONLY2-NEXT: store i32 [[INC]], ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// SIMD-ONLY2: for.end:
// SIMD-ONLY2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP6:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP5]], ptr [[TMP6]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SC3fooEv
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 7
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SD3fooEv
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 10
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SE3fooEv
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY2: for.cond:
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// SIMD-ONLY2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY2: for.body:
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 13
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY2: for.inc:
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// SIMD-ONLY2-NEXT: store i32 [[INC]], ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
// SIMD-ONLY2: for.end:
// SIMD-ONLY2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP6:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP5]], ptr [[TMP6]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2STILi100EE3fooEv
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY2: for.cond:
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// SIMD-ONLY2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY2: for.body:
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 116
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY2: for.inc:
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// SIMD-ONLY2-NEXT: store i32 [[INC]], ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
// SIMD-ONLY2: for.end:
// SIMD-ONLY2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP6:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP5]], ptr [[TMP6]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2STILi1000EE3fooEv
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY2: for.cond:
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// SIMD-ONLY2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY2: for.body:
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1016
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY2: for.inc:
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// SIMD-ONLY2-NEXT: store i32 [[INC]], ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// SIMD-ONLY2: for.end:
// SIMD-ONLY2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP6:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP5]], ptr [[TMP6]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SAC2Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 2
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SAD2Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 3
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SBC2Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 5
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SBD2Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 6
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SCC2Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY2: for.cond:
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// SIMD-ONLY2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY2: for.body:
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 8
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY2: for.inc:
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// SIMD-ONLY2-NEXT: store i32 [[INC]], ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]]
// SIMD-ONLY2: for.end:
// SIMD-ONLY2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP6:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP5]], ptr [[TMP6]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SCD2Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 9
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SDC2Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 11
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SDD2Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY2: for.cond:
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// SIMD-ONLY2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY2: for.body:
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 12
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY2: for.inc:
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// SIMD-ONLY2-NEXT: store i32 [[INC]], ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
// SIMD-ONLY2: for.end:
// SIMD-ONLY2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP6:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP5]], ptr [[TMP6]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SEC2Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY2: for.cond:
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// SIMD-ONLY2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY2: for.body:
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 14
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY2: for.inc:
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// SIMD-ONLY2-NEXT: store i32 [[INC]], ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
// SIMD-ONLY2: for.end:
// SIMD-ONLY2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP6:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP5]], ptr [[TMP6]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2SED2Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY2: for.cond:
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// SIMD-ONLY2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY2: for.body:
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 15
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY2: for.inc:
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// SIMD-ONLY2-NEXT: store i32 [[INC]], ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// SIMD-ONLY2: for.end:
// SIMD-ONLY2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP6:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP5]], ptr [[TMP6]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2STILi100EEC2Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY2: for.cond:
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// SIMD-ONLY2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY2: for.body:
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 117
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY2: for.inc:
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// SIMD-ONLY2-NEXT: store i32 [[INC]], ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// SIMD-ONLY2: for.end:
// SIMD-ONLY2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP6:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP5]], ptr [[TMP6]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2STILi100EED2Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY2: for.cond:
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// SIMD-ONLY2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY2: for.body:
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 118
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY2: for.inc:
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// SIMD-ONLY2-NEXT: store i32 [[INC]], ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// SIMD-ONLY2: for.end:
// SIMD-ONLY2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP6:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP5]], ptr [[TMP6]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2STILi1000EEC2Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY2: for.cond:
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// SIMD-ONLY2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY2: for.body:
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1017
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY2: for.inc:
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// SIMD-ONLY2-NEXT: store i32 [[INC]], ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
// SIMD-ONLY2: for.end:
// SIMD-ONLY2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP6:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP5]], ptr [[TMP6]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_ZN2STILi1000EED2Ev
// SIMD-ONLY2-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY2-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// SIMD-ONLY2-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// SIMD-ONLY2-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY2: for.cond:
// SIMD-ONLY2-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// SIMD-ONLY2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY2: for.body:
// SIMD-ONLY2-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1018
// SIMD-ONLY2-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY2: for.inc:
// SIMD-ONLY2-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// SIMD-ONLY2-NEXT: store i32 [[INC]], ptr [[I]], align 4
// SIMD-ONLY2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
// SIMD-ONLY2: for.end:
// SIMD-ONLY2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY2-NEXT: [[TMP6:%.*]] = load ptr, ptr @R, align 8
// SIMD-ONLY2-NEXT: store i32 [[TMP5]], ptr [[TMP6]], align 4
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_GLOBAL__I_000500
// SIMD-ONLY2-SAME: () #[[ATTR0]] {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: call void @__cxx_global_var_init()
// SIMD-ONLY2-NEXT: call void @__cxx_global_var_init.2()
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_GLOBAL__I_000501
// SIMD-ONLY2-SAME: () #[[ATTR0]] {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: call void @__cxx_global_var_init.3()
// SIMD-ONLY2-NEXT: ret void
//
//
// SIMD-ONLY2-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_parallel_generic_loop_codegen_1.cpp
// SIMD-ONLY2-SAME: () #[[ATTR0]] {
// SIMD-ONLY2-NEXT: entry:
// SIMD-ONLY2-NEXT: call void @__cxx_global_var_init.1()
// SIMD-ONLY2-NEXT: call void @__cxx_global_var_init.4()
// SIMD-ONLY2-NEXT: call void @__cxx_global_var_init.5()
// SIMD-ONLY2-NEXT: call void @__cxx_global_var_init.6()
// SIMD-ONLY2-NEXT: call void @__cxx_global_var_init.7()
// SIMD-ONLY2-NEXT: call void @__cxx_global_var_init.8()
// SIMD-ONLY2-NEXT: ret void
//
//
// OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init
// OMP-DEFAULT-SAME: () #[[ATTR0:[0-9]+]] {
// OMP-DEFAULT-NEXT: entry:
// OMP-DEFAULT-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @_ZL2a1)
// OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @_ZL2a1, ptr @__dso_handle) #[[ATTR2:[0-9]+]]
// OMP-DEFAULT-NEXT: ret void
//
//
// OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
// OMP-DEFAULT-SAME: () #[[ATTR0]] {
// OMP-DEFAULT-NEXT: entry:
// OMP-DEFAULT-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @a2)
// OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @a2, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEFAULT-NEXT: ret void
//
//
// OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
// OMP-DEFAULT-SAME: () #[[ATTR0]] {
// OMP-DEFAULT-NEXT: entry:
// OMP-DEFAULT-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b1)
// OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b1, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEFAULT-NEXT: ret void
//
//
// OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
// OMP-DEFAULT-SAME: () #[[ATTR0]] {
// OMP-DEFAULT-NEXT: entry:
// OMP-DEFAULT-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b2)
// OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b2, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEFAULT-NEXT: ret void
//
//
// OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
// OMP-DEFAULT-SAME: () #[[ATTR0]] {
// OMP-DEFAULT-NEXT: entry:
// OMP-DEFAULT-NEXT: call void @_ZN2SCC1Ev(ptr noundef nonnull align 4 dereferenceable(64) @_ZL2c1)
// OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SCD1Ev, ptr @_ZL2c1, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEFAULT-NEXT: ret void
//
//
// OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.5
// OMP-DEFAULT-SAME: () #[[ATTR0]] {
// OMP-DEFAULT-NEXT: entry:
// OMP-DEFAULT-NEXT: call void @_ZN2SDC1Ev(ptr noundef nonnull align 4 dereferenceable(128) @d1)
// OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SDD1Ev, ptr @d1, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEFAULT-NEXT: ret void
//
//
// OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.8
// OMP-DEFAULT-SAME: () #[[ATTR0]] {
// OMP-DEFAULT-NEXT: entry:
// OMP-DEFAULT-NEXT: call void @_ZN2SEC1Ev(ptr noundef nonnull align 4 dereferenceable(256) @e1)
// OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SED1Ev, ptr @e1, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEFAULT-NEXT: ret void
//
//
// OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.13
// OMP-DEFAULT-SAME: () #[[ATTR0]] {
// OMP-DEFAULT-NEXT: entry:
// OMP-DEFAULT-NEXT: call void @_ZN2STILi100EEC1Ev(ptr noundef nonnull align 4 dereferenceable(912) @t1)
// OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi100EED1Ev, ptr @t1, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEFAULT-NEXT: ret void
//
//
// OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.18
// OMP-DEFAULT-SAME: () #[[ATTR0]] {
// OMP-DEFAULT-NEXT: entry:
// OMP-DEFAULT-NEXT: call void @_ZN2STILi1000EEC1Ev(ptr noundef nonnull align 4 dereferenceable(4512) @t2)
// OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi1000EED1Ev, ptr @t2, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEFAULT-NEXT: ret void
//
//
// OMP-DEFAULT-LABEL: define {{[^@]+}}@_GLOBAL__I_000500
// OMP-DEFAULT-SAME: () #[[ATTR0]] {
// OMP-DEFAULT-NEXT: entry:
// OMP-DEFAULT-NEXT: call void @__cxx_global_var_init()
// OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.2()
// OMP-DEFAULT-NEXT: ret void
//
//
// OMP-DEFAULT-LABEL: define {{[^@]+}}@_GLOBAL__I_000501
// OMP-DEFAULT-SAME: () #[[ATTR0]] {
// OMP-DEFAULT-NEXT: entry:
// OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.3()
// OMP-DEFAULT-NEXT: ret void
//
//
// OMP-DEFAULT-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_parallel_generic_loop_codegen_1.cpp
// OMP-DEFAULT-SAME: () #[[ATTR0]] {
// OMP-DEFAULT-NEXT: entry:
// OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.1()
// OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.4()
// OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.5()
// OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.8()
// OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.13()
// OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.18()
// OMP-DEFAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init
// OMP-DEfAULT-SAME: () #[[ATTR0:[0-9]+]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @_ZL2a1)
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @_ZL2a1, ptr @__dso_handle) #[[ATTR2:[0-9]+]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SAC1Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2SAC2Ev(ptr noundef nonnull align 4 dereferenceable(16) [[THIS1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SAD1Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2SAD2Ev(ptr noundef nonnull align 4 dereferenceable(16) [[THIS1]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SAC2Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 2
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SAD2Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 3
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
// OMP-DEfAULT-SAME: () #[[ATTR0]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @a2)
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @a2, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
// OMP-DEfAULT-SAME: () #[[ATTR0]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b1)
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b1, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SBC1Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2SBC2Ev(ptr noundef nonnull align 4 dereferenceable(32) [[THIS1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SBD1Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2SBD2Ev(ptr noundef nonnull align 4 dereferenceable(32) [[THIS1]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SBC2Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 5
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SBD2Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 6
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
// OMP-DEfAULT-SAME: () #[[ATTR0]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b2)
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b2, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
// OMP-DEfAULT-SAME: () #[[ATTR0]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: call void @_ZN2SCC1Ev(ptr noundef nonnull align 4 dereferenceable(64) @_ZL2c1)
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SCD1Ev, ptr @_ZL2c1, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SCC1Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2SCC2Ev(ptr noundef nonnull align 4 dereferenceable(64) [[THIS1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SCD1Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2SCD2Ev(ptr noundef nonnull align 4 dereferenceable(64) [[THIS1]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SCC2Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP5]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP6]], align 4
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 3, ptr [[TMP9]], align 4
// OMP-DEfAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// OMP-DEfAULT-NEXT: store i32 1, ptr [[TMP10]], align 4
// OMP-DEfAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// OMP-DEfAULT-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4
// OMP-DEfAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// OMP-DEfAULT-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4
// OMP-DEfAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// OMP-DEfAULT-NEXT: store ptr @.offload_sizes, ptr [[TMP13]], align 4
// OMP-DEfAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// OMP-DEfAULT-NEXT: store ptr @.offload_maptypes, ptr [[TMP14]], align 4
// OMP-DEfAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP15]], align 4
// OMP-DEfAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP16]], align 4
// OMP-DEfAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP17]], align 8
// OMP-DEfAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP18]], align 8
// OMP-DEfAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// OMP-DEfAULT-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP19]], align 4
// OMP-DEfAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// OMP-DEfAULT-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP20]], align 4
// OMP-DEfAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// OMP-DEfAULT-NEXT: store i32 0, ptr [[TMP21]], align 4
// OMP-DEfAULT-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SCC1Ev_l148.region_id, ptr [[KERNEL_ARGS]])
// OMP-DEfAULT-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// OMP-DEfAULT-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// OMP-DEfAULT: omp_offload.failed:
// OMP-DEfAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SCC1Ev_l148(i32 [[TMP3]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: br label [[OMP_OFFLOAD_CONT]]
// OMP-DEfAULT: omp_offload.cont:
// OMP-DEfAULT-NEXT: [[TMP24:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP25:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP24]], ptr [[TMP25]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SCC1Ev_l148
// OMP-DEfAULT-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3:[0-9]+]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SCC1Ev_l148.omp_outlined, i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SCC1Ev_l148.omp_outlined
// OMP-DEfAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// OMP-DEfAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP-DEfAULT: cond.true:
// OMP-DEfAULT-NEXT: br label [[COND_END:%.*]]
// OMP-DEfAULT: cond.false:
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: br label [[COND_END]]
// OMP-DEfAULT: cond.end:
// OMP-DEfAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-DEfAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// OMP-DEfAULT: omp.inner.for.cond:
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-DEfAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP-DEfAULT: omp.inner.for.body:
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 8
// OMP-DEfAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// OMP-DEfAULT: omp.body.continue:
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// OMP-DEfAULT: omp.inner.for.inc:
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-DEfAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// OMP-DEfAULT: omp.inner.for.end:
// OMP-DEfAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// OMP-DEfAULT: omp.loop.exit:
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SCD2Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 9
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.5
// OMP-DEfAULT-SAME: () #[[ATTR0]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: call void @_ZN2SDC1Ev(ptr noundef nonnull align 4 dereferenceable(128) @d1)
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SDD1Ev, ptr @d1, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SDC1Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2SDC2Ev(ptr noundef nonnull align 4 dereferenceable(128) [[THIS1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SDD1Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2SDD2Ev(ptr noundef nonnull align 4 dereferenceable(128) [[THIS1]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SDC2Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 11
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SDD2Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP5]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP6]], align 4
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 3, ptr [[TMP9]], align 4
// OMP-DEfAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// OMP-DEfAULT-NEXT: store i32 1, ptr [[TMP10]], align 4
// OMP-DEfAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// OMP-DEfAULT-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4
// OMP-DEfAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// OMP-DEfAULT-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4
// OMP-DEfAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// OMP-DEfAULT-NEXT: store ptr @.offload_sizes.6, ptr [[TMP13]], align 4
// OMP-DEfAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// OMP-DEfAULT-NEXT: store ptr @.offload_maptypes.7, ptr [[TMP14]], align 4
// OMP-DEfAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP15]], align 4
// OMP-DEfAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP16]], align 4
// OMP-DEfAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP17]], align 8
// OMP-DEfAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP18]], align 8
// OMP-DEfAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// OMP-DEfAULT-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP19]], align 4
// OMP-DEfAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// OMP-DEfAULT-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP20]], align 4
// OMP-DEfAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// OMP-DEfAULT-NEXT: store i32 0, ptr [[TMP21]], align 4
// OMP-DEfAULT-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SDD1Ev_l174.region_id, ptr [[KERNEL_ARGS]])
// OMP-DEfAULT-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// OMP-DEfAULT-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// OMP-DEfAULT: omp_offload.failed:
// OMP-DEfAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SDD1Ev_l174(i32 [[TMP3]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: br label [[OMP_OFFLOAD_CONT]]
// OMP-DEfAULT: omp_offload.cont:
// OMP-DEfAULT-NEXT: [[TMP24:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP25:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP24]], ptr [[TMP25]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SDD1Ev_l174
// OMP-DEfAULT-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SDD1Ev_l174.omp_outlined, i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SDD1Ev_l174.omp_outlined
// OMP-DEfAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// OMP-DEfAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP-DEfAULT: cond.true:
// OMP-DEfAULT-NEXT: br label [[COND_END:%.*]]
// OMP-DEfAULT: cond.false:
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: br label [[COND_END]]
// OMP-DEfAULT: cond.end:
// OMP-DEfAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-DEfAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// OMP-DEfAULT: omp.inner.for.cond:
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-DEfAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP-DEfAULT: omp.inner.for.body:
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 12
// OMP-DEfAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// OMP-DEfAULT: omp.body.continue:
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// OMP-DEfAULT: omp.inner.for.inc:
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-DEfAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// OMP-DEfAULT: omp.inner.for.end:
// OMP-DEfAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// OMP-DEfAULT: omp.loop.exit:
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.8
// OMP-DEfAULT-SAME: () #[[ATTR0]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: call void @_ZN2SEC1Ev(ptr noundef nonnull align 4 dereferenceable(256) @e1)
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SED1Ev, ptr @e1, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SEC1Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2SEC2Ev(ptr noundef nonnull align 4 dereferenceable(256) [[THIS1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SED1Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2SED2Ev(ptr noundef nonnull align 4 dereferenceable(256) [[THIS1]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SEC2Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP5]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP6]], align 4
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 3, ptr [[TMP9]], align 4
// OMP-DEfAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// OMP-DEfAULT-NEXT: store i32 1, ptr [[TMP10]], align 4
// OMP-DEfAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// OMP-DEfAULT-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4
// OMP-DEfAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// OMP-DEfAULT-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4
// OMP-DEfAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// OMP-DEfAULT-NEXT: store ptr @.offload_sizes.9, ptr [[TMP13]], align 4
// OMP-DEfAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// OMP-DEfAULT-NEXT: store ptr @.offload_maptypes.10, ptr [[TMP14]], align 4
// OMP-DEfAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP15]], align 4
// OMP-DEfAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP16]], align 4
// OMP-DEfAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP17]], align 8
// OMP-DEfAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP18]], align 8
// OMP-DEfAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// OMP-DEfAULT-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP19]], align 4
// OMP-DEfAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// OMP-DEfAULT-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP20]], align 4
// OMP-DEfAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// OMP-DEfAULT-NEXT: store i32 0, ptr [[TMP21]], align 4
// OMP-DEfAULT-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SEC1Ev_l192.region_id, ptr [[KERNEL_ARGS]])
// OMP-DEfAULT-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// OMP-DEfAULT-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// OMP-DEfAULT: omp_offload.failed:
// OMP-DEfAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SEC1Ev_l192(i32 [[TMP3]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: br label [[OMP_OFFLOAD_CONT]]
// OMP-DEfAULT: omp_offload.cont:
// OMP-DEfAULT-NEXT: [[TMP24:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP25:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP24]], ptr [[TMP25]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SEC1Ev_l192
// OMP-DEfAULT-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SEC1Ev_l192.omp_outlined, i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SEC1Ev_l192.omp_outlined
// OMP-DEfAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// OMP-DEfAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP-DEfAULT: cond.true:
// OMP-DEfAULT-NEXT: br label [[COND_END:%.*]]
// OMP-DEfAULT: cond.false:
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: br label [[COND_END]]
// OMP-DEfAULT: cond.end:
// OMP-DEfAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-DEfAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// OMP-DEfAULT: omp.inner.for.cond:
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-DEfAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP-DEfAULT: omp.inner.for.body:
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 14
// OMP-DEfAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// OMP-DEfAULT: omp.body.continue:
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// OMP-DEfAULT: omp.inner.for.inc:
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-DEfAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// OMP-DEfAULT: omp.inner.for.end:
// OMP-DEfAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// OMP-DEfAULT: omp.loop.exit:
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SED2Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP5]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP6]], align 4
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 3, ptr [[TMP9]], align 4
// OMP-DEfAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// OMP-DEfAULT-NEXT: store i32 1, ptr [[TMP10]], align 4
// OMP-DEfAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// OMP-DEfAULT-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4
// OMP-DEfAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// OMP-DEfAULT-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4
// OMP-DEfAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// OMP-DEfAULT-NEXT: store ptr @.offload_sizes.11, ptr [[TMP13]], align 4
// OMP-DEfAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// OMP-DEfAULT-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP14]], align 4
// OMP-DEfAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP15]], align 4
// OMP-DEfAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP16]], align 4
// OMP-DEfAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP17]], align 8
// OMP-DEfAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP18]], align 8
// OMP-DEfAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// OMP-DEfAULT-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP19]], align 4
// OMP-DEfAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// OMP-DEfAULT-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP20]], align 4
// OMP-DEfAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// OMP-DEfAULT-NEXT: store i32 0, ptr [[TMP21]], align 4
// OMP-DEfAULT-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SED1Ev_l199.region_id, ptr [[KERNEL_ARGS]])
// OMP-DEfAULT-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// OMP-DEfAULT-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// OMP-DEfAULT: omp_offload.failed:
// OMP-DEfAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SED1Ev_l199(i32 [[TMP3]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: br label [[OMP_OFFLOAD_CONT]]
// OMP-DEfAULT: omp_offload.cont:
// OMP-DEfAULT-NEXT: [[TMP24:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP25:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP24]], ptr [[TMP25]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SED1Ev_l199
// OMP-DEfAULT-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SED1Ev_l199.omp_outlined, i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SED1Ev_l199.omp_outlined
// OMP-DEfAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// OMP-DEfAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP-DEfAULT: cond.true:
// OMP-DEfAULT-NEXT: br label [[COND_END:%.*]]
// OMP-DEfAULT: cond.false:
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: br label [[COND_END]]
// OMP-DEfAULT: cond.end:
// OMP-DEfAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-DEfAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// OMP-DEfAULT: omp.inner.for.cond:
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-DEfAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP-DEfAULT: omp.inner.for.body:
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 15
// OMP-DEfAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// OMP-DEfAULT: omp.body.continue:
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// OMP-DEfAULT: omp.inner.for.inc:
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-DEfAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// OMP-DEfAULT: omp.inner.for.end:
// OMP-DEfAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// OMP-DEfAULT: omp.loop.exit:
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.13
// OMP-DEfAULT-SAME: () #[[ATTR0]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: call void @_ZN2STILi100EEC1Ev(ptr noundef nonnull align 4 dereferenceable(912) @t1)
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi100EED1Ev, ptr @t1, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2STILi100EEC1Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2STILi100EEC2Ev(ptr noundef nonnull align 4 dereferenceable(912) [[THIS1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2STILi100EED1Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2STILi100EED2Ev(ptr noundef nonnull align 4 dereferenceable(912) [[THIS1]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2STILi100EEC2Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP5]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP6]], align 4
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 3, ptr [[TMP9]], align 4
// OMP-DEfAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// OMP-DEfAULT-NEXT: store i32 1, ptr [[TMP10]], align 4
// OMP-DEfAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// OMP-DEfAULT-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4
// OMP-DEfAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// OMP-DEfAULT-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4
// OMP-DEfAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// OMP-DEfAULT-NEXT: store ptr @.offload_sizes.14, ptr [[TMP13]], align 4
// OMP-DEfAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// OMP-DEfAULT-NEXT: store ptr @.offload_maptypes.15, ptr [[TMP14]], align 4
// OMP-DEfAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP15]], align 4
// OMP-DEfAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP16]], align 4
// OMP-DEfAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP17]], align 8
// OMP-DEfAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP18]], align 8
// OMP-DEfAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// OMP-DEfAULT-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP19]], align 4
// OMP-DEfAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// OMP-DEfAULT-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP20]], align 4
// OMP-DEfAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// OMP-DEfAULT-NEXT: store i32 0, ptr [[TMP21]], align 4
// OMP-DEfAULT-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EEC1Ev_l218.region_id, ptr [[KERNEL_ARGS]])
// OMP-DEfAULT-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// OMP-DEfAULT-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// OMP-DEfAULT: omp_offload.failed:
// OMP-DEfAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EEC1Ev_l218(i32 [[TMP3]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: br label [[OMP_OFFLOAD_CONT]]
// OMP-DEfAULT: omp_offload.cont:
// OMP-DEfAULT-NEXT: [[TMP24:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP25:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP24]], ptr [[TMP25]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EEC1Ev_l218
// OMP-DEfAULT-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EEC1Ev_l218.omp_outlined, i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EEC1Ev_l218.omp_outlined
// OMP-DEfAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// OMP-DEfAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP-DEfAULT: cond.true:
// OMP-DEfAULT-NEXT: br label [[COND_END:%.*]]
// OMP-DEfAULT: cond.false:
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: br label [[COND_END]]
// OMP-DEfAULT: cond.end:
// OMP-DEfAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-DEfAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// OMP-DEfAULT: omp.inner.for.cond:
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-DEfAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP-DEfAULT: omp.inner.for.body:
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 117
// OMP-DEfAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// OMP-DEfAULT: omp.body.continue:
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// OMP-DEfAULT: omp.inner.for.inc:
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-DEfAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// OMP-DEfAULT: omp.inner.for.end:
// OMP-DEfAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// OMP-DEfAULT: omp.loop.exit:
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2STILi100EED2Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP5]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP6]], align 4
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 3, ptr [[TMP9]], align 4
// OMP-DEfAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// OMP-DEfAULT-NEXT: store i32 1, ptr [[TMP10]], align 4
// OMP-DEfAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// OMP-DEfAULT-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4
// OMP-DEfAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// OMP-DEfAULT-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4
// OMP-DEfAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// OMP-DEfAULT-NEXT: store ptr @.offload_sizes.16, ptr [[TMP13]], align 4
// OMP-DEfAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// OMP-DEfAULT-NEXT: store ptr @.offload_maptypes.17, ptr [[TMP14]], align 4
// OMP-DEfAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP15]], align 4
// OMP-DEfAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP16]], align 4
// OMP-DEfAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP17]], align 8
// OMP-DEfAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP18]], align 8
// OMP-DEfAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// OMP-DEfAULT-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP19]], align 4
// OMP-DEfAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// OMP-DEfAULT-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP20]], align 4
// OMP-DEfAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// OMP-DEfAULT-NEXT: store i32 0, ptr [[TMP21]], align 4
// OMP-DEfAULT-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EED1Ev_l225.region_id, ptr [[KERNEL_ARGS]])
// OMP-DEfAULT-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// OMP-DEfAULT-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// OMP-DEfAULT: omp_offload.failed:
// OMP-DEfAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EED1Ev_l225(i32 [[TMP3]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: br label [[OMP_OFFLOAD_CONT]]
// OMP-DEfAULT: omp_offload.cont:
// OMP-DEfAULT-NEXT: [[TMP24:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP25:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP24]], ptr [[TMP25]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EED1Ev_l225
// OMP-DEfAULT-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EED1Ev_l225.omp_outlined, i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EED1Ev_l225.omp_outlined
// OMP-DEfAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// OMP-DEfAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP-DEfAULT: cond.true:
// OMP-DEfAULT-NEXT: br label [[COND_END:%.*]]
// OMP-DEfAULT: cond.false:
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: br label [[COND_END]]
// OMP-DEfAULT: cond.end:
// OMP-DEfAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-DEfAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// OMP-DEfAULT: omp.inner.for.cond:
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-DEfAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP-DEfAULT: omp.inner.for.body:
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 118
// OMP-DEfAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// OMP-DEfAULT: omp.body.continue:
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// OMP-DEfAULT: omp.inner.for.inc:
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-DEfAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// OMP-DEfAULT: omp.inner.for.end:
// OMP-DEfAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// OMP-DEfAULT: omp.loop.exit:
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.18
// OMP-DEfAULT-SAME: () #[[ATTR0]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: call void @_ZN2STILi1000EEC1Ev(ptr noundef nonnull align 4 dereferenceable(4512) @t2)
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi1000EED1Ev, ptr @t2, ptr @__dso_handle) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2STILi1000EEC1Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2STILi1000EEC2Ev(ptr noundef nonnull align 4 dereferenceable(4512) [[THIS1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2STILi1000EED1Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2STILi1000EED2Ev(ptr noundef nonnull align 4 dereferenceable(4512) [[THIS1]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2STILi1000EEC2Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP5]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP6]], align 4
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 3, ptr [[TMP9]], align 4
// OMP-DEfAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// OMP-DEfAULT-NEXT: store i32 1, ptr [[TMP10]], align 4
// OMP-DEfAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// OMP-DEfAULT-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4
// OMP-DEfAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// OMP-DEfAULT-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4
// OMP-DEfAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// OMP-DEfAULT-NEXT: store ptr @.offload_sizes.19, ptr [[TMP13]], align 4
// OMP-DEfAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// OMP-DEfAULT-NEXT: store ptr @.offload_maptypes.20, ptr [[TMP14]], align 4
// OMP-DEfAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP15]], align 4
// OMP-DEfAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP16]], align 4
// OMP-DEfAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP17]], align 8
// OMP-DEfAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP18]], align 8
// OMP-DEfAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// OMP-DEfAULT-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP19]], align 4
// OMP-DEfAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// OMP-DEfAULT-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP20]], align 4
// OMP-DEfAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// OMP-DEfAULT-NEXT: store i32 0, ptr [[TMP21]], align 4
// OMP-DEfAULT-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EEC1Ev_l218.region_id, ptr [[KERNEL_ARGS]])
// OMP-DEfAULT-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// OMP-DEfAULT-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// OMP-DEfAULT: omp_offload.failed:
// OMP-DEfAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EEC1Ev_l218(i32 [[TMP3]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: br label [[OMP_OFFLOAD_CONT]]
// OMP-DEfAULT: omp_offload.cont:
// OMP-DEfAULT-NEXT: [[TMP24:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP25:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP24]], ptr [[TMP25]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EEC1Ev_l218
// OMP-DEfAULT-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EEC1Ev_l218.omp_outlined, i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EEC1Ev_l218.omp_outlined
// OMP-DEfAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// OMP-DEfAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP-DEfAULT: cond.true:
// OMP-DEfAULT-NEXT: br label [[COND_END:%.*]]
// OMP-DEfAULT: cond.false:
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: br label [[COND_END]]
// OMP-DEfAULT: cond.end:
// OMP-DEfAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-DEfAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// OMP-DEfAULT: omp.inner.for.cond:
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-DEfAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP-DEfAULT: omp.inner.for.body:
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1017
// OMP-DEfAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// OMP-DEfAULT: omp.body.continue:
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// OMP-DEfAULT: omp.inner.for.inc:
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-DEfAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// OMP-DEfAULT: omp.inner.for.end:
// OMP-DEfAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// OMP-DEfAULT: omp.loop.exit:
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2STILi1000EED2Ev
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP5]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP6]], align 4
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 3, ptr [[TMP9]], align 4
// OMP-DEfAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// OMP-DEfAULT-NEXT: store i32 1, ptr [[TMP10]], align 4
// OMP-DEfAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// OMP-DEfAULT-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4
// OMP-DEfAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// OMP-DEfAULT-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4
// OMP-DEfAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// OMP-DEfAULT-NEXT: store ptr @.offload_sizes.21, ptr [[TMP13]], align 4
// OMP-DEfAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// OMP-DEfAULT-NEXT: store ptr @.offload_maptypes.22, ptr [[TMP14]], align 4
// OMP-DEfAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP15]], align 4
// OMP-DEfAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP16]], align 4
// OMP-DEfAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP17]], align 8
// OMP-DEfAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP18]], align 8
// OMP-DEfAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// OMP-DEfAULT-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP19]], align 4
// OMP-DEfAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// OMP-DEfAULT-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP20]], align 4
// OMP-DEfAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// OMP-DEfAULT-NEXT: store i32 0, ptr [[TMP21]], align 4
// OMP-DEfAULT-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EED1Ev_l225.region_id, ptr [[KERNEL_ARGS]])
// OMP-DEfAULT-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// OMP-DEfAULT-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// OMP-DEfAULT: omp_offload.failed:
// OMP-DEfAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EED1Ev_l225(i32 [[TMP3]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: br label [[OMP_OFFLOAD_CONT]]
// OMP-DEfAULT: omp_offload.cont:
// OMP-DEfAULT-NEXT: [[TMP24:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP25:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP24]], ptr [[TMP25]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EED1Ev_l225
// OMP-DEfAULT-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EED1Ev_l225.omp_outlined, i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EED1Ev_l225.omp_outlined
// OMP-DEfAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// OMP-DEfAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP-DEfAULT: cond.true:
// OMP-DEfAULT-NEXT: br label [[COND_END:%.*]]
// OMP-DEfAULT: cond.false:
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: br label [[COND_END]]
// OMP-DEfAULT: cond.end:
// OMP-DEfAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-DEfAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// OMP-DEfAULT: omp.inner.for.cond:
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-DEfAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP-DEfAULT: omp.inner.for.body:
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1018
// OMP-DEfAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// OMP-DEfAULT: omp.body.continue:
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// OMP-DEfAULT: omp.inner.for.inc:
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-DEfAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// OMP-DEfAULT: omp.inner.for.end:
// OMP-DEfAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// OMP-DEfAULT: omp.loop.exit:
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_Z3bari
// OMP-DEfAULT-SAME: (i32 noundef [[A:%.*]]) #[[ATTR1]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[R:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[R_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP0]], ptr [[R]], align 4
// OMP-DEfAULT-NEXT: call void @_ZN2SA3fooEv(ptr noundef nonnull align 4 dereferenceable(16) @_ZL2a1)
// OMP-DEfAULT-NEXT: call void @_ZN2SA3fooEv(ptr noundef nonnull align 4 dereferenceable(16) @a2)
// OMP-DEfAULT-NEXT: call void @_ZN2SB3fooEv(ptr noundef nonnull align 4 dereferenceable(32) @b1)
// OMP-DEfAULT-NEXT: call void @_ZN2SB3fooEv(ptr noundef nonnull align 4 dereferenceable(32) @b2)
// OMP-DEfAULT-NEXT: call void @_ZN2SC3fooEv(ptr noundef nonnull align 4 dereferenceable(64) @_ZL2c1)
// OMP-DEfAULT-NEXT: call void @_ZN2SD3fooEv(ptr noundef nonnull align 4 dereferenceable(128) @d1)
// OMP-DEfAULT-NEXT: call void @_ZN2SE3fooEv(ptr noundef nonnull align 4 dereferenceable(256) @e1)
// OMP-DEfAULT-NEXT: call void @_ZN2STILi100EE3fooEv(ptr noundef nonnull align 4 dereferenceable(912) @t1)
// OMP-DEfAULT-NEXT: call void @_ZN2STILi1000EE3fooEv(ptr noundef nonnull align 4 dereferenceable(4512) @t2)
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[R]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[R_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[R_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP2]], ptr [[TMP3]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP2]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP5]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 3, ptr [[TMP8]], align 4
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// OMP-DEfAULT-NEXT: store i32 1, ptr [[TMP9]], align 4
// OMP-DEfAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// OMP-DEfAULT-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4
// OMP-DEfAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// OMP-DEfAULT-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4
// OMP-DEfAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// OMP-DEfAULT-NEXT: store ptr @.offload_sizes.23, ptr [[TMP12]], align 4
// OMP-DEfAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// OMP-DEfAULT-NEXT: store ptr @.offload_maptypes.24, ptr [[TMP13]], align 4
// OMP-DEfAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP14]], align 4
// OMP-DEfAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP15]], align 4
// OMP-DEfAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP16]], align 8
// OMP-DEfAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP17]], align 8
// OMP-DEfAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// OMP-DEfAULT-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP18]], align 4
// OMP-DEfAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// OMP-DEfAULT-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP19]], align 4
// OMP-DEfAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// OMP-DEfAULT-NEXT: store i32 0, ptr [[TMP20]], align 4
// OMP-DEfAULT-NEXT: [[TMP21:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l267.region_id, ptr [[KERNEL_ARGS]])
// OMP-DEfAULT-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
// OMP-DEfAULT-NEXT: br i1 [[TMP22]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// OMP-DEfAULT: omp_offload.failed:
// OMP-DEfAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l267(i32 [[TMP2]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: br label [[OMP_OFFLOAD_CONT]]
// OMP-DEfAULT: omp_offload.cont:
// OMP-DEfAULT-NEXT: [[TMP23:%.*]] = load i32, ptr [[R]], align 4
// OMP-DEfAULT-NEXT: [[TMP24:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP25]]
// OMP-DEfAULT-NEXT: ret i32 [[ADD]]
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SA3fooEv
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 1
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SB3fooEv
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP5]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP6]], align 4
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 3, ptr [[TMP9]], align 4
// OMP-DEfAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// OMP-DEfAULT-NEXT: store i32 1, ptr [[TMP10]], align 4
// OMP-DEfAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// OMP-DEfAULT-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4
// OMP-DEfAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// OMP-DEfAULT-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4
// OMP-DEfAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// OMP-DEfAULT-NEXT: store ptr @.offload_sizes.25, ptr [[TMP13]], align 4
// OMP-DEfAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// OMP-DEfAULT-NEXT: store ptr @.offload_maptypes.26, ptr [[TMP14]], align 4
// OMP-DEfAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP15]], align 4
// OMP-DEfAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP16]], align 4
// OMP-DEfAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP17]], align 8
// OMP-DEfAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP18]], align 8
// OMP-DEfAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// OMP-DEfAULT-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP19]], align 4
// OMP-DEfAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// OMP-DEfAULT-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP20]], align 4
// OMP-DEfAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// OMP-DEfAULT-NEXT: store i32 0, ptr [[TMP21]], align 4
// OMP-DEfAULT-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SB3fooEv_l122.region_id, ptr [[KERNEL_ARGS]])
// OMP-DEfAULT-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// OMP-DEfAULT-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// OMP-DEfAULT: omp_offload.failed:
// OMP-DEfAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SB3fooEv_l122(i32 [[TMP3]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: br label [[OMP_OFFLOAD_CONT]]
// OMP-DEfAULT: omp_offload.cont:
// OMP-DEfAULT-NEXT: [[TMP24:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP25:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP24]], ptr [[TMP25]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SC3fooEv
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 7
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SD3fooEv
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 10
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2SE3fooEv
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SE3fooEv_l185(i32 [[TMP3]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2STILi100EE3fooEv
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP5]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP6]], align 4
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 3, ptr [[TMP9]], align 4
// OMP-DEfAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// OMP-DEfAULT-NEXT: store i32 1, ptr [[TMP10]], align 4
// OMP-DEfAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// OMP-DEfAULT-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4
// OMP-DEfAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// OMP-DEfAULT-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4
// OMP-DEfAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// OMP-DEfAULT-NEXT: store ptr @.offload_sizes.27, ptr [[TMP13]], align 4
// OMP-DEfAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// OMP-DEfAULT-NEXT: store ptr @.offload_maptypes.28, ptr [[TMP14]], align 4
// OMP-DEfAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP15]], align 4
// OMP-DEfAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP16]], align 4
// OMP-DEfAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP17]], align 8
// OMP-DEfAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP18]], align 8
// OMP-DEfAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// OMP-DEfAULT-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP19]], align 4
// OMP-DEfAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// OMP-DEfAULT-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP20]], align 4
// OMP-DEfAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// OMP-DEfAULT-NEXT: store i32 0, ptr [[TMP21]], align 4
// OMP-DEfAULT-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EE3fooEv_l211.region_id, ptr [[KERNEL_ARGS]])
// OMP-DEfAULT-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// OMP-DEfAULT-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// OMP-DEfAULT: omp_offload.failed:
// OMP-DEfAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EE3fooEv_l211(i32 [[TMP3]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: br label [[OMP_OFFLOAD_CONT]]
// OMP-DEfAULT: omp_offload.cont:
// OMP-DEfAULT-NEXT: [[TMP24:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP25:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP24]], ptr [[TMP25]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_ZN2STILi1000EE3fooEv
// OMP-DEfAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// OMP-DEfAULT-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// OMP-DEfAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 [[TMP3]], ptr [[TMP5]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP6]], align 4
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// OMP-DEfAULT-NEXT: store i32 3, ptr [[TMP9]], align 4
// OMP-DEfAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// OMP-DEfAULT-NEXT: store i32 1, ptr [[TMP10]], align 4
// OMP-DEfAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// OMP-DEfAULT-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4
// OMP-DEfAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// OMP-DEfAULT-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4
// OMP-DEfAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// OMP-DEfAULT-NEXT: store ptr @.offload_sizes.29, ptr [[TMP13]], align 4
// OMP-DEfAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// OMP-DEfAULT-NEXT: store ptr @.offload_maptypes.30, ptr [[TMP14]], align 4
// OMP-DEfAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP15]], align 4
// OMP-DEfAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// OMP-DEfAULT-NEXT: store ptr null, ptr [[TMP16]], align 4
// OMP-DEfAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP17]], align 8
// OMP-DEfAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// OMP-DEfAULT-NEXT: store i64 0, ptr [[TMP18]], align 8
// OMP-DEfAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// OMP-DEfAULT-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP19]], align 4
// OMP-DEfAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// OMP-DEfAULT-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP20]], align 4
// OMP-DEfAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// OMP-DEfAULT-NEXT: store i32 0, ptr [[TMP21]], align 4
// OMP-DEfAULT-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EE3fooEv_l211.region_id, ptr [[KERNEL_ARGS]])
// OMP-DEfAULT-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// OMP-DEfAULT-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// OMP-DEfAULT: omp_offload.failed:
// OMP-DEfAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EE3fooEv_l211(i32 [[TMP3]]) #[[ATTR2]]
// OMP-DEfAULT-NEXT: br label [[OMP_OFFLOAD_CONT]]
// OMP-DEfAULT: omp_offload.cont:
// OMP-DEfAULT-NEXT: [[TMP24:%.*]] = load i32, ptr [[A]], align 4
// OMP-DEfAULT-NEXT: [[TMP25:%.*]] = load ptr, ptr @R, align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP24]], ptr [[TMP25]], align 4
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l267
// OMP-DEfAULT-SAME: (i32 noundef [[R:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[R_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[R_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store i32 [[R]], ptr [[R_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[R_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP0]], ptr [[R_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[R_CASTED]], align 4
// OMP-DEfAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l267.omp_outlined, i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l267.omp_outlined
// OMP-DEfAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[R:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[R_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[R]], ptr [[R_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// OMP-DEfAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP-DEfAULT: cond.true:
// OMP-DEfAULT-NEXT: br label [[COND_END:%.*]]
// OMP-DEfAULT: cond.false:
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: br label [[COND_END]]
// OMP-DEfAULT: cond.end:
// OMP-DEfAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-DEfAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// OMP-DEfAULT: omp.inner.for.cond:
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-DEfAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP-DEfAULT: omp.inner.for.body:
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[R_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
// OMP-DEfAULT-NEXT: store i32 [[INC]], ptr [[R_ADDR]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// OMP-DEfAULT: omp.body.continue:
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// OMP-DEfAULT: omp.inner.for.inc:
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-DEfAULT-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// OMP-DEfAULT: omp.inner.for.end:
// OMP-DEfAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// OMP-DEfAULT: omp.loop.exit:
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SB3fooEv_l122
// OMP-DEfAULT-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SB3fooEv_l122.omp_outlined, i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SB3fooEv_l122.omp_outlined
// OMP-DEfAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// OMP-DEfAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP-DEfAULT: cond.true:
// OMP-DEfAULT-NEXT: br label [[COND_END:%.*]]
// OMP-DEfAULT: cond.false:
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: br label [[COND_END]]
// OMP-DEfAULT: cond.end:
// OMP-DEfAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-DEfAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// OMP-DEfAULT: omp.inner.for.cond:
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-DEfAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP-DEfAULT: omp.inner.for.body:
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 4
// OMP-DEfAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// OMP-DEfAULT: omp.body.continue:
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// OMP-DEfAULT: omp.inner.for.inc:
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-DEfAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// OMP-DEfAULT: omp.inner.for.end:
// OMP-DEfAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// OMP-DEfAULT: omp.loop.exit:
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SE3fooEv_l185
// OMP-DEfAULT-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SE3fooEv_l185.omp_outlined, i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SE3fooEv_l185.omp_outlined
// OMP-DEfAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// OMP-DEfAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP-DEfAULT: cond.true:
// OMP-DEfAULT-NEXT: br label [[COND_END:%.*]]
// OMP-DEfAULT: cond.false:
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: br label [[COND_END]]
// OMP-DEfAULT: cond.end:
// OMP-DEfAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-DEfAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// OMP-DEfAULT: omp.inner.for.cond:
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-DEfAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP-DEfAULT: omp.inner.for.body:
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 13
// OMP-DEfAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// OMP-DEfAULT: omp.body.continue:
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// OMP-DEfAULT: omp.inner.for.inc:
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-DEfAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// OMP-DEfAULT: omp.inner.for.end:
// OMP-DEfAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// OMP-DEfAULT: omp.loop.exit:
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EE3fooEv_l211
// OMP-DEfAULT-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EE3fooEv_l211.omp_outlined, i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EE3fooEv_l211.omp_outlined
// OMP-DEfAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// OMP-DEfAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP-DEfAULT: cond.true:
// OMP-DEfAULT-NEXT: br label [[COND_END:%.*]]
// OMP-DEfAULT: cond.false:
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: br label [[COND_END]]
// OMP-DEfAULT: cond.end:
// OMP-DEfAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-DEfAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// OMP-DEfAULT: omp.inner.for.cond:
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-DEfAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP-DEfAULT: omp.inner.for.body:
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 116
// OMP-DEfAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// OMP-DEfAULT: omp.body.continue:
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// OMP-DEfAULT: omp.inner.for.inc:
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-DEfAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// OMP-DEfAULT: omp.inner.for.end:
// OMP-DEfAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// OMP-DEfAULT: omp.loop.exit:
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EE3fooEv_l211
// OMP-DEfAULT-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// OMP-DEfAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EE3fooEv_l211.omp_outlined, i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EE3fooEv_l211.omp_outlined
// OMP-DEfAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// OMP-DEfAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// OMP-DEfAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// OMP-DEfAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// OMP-DEfAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-DEfAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// OMP-DEfAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP-DEfAULT: cond.true:
// OMP-DEfAULT-NEXT: br label [[COND_END:%.*]]
// OMP-DEfAULT: cond.false:
// OMP-DEfAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: br label [[COND_END]]
// OMP-DEfAULT: cond.end:
// OMP-DEfAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-DEfAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// OMP-DEfAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// OMP-DEfAULT: omp.inner.for.cond:
// OMP-DEfAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// OMP-DEfAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-DEfAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP-DEfAULT: omp.inner.for.body:
// OMP-DEfAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-DEfAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-DEfAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// OMP-DEfAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1016
// OMP-DEfAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// OMP-DEfAULT: omp.body.continue:
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// OMP-DEfAULT: omp.inner.for.inc:
// OMP-DEfAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-DEfAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// OMP-DEfAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// OMP-DEfAULT: omp.inner.for.end:
// OMP-DEfAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// OMP-DEfAULT: omp.loop.exit:
// OMP-DEfAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_GLOBAL__I_000500
// OMP-DEfAULT-SAME: () #[[ATTR0]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: call void @__cxx_global_var_init()
// OMP-DEfAULT-NEXT: call void @__cxx_global_var_init.2()
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_GLOBAL__I_000501
// OMP-DEfAULT-SAME: () #[[ATTR0]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: call void @__cxx_global_var_init.3()
// OMP-DEfAULT-NEXT: ret void
//
//
// OMP-DEfAULT-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_parallel_generic_loop_codegen_1.cpp
// OMP-DEfAULT-SAME: () #[[ATTR0]] {
// OMP-DEfAULT-NEXT: entry:
// OMP-DEfAULT-NEXT: call void @__cxx_global_var_init.1()
// OMP-DEfAULT-NEXT: call void @__cxx_global_var_init.4()
// OMP-DEfAULT-NEXT: call void @__cxx_global_var_init.5()
// OMP-DEfAULT-NEXT: call void @__cxx_global_var_init.8()
// OMP-DEfAULT-NEXT: call void @__cxx_global_var_init.13()
// OMP-DEfAULT-NEXT: call void @__cxx_global_var_init.18()
// OMP-DEfAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init
// CHECK-NTARGET-OMP-DEFAULT-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @_ZL2a1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @_ZL2a1, ptr @__dso_handle) #[[ATTR2:[0-9]+]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SAC1Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SAC2Ev(ptr noundef nonnull align 4 dereferenceable(16) [[THIS1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SAD1Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SAD2Ev(ptr noundef nonnull align 4 dereferenceable(16) [[THIS1]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SAC2Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 2
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SAD2Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 3
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
// CHECK-NTARGET-OMP-DEFAULT-SAME: () #[[ATTR0]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SAC1Ev(ptr noundef nonnull align 4 dereferenceable(16) @a2)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SAD1Ev, ptr @a2, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
// CHECK-NTARGET-OMP-DEFAULT-SAME: () #[[ATTR0]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SBC1Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SBC2Ev(ptr noundef nonnull align 4 dereferenceable(32) [[THIS1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SBD1Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SBD2Ev(ptr noundef nonnull align 4 dereferenceable(32) [[THIS1]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SBC2Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 5
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SBD2Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 6
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
// CHECK-NTARGET-OMP-DEFAULT-SAME: () #[[ATTR0]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SBC1Ev(ptr noundef nonnull align 4 dereferenceable(32) @b2)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SBD1Ev, ptr @b2, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
// CHECK-NTARGET-OMP-DEFAULT-SAME: () #[[ATTR0]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SCC1Ev(ptr noundef nonnull align 4 dereferenceable(64) @_ZL2c1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SCD1Ev, ptr @_ZL2c1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SCC1Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SCC2Ev(ptr noundef nonnull align 4 dereferenceable(64) [[THIS1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SCD1Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SCD2Ev(ptr noundef nonnull align 4 dereferenceable(64) [[THIS1]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SCC2Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SCC1Ev_l148(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SCC1Ev_l148
// CHECK-NTARGET-OMP-DEFAULT-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2:[0-9]+]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SCC1Ev_l148.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SCC1Ev_l148.omp_outlined
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.true:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.false:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END]]
// CHECK-NTARGET-OMP-DEFAULT: cond.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.cond:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.body:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.body.continue:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.inc:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.loop.exit:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SCD2Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 9
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.5
// CHECK-NTARGET-OMP-DEFAULT-SAME: () #[[ATTR0]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SDC1Ev(ptr noundef nonnull align 4 dereferenceable(128) @d1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SDD1Ev, ptr @d1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SDC1Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SDC2Ev(ptr noundef nonnull align 4 dereferenceable(128) [[THIS1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SDD1Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SDD2Ev(ptr noundef nonnull align 4 dereferenceable(128) [[THIS1]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SDC2Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 11
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SDD2Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SDD1Ev_l174(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SDD1Ev_l174
// CHECK-NTARGET-OMP-DEFAULT-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SDD1Ev_l174.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SDD1Ev_l174.omp_outlined
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.true:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.false:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END]]
// CHECK-NTARGET-OMP-DEFAULT: cond.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.cond:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.body:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 12
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.body.continue:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.inc:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.loop.exit:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.6
// CHECK-NTARGET-OMP-DEFAULT-SAME: () #[[ATTR0]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SEC1Ev(ptr noundef nonnull align 4 dereferenceable(256) @e1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2SED1Ev, ptr @e1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SEC1Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SEC2Ev(ptr noundef nonnull align 4 dereferenceable(256) [[THIS1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SED1Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SED2Ev(ptr noundef nonnull align 4 dereferenceable(256) [[THIS1]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SEC2Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SEC1Ev_l192(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SEC1Ev_l192
// CHECK-NTARGET-OMP-DEFAULT-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SEC1Ev_l192.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SEC1Ev_l192.omp_outlined
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.true:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.false:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END]]
// CHECK-NTARGET-OMP-DEFAULT: cond.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.cond:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.body:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 14
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.body.continue:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.inc:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.loop.exit:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SED2Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SED1Ev_l199(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SED1Ev_l199
// CHECK-NTARGET-OMP-DEFAULT-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SED1Ev_l199.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SED1Ev_l199.omp_outlined
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.true:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.false:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END]]
// CHECK-NTARGET-OMP-DEFAULT: cond.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.cond:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.body:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 15
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.body.continue:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.inc:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.loop.exit:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.7
// CHECK-NTARGET-OMP-DEFAULT-SAME: () #[[ATTR0]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2STILi100EEC1Ev(ptr noundef nonnull align 4 dereferenceable(912) @t1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi100EED1Ev, ptr @t1, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2STILi100EEC1Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2STILi100EEC2Ev(ptr noundef nonnull align 4 dereferenceable(912) [[THIS1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2STILi100EED1Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2STILi100EED2Ev(ptr noundef nonnull align 4 dereferenceable(912) [[THIS1]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2STILi100EEC2Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EEC1Ev_l218(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EEC1Ev_l218
// CHECK-NTARGET-OMP-DEFAULT-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EEC1Ev_l218.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EEC1Ev_l218.omp_outlined
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.true:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.false:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END]]
// CHECK-NTARGET-OMP-DEFAULT: cond.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.cond:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.body:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 117
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.body.continue:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.inc:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.loop.exit:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2STILi100EED2Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EED1Ev_l225(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EED1Ev_l225
// CHECK-NTARGET-OMP-DEFAULT-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EED1Ev_l225.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EED1Ev_l225.omp_outlined
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.true:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.false:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END]]
// CHECK-NTARGET-OMP-DEFAULT: cond.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.cond:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.body:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 118
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.body.continue:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.inc:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.loop.exit:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@__cxx_global_var_init.8
// CHECK-NTARGET-OMP-DEFAULT-SAME: () #[[ATTR0]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2STILi1000EEC1Ev(ptr noundef nonnull align 4 dereferenceable(4512) @t2)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2STILi1000EED1Ev, ptr @t2, ptr @__dso_handle) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2STILi1000EEC1Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2STILi1000EEC2Ev(ptr noundef nonnull align 4 dereferenceable(4512) [[THIS1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2STILi1000EED1Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2STILi1000EED2Ev(ptr noundef nonnull align 4 dereferenceable(4512) [[THIS1]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2STILi1000EEC2Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EEC1Ev_l218(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EEC1Ev_l218
// CHECK-NTARGET-OMP-DEFAULT-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EEC1Ev_l218.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EEC1Ev_l218.omp_outlined
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.true:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.false:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END]]
// CHECK-NTARGET-OMP-DEFAULT: cond.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.cond:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.body:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1017
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.body.continue:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.inc:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.loop.exit:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2STILi1000EED2Ev
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EED1Ev_l225(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EED1Ev_l225
// CHECK-NTARGET-OMP-DEFAULT-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EED1Ev_l225.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EED1Ev_l225.omp_outlined
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.true:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.false:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END]]
// CHECK-NTARGET-OMP-DEFAULT: cond.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.cond:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.body:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1018
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.body.continue:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.inc:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.loop.exit:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_Z3bari
// CHECK-NTARGET-OMP-DEFAULT-SAME: (i32 noundef signext [[A:%.*]]) #[[ATTR1]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[R:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[R_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP0]], ptr [[R]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SA3fooEv(ptr noundef nonnull align 4 dereferenceable(16) @_ZL2a1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SA3fooEv(ptr noundef nonnull align 4 dereferenceable(16) @a2)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SB3fooEv(ptr noundef nonnull align 4 dereferenceable(32) @b1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SB3fooEv(ptr noundef nonnull align 4 dereferenceable(32) @b2)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SC3fooEv(ptr noundef nonnull align 4 dereferenceable(64) @_ZL2c1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SD3fooEv(ptr noundef nonnull align 4 dereferenceable(128) @d1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2SE3fooEv(ptr noundef nonnull align 4 dereferenceable(256) @e1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2STILi100EE3fooEv(ptr noundef nonnull align 4 dereferenceable(912) @t1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @_ZN2STILi1000EE3fooEv(ptr noundef nonnull align 4 dereferenceable(4512) @t2)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[R]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[R_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i64, ptr [[R_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l267(i64 [[TMP2]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[R]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[TMP5]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret i32 [[ADD]]
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SA3fooEv
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(16) [[THIS:%.*]]) #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SB3fooEv
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(32) [[THIS:%.*]]) #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SB3fooEv_l122(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SC3fooEv
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(64) [[THIS:%.*]]) #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 7
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SD3fooEv
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(128) [[THIS:%.*]]) #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 10
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2SE3fooEv
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[THIS:%.*]]) #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SE3fooEv_l185(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2STILi100EE3fooEv
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(912) [[THIS:%.*]]) #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EE3fooEv_l211(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_ZN2STILi1000EE3fooEv
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noundef nonnull align 4 dereferenceable(4512) [[THIS:%.*]]) #[[ATTR1]] comdat {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP1]], ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EE3fooEv_l211(i64 [[TMP3]]) #[[ATTR2]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load ptr, ptr @R, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l267
// CHECK-NTARGET-OMP-DEFAULT-SAME: (i64 noundef [[R:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[R_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[R_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[R]], ptr [[R_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[R_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP0]], ptr [[R_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[R_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l267.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l267.omp_outlined
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[R:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[R_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[R]], ptr [[R_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.true:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.false:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END]]
// CHECK-NTARGET-OMP-DEFAULT: cond.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.cond:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.body:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[R_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[INC]], ptr [[R_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.body.continue:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.inc:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.loop.exit:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SB3fooEv_l122
// CHECK-NTARGET-OMP-DEFAULT-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SB3fooEv_l122.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SB3fooEv_l122.omp_outlined
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.true:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.false:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END]]
// CHECK-NTARGET-OMP-DEFAULT: cond.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.cond:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.body:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.body.continue:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.inc:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.loop.exit:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SE3fooEv_l185
// CHECK-NTARGET-OMP-DEFAULT-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SE3fooEv_l185.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SE3fooEv_l185.omp_outlined
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.true:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.false:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END]]
// CHECK-NTARGET-OMP-DEFAULT: cond.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.cond:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.body:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 13
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.body.continue:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.inc:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.loop.exit:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EE3fooEv_l211
// CHECK-NTARGET-OMP-DEFAULT-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EE3fooEv_l211.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi100EE3fooEv_l211.omp_outlined
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.true:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.false:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END]]
// CHECK-NTARGET-OMP-DEFAULT: cond.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.cond:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.body:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 116
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.body.continue:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.inc:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.loop.exit:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EE3fooEv_l211
// CHECK-NTARGET-OMP-DEFAULT-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EE3fooEv_l211.omp_outlined, i64 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2STILi1000EE3fooEv_l211.omp_outlined
// CHECK-NTARGET-OMP-DEFAULT-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.true:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: cond.false:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[COND_END]]
// CHECK-NTARGET-OMP-DEFAULT: cond.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.cond:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.body:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1016
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.body.continue:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.inc:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NTARGET-OMP-DEFAULT-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-NTARGET-OMP-DEFAULT: omp.inner.for.end:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-NTARGET-OMP-DEFAULT: omp.loop.exit:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_GLOBAL__I_000500
// CHECK-NTARGET-OMP-DEFAULT-SAME: () #[[ATTR0]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__cxx_global_var_init()
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.2()
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_GLOBAL__I_000501
// CHECK-NTARGET-OMP-DEFAULT-SAME: () #[[ATTR0]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.3()
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//
// CHECK-NTARGET-OMP-DEFAULT-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_parallel_generic_loop_codegen_1.cpp
// CHECK-NTARGET-OMP-DEFAULT-SAME: () #[[ATTR0]] {
// CHECK-NTARGET-OMP-DEFAULT-NEXT: entry:
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.1()
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.4()
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.5()
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.6()
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.7()
// CHECK-NTARGET-OMP-DEFAULT-NEXT: call void @__cxx_global_var_init.8()
// CHECK-NTARGET-OMP-DEFAULT-NEXT: ret void
//
//// NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
// TCHECK: {{.*}}