I used a script to reuse existing check lines rather than creating new ones. There are more opportunities to reduce the line count, but the "check generated functions" logic makes that somewhat tricky. FWIW, we really should redo the update script with all these use cases in mind...

Differential Revision: https://reviews.llvm.org/D128686
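For reference, regenerating the assertions for a test like this only requires re-running the updater on the file; the UTC_ARGS recorded in the test's NOTE line are picked up again on re-runs, so the flags don't need to be repeated. A minimal sketch (the clang binary location and test path below are placeholders, not taken from this commit):

  # Run from the llvm-project root; adjust paths to your checkout/build layout.
  llvm/utils/update_cc_test_checks.py --clang build/bin/clang \
      clang/test/OpenMP/<this-test>.cpp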
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -DOMP5 -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -DOMP5 -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -DOMP5 -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -DOMP5 -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -DOMP5 -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -DOMP5 -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// expected-no-diagnostics

#ifndef HEADER
#define HEADER

int main() {
  int a = 0;
#pragma omp parallel for lastprivate(conditional: a)
  for (int i = 0; i < 10; ++i) {
    if (i < 5) {
      a = 0;
#pragma omp parallel reduction(+:a) num_threads(10)
      a += i;
#pragma omp atomic
      a += i;
#pragma omp parallel num_threads(10)
#pragma omp atomic
      a += i;
    }
  }
  return 0;
}

#endif // HEADER
// CHECK1-LABEL: define {{[^@]+}}@main
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[A]])
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A1:%.*]] = alloca [[STRUCT_LASPRIVATE_CONDITIONAL:%.*]], align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_LASPRIVATE_CONDITIONAL]], %struct.lasprivate.conditional* [[A1]], i32 0, i32 1
// CHECK1-NEXT: store i8 0, i8* [[TMP1]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_LASPRIVATE_CONDITIONAL]], %struct.lasprivate.conditional* [[A1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[CMP3:%.*]] = icmp slt i32 [[TMP11]], 5
// CHECK1-NEXT: br i1 [[CMP3]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK1: if.then:
// CHECK1-NEXT: store i32 0, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP14:%.*]] = icmp sle i32 [[TMP13]], [[TMP12]]
// CHECK1-NEXT: br i1 [[TMP14]], label [[LP_COND_THEN:%.*]], label [[LP_COND_EXIT:%.*]]
// CHECK1: lp_cond_then:
// CHECK1-NEXT: store i32 [[TMP12]], i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP15]], i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: br label [[LP_COND_EXIT]]
// CHECK1: lp_cond_exit:
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 10)
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[TMP2]], i32* [[I]])
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP18:%.*]] = icmp sle i32 [[TMP17]], [[TMP16]]
// CHECK1-NEXT: br i1 [[TMP18]], label [[LP_COND_THEN4:%.*]], label [[LP_COND_EXIT5:%.*]]
// CHECK1: lp_cond_then4:
// CHECK1-NEXT: store i32 [[TMP16]], i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP19]], i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: br label [[LP_COND_EXIT5]]
// CHECK1: lp_cond_exit5:
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP20]] monotonic, align 4
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP24:%.*]] = icmp sle i32 [[TMP23]], [[TMP22]]
// CHECK1-NEXT: br i1 [[TMP24]], label [[LP_COND_THEN6:%.*]], label [[LP_COND_EXIT7:%.*]]
// CHECK1: lp_cond_then6:
// CHECK1-NEXT: store i32 [[TMP22]], i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP25]], i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: br label [[LP_COND_EXIT7]]
// CHECK1: lp_cond_exit7:
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 10)
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[TMP2]], i32* [[I]])
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_LASPRIVATE_CONDITIONAL]], %struct.lasprivate.conditional* [[A1]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = load i8, i8* [[TMP26]], align 4
// CHECK1-NEXT: [[TMP28:%.*]] = icmp ne i8 [[TMP27]], 0
// CHECK1-NEXT: br i1 [[TMP28]], label [[LPC_THEN:%.*]], label [[LPC_DONE:%.*]]
// CHECK1: lpc.then:
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP31:%.*]] = icmp sle i32 [[TMP30]], [[TMP29]]
// CHECK1-NEXT: br i1 [[TMP31]], label [[LP_COND_THEN8:%.*]], label [[LP_COND_EXIT9:%.*]]
// CHECK1: lp_cond_then8:
// CHECK1-NEXT: store i32 [[TMP29]], i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP32]], i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: br label [[LP_COND_EXIT9]]
// CHECK1: lp_cond_exit9:
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: br label [[LPC_DONE]]
// CHECK1: lpc.done:
// CHECK1-NEXT: br label [[IF_END]]
// CHECK1: if.end:
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP33]], 1
// CHECK1-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP4]])
// CHECK1-NEXT: br i1 [[TMP35]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
// CHECK1: .omp.lastprivate.then:
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: store i32 [[TMP36]], i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP37]], i32* [[TMP0]], align 4
// CHECK1-NEXT: br label [[DOTOMP_LASTPRIVATE_DONE]]
// CHECK1: .omp.lastprivate.done:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[A1]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[A1]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[A1]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK1-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP7]], i32 1, i64 8, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[A1]], align 4
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK1-NEXT: store i32 [[ADD2]], i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[A1]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP12]] monotonic, align 4
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP2]] monotonic, align 4
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP0]] to %struct.lasprivate.conditional*
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_LASPRIVATE_CONDITIONAL:%.*]], %struct.lasprivate.conditional* [[TMP4]], i32 0, i32 1
// CHECK1-NEXT: store atomic volatile i8 1, i8* [[TMP5]] unordered, align 1
// CHECK1-NEXT: ret void
//