koops updated this revision to Diff 529532.
koops added a comment.

1. Addressed Alexey's comments.
2. Reverted the changes to StmtPrinter.cpp, because the state of the AST needs to be shown as-is to clang developers when -ast-print/-ast-dump is used. This also required some changes to the test cases to reflect the mapped directives (from "omp loop" to "omp for", "omp distribute", or "omp simd"); a small example of the mapping is sketched below.
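For reference, here is a minimal sketch (not part of the patch; the function name mapping_examples and the compile line are made up for illustration) of the bind-based mapping that Sema::mapLoopConstruct now performs, which the new test files in this diff exercise:

  // Compile with (assumed): clang -fsyntax-only -fopenmp -fopenmp-version=50 example.cpp
  #define NNN 50
  int aaa[NNN];

  void mapping_examples() {
    // Enclosing parallel region, or an explicit bind(parallel):
    // the loop directive is treated as "omp for".
    #pragma omp parallel
    {
      #pragma omp loop bind(parallel)
      for (int j = 0; j < NNN; j++)
        aaa[j] = j * NNN;
    }

    // Enclosing teams region, or an explicit bind(teams):
    // the loop directive is treated as "omp distribute".
    #pragma omp teams
    {
      #pragma omp loop bind(teams)
      for (int j = 0; j < NNN; j++)
        aaa[j] = j * NNN;
    }

    // bind(thread): the loop directive is treated as "omp simd".
    #pragma omp loop bind(thread)
    for (int j = 0; j < NNN; j++)
      aaa[j] = j * NNN;

    // An "omp loop" with no bind clause and no enclosing OpenMP construct is
    // now rejected with the new err_omp_bind_required_on_loop diagnostic.
  }

A bind(teams) loop combined with a reduction clause is likewise rejected, via the new err_omp_loop_reduction_clause diagnostic.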
CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D144634/new/

https://reviews.llvm.org/D144634

Files:
  clang/include/clang/Basic/DiagnosticSemaKinds.td
  clang/include/clang/Sema/Sema.h
  clang/lib/Sema/SemaOpenMP.cpp
  clang/test/OpenMP/generic_loop_ast_print.cpp
  clang/test/OpenMP/loop_bind_codegen.cpp
  clang/test/OpenMP/loop_bind_enclosed.cpp
  clang/test/OpenMP/loop_bind_messages.cpp
  clang/test/OpenMP/nested_loop_codegen.cpp
Index: clang/test/OpenMP/nested_loop_codegen.cpp =================================================================== --- clang/test/OpenMP/nested_loop_codegen.cpp +++ clang/test/OpenMP/nested_loop_codegen.cpp @@ -58,6 +58,12 @@ // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[K:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 @@ -66,35 +72,27 @@ // CHECK1-NEXT: store i32 0, ptr [[TMP0]], align 4 // CHECK1-NEXT: br label [[FOR_COND:%.*]] // CHECK1: for.cond: -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 10 -// CHECK1-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END7:%.*]] // CHECK1: for.body: -// CHECK1-NEXT: store i32 0, ptr [[K]], align 4 -// CHECK1-NEXT: br label [[FOR_COND1:%.*]] -// CHECK1: for.cond1: -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[K]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP2]], 5 -// CHECK1-NEXT: br i1 [[CMP2]], label [[FOR_BODY3:%.*]], label [[FOR_END:%.*]] -// CHECK1: for.body3: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[K]], align 4 -// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP3]], 1 -// CHECK1-NEXT: store i32 [[INC]], ptr [[K]], align 4 -// CHECK1-NEXT: br label [[FOR_INC:%.*]] -// CHECK1: for.inc: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[K]], align 4 -// CHECK1-NEXT: [[INC4:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK1-NEXT: store i32 [[INC4]], ptr [[K]], align 4 -// CHECK1-NEXT: br label [[FOR_COND1]], !llvm.loop [[LOOP3:![0-9]+]] -// CHECK1: for.end: -// CHECK1-NEXT: br label [[FOR_INC5:%.*]] -// CHECK1: for.inc5: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[INC6:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK1-NEXT: store i32 [[INC6]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] -// CHECK1: for.end7: -// CHECK1-NEXT: ret void +// CHECK1-NEXT [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT call void @__kmpc_for_static_init_4(ptr @1, i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +//CHECK1 cond.end: +//CHECK1 omp.inner.for.cond: +//CHECK1 omp.inner.for.body: +//CHECK1 omp.body.continue: +//CHECK1 omp.inner.for.inc: +//CHECK1 omp.inner.for.end: +//CHECK1 omp.loop.exit: +// CHECK1-NEXT [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT [[TMP14:%.*]] = load i32, ptr [[TMP12]], align 4 +// CHECK1-NEXT call void @__kmpc_for_static_fini(ptr @1, i32 [[TMP14]]) +// CHECK1-NEXT [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4 +// CHECK1-NEXT call void @__kmpc_barrier(ptr @2, i32 [[TMP16]]) +//CHECK1 for.inc: +//CHECK1 for.end: +// CHECK1-NEXT ret void +// // // // CHECK1-LABEL: 
define {{[^@]+}}@_Z11inline_declv @@ -114,45 +112,36 @@ // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: [[RES_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[K:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8 -// CHECK1-NEXT: store ptr [[RES]], ptr [[RES_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[RES_ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[TMP0]], align 4 -// CHECK1-NEXT: br label [[FOR_COND:%.*]] -// CHECK1: for.cond: -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10 -// CHECK1-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END7:%.*]] -// CHECK1: for.body: -// CHECK1-NEXT: store i32 0, ptr [[K]], align 4 -// CHECK1-NEXT: br label [[FOR_COND1:%.*]] -// CHECK1: for.cond1: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[K]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP3]], 5 -// CHECK1-NEXT: br i1 [[CMP2]], label [[FOR_BODY3:%.*]], label [[FOR_END:%.*]] -// CHECK1: for.body3: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK1-NEXT: store i32 [[INC]], ptr [[TMP1]], align 4 -// CHECK1-NEXT: br label [[FOR_INC:%.*]] -// CHECK1: for.inc: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[K]], align 4 -// CHECK1-NEXT: [[INC4:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK1-NEXT: store i32 [[INC4]], ptr [[K]], align 4 -// CHECK1-NEXT: br label [[FOR_COND1]], !llvm.loop [[LOOP8:![0-9]+]] -// CHECK1: for.end: -// CHECK1-NEXT: br label [[FOR_INC5:%.*]] -// CHECK1: for.inc5: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[INC6:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK1-NEXT: store i32 [[INC6]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] -// CHECK1: for.end7: -// CHECK1-NEXT: ret void +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1: for.cond: +// CHECK1: for.body: +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 4, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @1, i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1: omp.inner.for.cond: +// CHECK1: omp.inner.for.body: +// CHECK1: omp.body.continue: +// CHECK1: omp.inner.for.inc: +// CHECK1: omp.inner.for.end: +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4 +// CHECK1-NEXT: call void 
@__kmpc_for_static_fini(ptr @1, i32 [[TMP14]]) +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4 +// CHECK1-NEXT: call void @__kmpc_barrier(ptr @2, i32 [[TMP16]]) +// CHECK1: for.inc: +// CHECK1: for.end: +// CHECK1-NEXT: ret void // // // CHECK2-LABEL: define {{[^@]+}}@_Z12outline_declv @@ -173,6 +162,12 @@ // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK2-NEXT: [[K:%.*]] = alloca i32, align 4 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTGLOBAL_TID__ADDR]], metadata [[META27:![0-9]+]], metadata !DIExpression()), !dbg [[DBG28:![0-9]+]] @@ -180,39 +175,24 @@ // CHECK2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTBOUND_TID__ADDR]], metadata [[META29:![0-9]+]], metadata !DIExpression()), !dbg [[DBG28]] // CHECK2-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata ptr [[I_ADDR]], metadata [[META30:![0-9]+]], metadata !DIExpression()), !dbg [[DBG31:![0-9]+]] -// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8, !dbg [[DBG32:![0-9]+]] -// CHECK2-NEXT: store i32 0, ptr [[TMP0]], align 4, !dbg [[DBG33:![0-9]+]] -// CHECK2-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG35:![0-9]+]] -// CHECK2: for.cond: -// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !dbg [[DBG36:![0-9]+]] -// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 10, !dbg [[DBG38:![0-9]+]] -// CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END7:%.*]], !dbg [[DBG39:![0-9]+]] // CHECK2: for.body: -// CHECK2-NEXT: call void @llvm.dbg.declare(metadata ptr [[K]], metadata [[META40:![0-9]+]], metadata !DIExpression()), !dbg [[DBG43:![0-9]+]] -// CHECK2-NEXT: store i32 0, ptr [[K]], align 4, !dbg [[DBG44:![0-9]+]] -// CHECK2-NEXT: br label [[FOR_COND1:%.*]], !dbg [[DBG46:![0-9]+]] -// CHECK2: for.cond1: -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[K]], align 4, !dbg [[DBG47:![0-9]+]] -// CHECK2-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP2]], 5, !dbg [[DBG49:![0-9]+]] -// CHECK2-NEXT: br i1 [[CMP2]], label [[FOR_BODY3:%.*]], label [[FOR_END:%.*]], !dbg [[DBG50:![0-9]+]] -// CHECK2: for.body3: -// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[K]], align 4, !dbg [[DBG51:![0-9]+]] -// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP3]], 1, !dbg [[DBG51]] -// CHECK2-NEXT: store i32 [[INC]], ptr [[K]], align 4, !dbg [[DBG51]] -// CHECK2-NEXT: br label [[FOR_INC:%.*]], !dbg [[DBG53:![0-9]+]] +// CHECK2: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !dbg !50 +// CHECK2: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !dbg !50 +// CHECK2: call void @__kmpc_for_static_init_4(ptr @1, i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1), !dbg !51 +// CHECK2: omp.inner.for.cond: +// CHECK2: omp.inner.for.body: +// CHECK2: omp.body.continue: +// CHECK2: omp.inner.for.inc: +// CHECK2: omp.inner.for.end: +// 
CHECK2: omp.loop.exit: +// CHECK2-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !dbg !51 +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4, !dbg !51 +// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @3, i32 [[TMP13]]), !dbg !58 +// CHECK2-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !dbg !58 +// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4, !dbg !58 +// CHECK2-NEXT: call void @__kmpc_barrier(ptr @4, i32 [[TMP15]]), !dbg !58 // CHECK2: for.inc: -// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[K]], align 4, !dbg [[DBG54:![0-9]+]] -// CHECK2-NEXT: [[INC4:%.*]] = add nsw i32 [[TMP4]], 1, !dbg [[DBG54]] -// CHECK2-NEXT: store i32 [[INC4]], ptr [[K]], align 4, !dbg [[DBG54]] -// CHECK2-NEXT: br label [[FOR_COND1]], !dbg [[DBG55:![0-9]+]], !llvm.loop [[LOOP56:![0-9]+]] // CHECK2: for.end: -// CHECK2-NEXT: br label [[FOR_INC5:%.*]], !dbg [[DBG59:![0-9]+]] -// CHECK2: for.inc5: -// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4, !dbg [[DBG60:![0-9]+]] -// CHECK2-NEXT: [[INC6:%.*]] = add nsw i32 [[TMP5]], 1, !dbg [[DBG60]] -// CHECK2-NEXT: store i32 [[INC6]], ptr [[TMP0]], align 4, !dbg [[DBG60]] -// CHECK2-NEXT: br label [[FOR_COND]], !dbg [[DBG61:![0-9]+]], !llvm.loop [[LOOP62:![0-9]+]] -// CHECK2: for.end7: // CHECK2-NEXT: ret void, !dbg [[DBG64:![0-9]+]] // // @@ -255,6 +235,12 @@ // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: [[RES_ADDR:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK2-NEXT: [[K:%.*]] = alloca i32, align 4 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTGLOBAL_TID__ADDR]], metadata [[META84:![0-9]+]], metadata !DIExpression()), !dbg [[DBG85:![0-9]+]] @@ -273,32 +259,31 @@ // CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10, !dbg [[DBG97:![0-9]+]] // CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END7:%.*]], !dbg [[DBG98:![0-9]+]] // CHECK2: for.body: -// CHECK2-NEXT: call void @llvm.dbg.declare(metadata ptr [[K]], metadata [[META99:![0-9]+]], metadata !DIExpression()), !dbg [[DBG103:![0-9]+]] -// CHECK2-NEXT: store i32 0, ptr [[K]], align 4, !dbg [[DBG103]] -// CHECK2-NEXT: br label [[FOR_COND1:%.*]], !dbg [[DBG104:![0-9]+]] -// CHECK2: for.cond1: -// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[K]], align 4, !dbg [[DBG105:![0-9]+]] -// CHECK2-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP3]], 5, !dbg [[DBG107:![0-9]+]] -// CHECK2-NEXT: br i1 [[CMP2]], label [[FOR_BODY3:%.*]], label [[FOR_END:%.*]], !dbg [[DBG108:![0-9]+]] -// CHECK2: for.body3: -// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP1]], align 4, !dbg [[DBG109:![0-9]+]] -// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1, !dbg [[DBG109]] -// CHECK2-NEXT: store i32 [[INC]], ptr [[TMP1]], align 4, !dbg [[DBG109]] -// CHECK2-NEXT: br label [[FOR_INC:%.*]], !dbg [[DBG111:![0-9]+]] +// CHECK2: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG103:![0-9]+]] +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !dbg [[DBG103:![0-9]+]] +// CHECK2-NEXT: call void 
@__kmpc_for_static_init_4(ptr @8, i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1), !dbg [[DBG103:![0-9]+]] +// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !dbg [[DBG103:![0-9]+]] +// CHECK2-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP5]], 4, !dbg [[DBG103:![0-9]+]] +// CHECK2: omp.inner.for.cond: +// CHECK2: omp.inner.for.body: +// CHECK2: omp.body.continue: +// CHECK2: omp.inner.for.inc: +// CHECK2: omp.inner.for.end: +// CHECK2: omp.loop.exit: +// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !dbg !111 +// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4, !dbg !111 +// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @10, i32 [[TMP14]]), !dbg !118 +// CHECK2-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !dbg !118 +// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4, !dbg !118 +// CHECK2-NEXT: call void @__kmpc_barrier(ptr @11, i32 [[TMP16]]), !dbg !118 +// CHECK2-NEXT br label [[FOR_INC]], !dbg !119 // CHECK2: for.inc: -// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[K]], align 4, !dbg [[DBG112:![0-9]+]] -// CHECK2-NEXT: [[INC4:%.*]] = add nsw i32 [[TMP5]], 1, !dbg [[DBG112]] -// CHECK2-NEXT: store i32 [[INC4]], ptr [[K]], align 4, !dbg [[DBG112]] -// CHECK2-NEXT: br label [[FOR_COND1]], !dbg [[DBG113:![0-9]+]], !llvm.loop [[LOOP114:![0-9]+]] +// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP0]], align 4, !dbg [[DBG112:![0-9]+]] +// CHECK2-NEXT: [[INC4:%.*]] = add nsw i32 [[TMP17]], 1, !dbg [[DBG112]] +// CHECK2-NEXT: store i32 [[INC4]], ptr [[TMP0]], align 4, !dbg [[DBG112]] +// CHECK2-NEXT: br label [[FOR_COND]], !dbg [[DBG113:![0-9]+]], !llvm.loop [[DBG113:![0-9]+]] // CHECK2: for.end: -// CHECK2-NEXT: br label [[FOR_INC5:%.*]], !dbg [[DBG116:![0-9]+]] -// CHECK2: for.inc5: -// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP0]], align 4, !dbg [[DBG117:![0-9]+]] -// CHECK2-NEXT: [[INC6:%.*]] = add nsw i32 [[TMP6]], 1, !dbg [[DBG117]] -// CHECK2-NEXT: store i32 [[INC6]], ptr [[TMP0]], align 4, !dbg [[DBG117]] -// CHECK2-NEXT: br label [[FOR_COND]], !dbg [[DBG118:![0-9]+]], !llvm.loop [[LOOP119:![0-9]+]] -// CHECK2: for.end7: -// CHECK2-NEXT: ret void, !dbg [[DBG121:![0-9]+]] +// CHECK2-NEXT: ret void, !dbg [[DBG114:![0-9]+]] // // // CHECK2-LABEL: define {{[^@]+}}@_Z11inline_declv.omp_outlined @@ -362,10 +347,14 @@ // CHECK3-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8 // CHECK3-NEXT: [[AGG_CAPTURED1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 4 // CHECK3-NEXT: [[DOTCOUNT_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[P_LASTITER:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[P_LOWERBOUND:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[P_UPPERBOUND:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[P_STRIDE:%.*]] = alloca i32, align 4 // CHECK3-NEXT: br label [[OMP_PAR_REGION:%.*]] // CHECK3: omp.par.region: // CHECK3-NEXT: store i32 0, ptr [[LOADGEP_I]], align 4 -// CHECK3-NEXT: br label [[FOR_COND:%.*]] +// CHECK3-NEXT: br label [[FOR_COND:]] // CHECK3: for.cond: // CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[LOADGEP_I]], align 4 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10 @@ -387,26 +376,35 @@ // CHECK3-NEXT: [[DOTCOUNT:%.*]] = load i32, ptr [[DOTCOUNT_ADDR]], align 4 // CHECK3-NEXT: br label [[OMP_LOOP_PREHEADER:%.*]] // CHECK3: omp_loop.preheader: +// CHECK3-NEXT: store i32 0, ptr [[P_LOWERBOUND]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = sub i32 [[DOTCOUNT]], 1 +// CHECK3-NEXT: 
store i32 [[TMP6]], ptr [[P_UPPERBOUND]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[P_STRIDE]], align 4 +// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @1) +// CHECK3-NEXT: call void @__kmpc_for_static_init_4u(ptr @1, i32 [[OMP_GLOBAL_THREAD_NUM2]], i32 34, ptr [[P_LASTITER]], ptr [[P_LOWERBOUND]], ptr [[P_UPPERBOUND]], ptr [[P_STRIDE]], i32 1, i32 0) +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[P_LOWERBOUND]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[P_UPPERBOUND]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = sub i32 [[TMP8]], [[TMP7]] +// CHECK3-NEXT: [[TMP10:%.*]] = add i32 [[TMP9]], 1 // CHECK3-NEXT: br label [[OMP_LOOP_HEADER:%.*]] // CHECK3: omp_loop.header: // CHECK3-NEXT: [[OMP_LOOP_IV:%.*]] = phi i32 [ 0, [[OMP_LOOP_PREHEADER]] ], [ [[OMP_LOOP_NEXT:%.*]], [[OMP_LOOP_INC:%.*]] ] // CHECK3-NEXT: br label [[OMP_LOOP_COND:%.*]] // CHECK3: omp_loop.cond: -// CHECK3-NEXT: [[OMP_LOOP_CMP:%.*]] = icmp ult i32 [[OMP_LOOP_IV]], [[DOTCOUNT]] +// CHECK3-NEXT: [[OMP_LOOP_CMP:%.*]] = icmp ult i32 [[OMP_LOOP_IV]], [[TMP10]] // CHECK3-NEXT: br i1 [[OMP_LOOP_CMP]], label [[OMP_LOOP_BODY:%.*]], label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp_loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @1, i32 [[OMP_GLOBAL_THREAD_NUM2]]) +// CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @1) +// CHECK3-NEXT: call void @__kmpc_barrier(ptr @2, i32 [[OMP_GLOBAL_THREAD_NUM3]]) // CHECK3-NEXT: br label [[OMP_LOOP_AFTER:%.*]] // CHECK3: omp_loop.after: -// CHECK3-NEXT: br label [[FOR_INC:%.*]] // CHECK3: for.inc: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[LOADGEP_I]], align 4 -// CHECK3-NEXT: [[INC2:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK3-NEXT: store i32 [[INC2]], ptr [[LOADGEP_I]], align 4 -// CHECK3-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] // CHECK3: omp_loop.body: -// CHECK3-NEXT: call void @__captured_stmt.1(ptr [[LOADGEP_K]], i32 [[OMP_LOOP_IV]], ptr [[AGG_CAPTURED1]]) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[LOADGEP_K]], align 4 -// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK3-NEXT: [[TMP12:%.*]] = add i32 [[OMP_LOOP_IV]], [[TMP7]] +// CHECK3-NEXT: call void @__captured_stmt.1(ptr [[LOADGEP_K]], i32 [[TMP12]], ptr [[AGG_CAPTURED1]]) +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[LOADGEP_K]], align 4 +// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1 // CHECK3-NEXT: store i32 [[INC]], ptr [[LOADGEP_K]], align 4 // CHECK3-NEXT: br label [[OMP_LOOP_INC]] // CHECK3: omp_loop.inc: @@ -513,20 +511,16 @@ // CHECK3-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8 // CHECK3-NEXT: [[AGG_CAPTURED1:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 4 // CHECK3-NEXT: [[DOTCOUNT_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[P_LASTITER:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[P_LOWERBOUND:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[P_UPPERBOUND:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[P_STRIDE:%.*]] = alloca i32, align 4 // CHECK3-NEXT: br label [[OMP_PAR_REGION:%.*]] // CHECK3: omp.par.region: -// CHECK3-NEXT: store i32 0, ptr [[LOADGEP_I]], align 4 -// CHECK3-NEXT: br label [[FOR_COND:%.*]] // CHECK3: for.cond: -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[LOADGEP_I]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10 -// CHECK3-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] // CHECK3: for.end: -// CHECK3-NEXT: br label [[OMP_PAR_REGION_PARALLEL_AFTER:%.*]] // CHECK3: 
omp.par.region.parallel.after: -// CHECK3-NEXT: br label [[OMP_PAR_PRE_FINALIZE:%.*]] // CHECK3: omp.par.pre_finalize: -// CHECK3-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]] // CHECK3: for.body: // CHECK3-NEXT: store i32 0, ptr [[K]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], ptr [[AGG_CAPTURED]], i32 0, i32 0 @@ -538,31 +532,15 @@ // CHECK3-NEXT: [[DOTCOUNT:%.*]] = load i32, ptr [[DOTCOUNT_ADDR]], align 4 // CHECK3-NEXT: br label [[OMP_LOOP_PREHEADER:%.*]] // CHECK3: omp_loop.preheader: -// CHECK3-NEXT: br label [[OMP_LOOP_HEADER:%.*]] // CHECK3: omp_loop.header: // CHECK3-NEXT: [[OMP_LOOP_IV:%.*]] = phi i32 [ 0, [[OMP_LOOP_PREHEADER]] ], [ [[OMP_LOOP_NEXT:%.*]], [[OMP_LOOP_INC:%.*]] ] // CHECK3-NEXT: br label [[OMP_LOOP_COND:%.*]] // CHECK3: omp_loop.cond: -// CHECK3-NEXT: [[OMP_LOOP_CMP:%.*]] = icmp ult i32 [[OMP_LOOP_IV]], [[DOTCOUNT]] -// CHECK3-NEXT: br i1 [[OMP_LOOP_CMP]], label [[OMP_LOOP_BODY:%.*]], label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp_loop.exit: -// CHECK3-NEXT: br label [[OMP_LOOP_AFTER:%.*]] // CHECK3: omp_loop.after: -// CHECK3-NEXT: br label [[FOR_INC:%.*]] // CHECK3: for.inc: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[LOADGEP_I]], align 4 -// CHECK3-NEXT: [[INC2:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK3-NEXT: store i32 [[INC2]], ptr [[LOADGEP_I]], align 4 -// CHECK3-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK3: omp_loop.body: -// CHECK3-NEXT: call void @__captured_stmt.3(ptr [[K]], i32 [[OMP_LOOP_IV]], ptr [[AGG_CAPTURED1]]) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[LOADGEP_RES]], align 4 -// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1 -// CHECK3-NEXT: store i32 [[INC]], ptr [[LOADGEP_RES]], align 4 -// CHECK3-NEXT: br label [[OMP_LOOP_INC]] // CHECK3: omp_loop.inc: -// CHECK3-NEXT: [[OMP_LOOP_NEXT]] = add nuw i32 [[OMP_LOOP_IV]], 1 -// CHECK3-NEXT: br label [[OMP_LOOP_HEADER]] // CHECK3: omp.par.outlined.exit.exitStub: // CHECK3-NEXT: ret void // @@ -665,22 +643,18 @@ // CHECK4-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8 // CHECK4-NEXT: [[AGG_CAPTURED1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 4 // CHECK4-NEXT: [[DOTCOUNT_ADDR:%.*]] = alloca i32, align 4 +// CHECK4-NEXT: [[P_LASTITER:%.*]] = alloca i32, align 4 +// CHECK4-NEXT: [[P_LOWERBOUND:%.*]] = alloca i32, align 4 +// CHECK4-NEXT: [[P_UPPERBOUND:%.*]] = alloca i32, align 4 +// CHECK4-NEXT: [[P_STRIDE:%.*]] = alloca i32, align 4 // CHECK4-NEXT: br label [[OMP_PAR_REGION:%.*]] // CHECK4: omp.par.region: -// CHECK4-NEXT: store i32 0, ptr [[LOADGEP_I]], align 4, !dbg [[DBG23:![0-9]+]] -// CHECK4-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG23]] // CHECK4: for.cond: -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[LOADGEP_I]], align 4, !dbg [[DBG25:![0-9]+]] -// CHECK4-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10, !dbg [[DBG25]] -// CHECK4-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]], !dbg [[DBG23]] // CHECK4: for.end: -// CHECK4-NEXT: br label [[OMP_PAR_REGION_PARALLEL_AFTER:%.*]], !dbg [[DBG27:![0-9]+]] // CHECK4: omp.par.region.parallel.after: -// CHECK4-NEXT: br label [[OMP_PAR_PRE_FINALIZE:%.*]] // CHECK4: omp.par.pre_finalize: -// CHECK4-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]], !dbg [[DBG27]] // CHECK4: for.body: -// CHECK4-NEXT: store i32 0, ptr [[LOADGEP_K]], align 4, !dbg [[DBG28:![0-9]+]] +// CHECK4: store i32 0, ptr [[LOADGEP_K]], align 4, !dbg [[DBG28:![0-9]+]] // CHECK4-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, 
i32 0, !dbg [[DBG28]] // CHECK4-NEXT: store ptr [[LOADGEP_K]], ptr [[TMP3]], align 8, !dbg [[DBG28]] // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], ptr [[AGG_CAPTURED1]], i32 0, i32 0, !dbg [[DBG28]] @@ -690,31 +664,28 @@ // CHECK4-NEXT: [[DOTCOUNT:%.*]] = load i32, ptr [[DOTCOUNT_ADDR]], align 4, !dbg [[DBG28]] // CHECK4-NEXT: br label [[OMP_LOOP_PREHEADER:%.*]], !dbg [[DBG28]] // CHECK4: omp_loop.preheader: +// CHECK4-NEXT: store i32 0, ptr [[P_LOWERBOUND]], align 4, !dbg [[DBG28]] +// CHECK4-NEXT: [[TMP6:%.*]] = sub i32 [[DOTCOUNT]], 1, !dbg [[DBG28]] +// CHECK4-NEXT: store i32 [[TMP6]], ptr [[P_UPPERBOUND]], align 4, !dbg [[DBG28]] +// CHECK4-NEXT: store i32 1, ptr [[P_STRIDE]], align 4, !dbg [[DBG28]] +// CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @3), !dbg [[DBG28]] +// CHECK4-NEXT: call void @__kmpc_for_static_init_4u(ptr @3, i32 [[OMP_GLOBAL_THREAD_NUM2]], i32 34, ptr [[P_LASTITER]], ptr [[P_LOWERBOUND]], ptr [[P_UPPERBOUND]], ptr [[P_STRIDE]], i32 1, i32 0), !dbg [[DBG28]] +// CHECK4-NEXT: [[TMP7:%.*]] = load i32, ptr [[P_LOWERBOUND]], align 4, !dbg [[DBG28]] +// CHECK4-NEXT: [[TMP8:%.*]] = load i32, ptr [[P_UPPERBOUND]], align 4, !dbg [[DBG28]] +// CHECK4-NEXT: [[TMP9:%.*]] = sub i32 [[TMP8]], [[TMP7]], !dbg [[DBG28]] +// CHECK4-NEXT: [[TMP10:%.*]] = add i32 [[TMP9]], 1, !dbg [[DBG28]] // CHECK4-NEXT: br label [[OMP_LOOP_HEADER:%.*]], !dbg [[DBG28]] // CHECK4: omp_loop.header: -// CHECK4-NEXT: [[OMP_LOOP_IV:%.*]] = phi i32 [ 0, [[OMP_LOOP_PREHEADER]] ], [ [[OMP_LOOP_NEXT:%.*]], [[OMP_LOOP_INC:%.*]] ], !dbg [[DBG28]] -// CHECK4-NEXT: br label [[OMP_LOOP_COND:%.*]], !dbg [[DBG28]] // CHECK4: omp_loop.cond: -// CHECK4-NEXT: [[OMP_LOOP_CMP:%.*]] = icmp ult i32 [[OMP_LOOP_IV]], [[DOTCOUNT]], !dbg [[DBG28]] -// CHECK4-NEXT: br i1 [[OMP_LOOP_CMP]], label [[OMP_LOOP_BODY:%.*]], label [[OMP_LOOP_EXIT:%.*]], !dbg [[DBG28]] // CHECK4: omp_loop.exit: +// CHECK4: call void @__kmpc_for_static_fini(ptr @3, i32 [[OMP_GLOBAL_THREAD_NUM2]]), !dbg [[DBG28]] +// CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @3), !dbg [[DBG33:![0-9]+]] +// CHECK4-NEXT: call void @__kmpc_barrier(ptr @4, i32 [[OMP_GLOBAL_THREAD_NUM3]]), !dbg [[DBG33]] // CHECK4-NEXT: br label [[OMP_LOOP_AFTER:%.*]], !dbg [[DBG28]] // CHECK4: omp_loop.after: -// CHECK4-NEXT: br label [[FOR_INC:%.*]], !dbg [[DBG33:![0-9]+]] // CHECK4: for.inc: -// CHECK4-NEXT: [[TMP6:%.*]] = load i32, ptr [[LOADGEP_I]], align 4, !dbg [[DBG25]] -// CHECK4-NEXT: [[INC2:%.*]] = add nsw i32 [[TMP6]], 1, !dbg [[DBG25]] -// CHECK4-NEXT: store i32 [[INC2]], ptr [[LOADGEP_I]], align 4, !dbg [[DBG25]] -// CHECK4-NEXT: br label [[FOR_COND]], !dbg [[DBG25]], !llvm.loop [[LOOP34:![0-9]+]] // CHECK4: omp_loop.body: -// CHECK4-NEXT: call void @__captured_stmt.1(ptr [[LOADGEP_K]], i32 [[OMP_LOOP_IV]], ptr [[AGG_CAPTURED1]]), !dbg [[DBG28]] -// CHECK4-NEXT: [[TMP7:%.*]] = load i32, ptr [[LOADGEP_K]], align 4, !dbg [[DBG36:![0-9]+]] -// CHECK4-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1, !dbg [[DBG36]] -// CHECK4-NEXT: store i32 [[INC]], ptr [[LOADGEP_K]], align 4, !dbg [[DBG36]] -// CHECK4-NEXT: br label [[OMP_LOOP_INC]], !dbg [[DBG28]] // CHECK4: omp_loop.inc: -// CHECK4-NEXT: [[OMP_LOOP_NEXT]] = add nuw i32 [[OMP_LOOP_IV]], 1, !dbg [[DBG28]] -// CHECK4-NEXT: br label [[OMP_LOOP_HEADER]], !dbg [[DBG28]] // CHECK4: omp.par.outlined.exit.exitStub: // CHECK4-NEXT: ret void // @@ -826,57 +797,29 @@ // CHECK4-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_1:%.*]], 
align 8 // CHECK4-NEXT: [[AGG_CAPTURED1:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 4 // CHECK4-NEXT: [[DOTCOUNT_ADDR:%.*]] = alloca i32, align 4 +// CHECK4-NEXT: [[P_LASTITER:%.*]] = alloca i32, align 4 +// CHECK4-NEXT: [[P_LOWERBOUND:%.*]] = alloca i32, align 4 +// CHECK4-NEXT: [[P_UPPERBOUND:%.*]] = alloca i32, align 4 +// CHECK4-NEXT: [[P_STRIDE:%.*]] = alloca i32, align 4 // CHECK4-NEXT: br label [[OMP_PAR_REGION:%.*]] // CHECK4: omp.par.region: -// CHECK4-NEXT: store i32 0, ptr [[LOADGEP_I]], align 4, !dbg [[DBG86:![0-9]+]] -// CHECK4-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG86]] // CHECK4: for.cond: -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[LOADGEP_I]], align 4, !dbg [[DBG88:![0-9]+]] -// CHECK4-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10, !dbg [[DBG88]] -// CHECK4-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]], !dbg [[DBG86]] // CHECK4: for.end: -// CHECK4-NEXT: br label [[OMP_PAR_REGION_PARALLEL_AFTER:%.*]], !dbg [[DBG90:![0-9]+]] // CHECK4: omp.par.region.parallel.after: -// CHECK4-NEXT: br label [[OMP_PAR_PRE_FINALIZE:%.*]] // CHECK4: omp.par.pre_finalize: -// CHECK4-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]], !dbg [[DBG90]] // CHECK4: for.body: -// CHECK4-NEXT: call void @llvm.dbg.declare(metadata ptr [[K]], metadata [[META91:![0-9]+]], metadata !DIExpression()), !dbg [[DBG95:![0-9]+]] -// CHECK4-NEXT: store i32 0, ptr [[K]], align 4, !dbg [[DBG95]] -// CHECK4-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], ptr [[AGG_CAPTURED]], i32 0, i32 0, !dbg [[DBG95]] -// CHECK4-NEXT: store ptr [[K]], ptr [[TMP3]], align 8, !dbg [[DBG95]] -// CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[AGG_CAPTURED1]], i32 0, i32 0, !dbg [[DBG95]] -// CHECK4-NEXT: [[TMP5:%.*]] = load i32, ptr [[K]], align 4, !dbg [[DBG96:![0-9]+]] -// CHECK4-NEXT: store i32 [[TMP5]], ptr [[TMP4]], align 4, !dbg [[DBG95]] +// CHECK4: store i32 [[TMP5]], ptr [[TMP4]], align 4, !dbg [[DBG95:![0-9]+]] // CHECK4-NEXT: call void @__captured_stmt.2(ptr [[DOTCOUNT_ADDR]], ptr [[AGG_CAPTURED]]), !dbg [[DBG95]] -// CHECK4-NEXT: [[DOTCOUNT:%.*]] = load i32, ptr [[DOTCOUNT_ADDR]], align 4, !dbg [[DBG95]] -// CHECK4-NEXT: br label [[OMP_LOOP_PREHEADER:%.*]], !dbg [[DBG95]] // CHECK4: omp_loop.preheader: -// CHECK4-NEXT: br label [[OMP_LOOP_HEADER:%.*]], !dbg [[DBG95]] // CHECK4: omp_loop.header: -// CHECK4-NEXT: [[OMP_LOOP_IV:%.*]] = phi i32 [ 0, [[OMP_LOOP_PREHEADER]] ], [ [[OMP_LOOP_NEXT:%.*]], [[OMP_LOOP_INC:%.*]] ], !dbg [[DBG95]] -// CHECK4-NEXT: br label [[OMP_LOOP_COND:%.*]], !dbg [[DBG95]] // CHECK4: omp_loop.cond: -// CHECK4-NEXT: [[OMP_LOOP_CMP:%.*]] = icmp ult i32 [[OMP_LOOP_IV]], [[DOTCOUNT]], !dbg [[DBG95]] -// CHECK4-NEXT: br i1 [[OMP_LOOP_CMP]], label [[OMP_LOOP_BODY:%.*]], label [[OMP_LOOP_EXIT:%.*]], !dbg [[DBG95]] // CHECK4: omp_loop.exit: -// CHECK4-NEXT: br label [[OMP_LOOP_AFTER:%.*]], !dbg [[DBG95]] // CHECK4: omp_loop.after: -// CHECK4-NEXT: br label [[FOR_INC:%.*]], !dbg [[DBG97:![0-9]+]] // CHECK4: for.inc: -// CHECK4-NEXT: [[TMP6:%.*]] = load i32, ptr [[LOADGEP_I]], align 4, !dbg [[DBG88]] -// CHECK4-NEXT: [[INC2:%.*]] = add nsw i32 [[TMP6]], 1, !dbg [[DBG88]] -// CHECK4-NEXT: store i32 [[INC2]], ptr [[LOADGEP_I]], align 4, !dbg [[DBG88]] -// CHECK4-NEXT: br label [[FOR_COND]], !dbg [[DBG88]], !llvm.loop [[LOOP98:![0-9]+]] // CHECK4: omp_loop.body: -// CHECK4-NEXT: call void @__captured_stmt.3(ptr [[K]], i32 [[OMP_LOOP_IV]], ptr [[AGG_CAPTURED1]]), !dbg [[DBG95]] -// CHECK4-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[LOADGEP_RES]], align 4, !dbg [[DBG99:![0-9]+]] -// CHECK4-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1, !dbg [[DBG99]] -// CHECK4-NEXT: store i32 [[INC]], ptr [[LOADGEP_RES]], align 4, !dbg [[DBG99]] -// CHECK4-NEXT: br label [[OMP_LOOP_INC]], !dbg [[DBG95]] +// CHECK4-NEXT: [[TMP12:%.*]] = add i32 [[OMP_LOOP_IV:%.*]], [[TMP7:%.*]], !dbg [[DBG98:![-9]+]] +// CHECK4: call void @__captured_stmt.3(ptr [[K]], i32 [[TMP12]], ptr [[AGG_CAPTURED1]]), !dbg [[DBG96:![0-9]+]] // CHECK4: omp_loop.inc: -// CHECK4-NEXT: [[OMP_LOOP_NEXT]] = add nuw i32 [[OMP_LOOP_IV]], 1, !dbg [[DBG95]] -// CHECK4-NEXT: br label [[OMP_LOOP_HEADER]], !dbg [[DBG95]] // CHECK4: omp.par.outlined.exit.exitStub: // CHECK4-NEXT: ret void // Index: clang/test/OpenMP/loop_bind_messages.cpp =================================================================== --- /dev/null +++ clang/test/OpenMP/loop_bind_messages.cpp @@ -0,0 +1,76 @@ +#ifndef HEADER +#define HEADER +// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -verify %s + +#define NNN 50 +int aaa[NNN]; + +void parallel_loop() { + #pragma omp parallel + { + #pragma omp loop + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } + } +} + +void teams_loop() { + int var1, var2; + + #pragma omp teams + { + #pragma omp loop bind(teams) + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } + + #pragma omp loop bind(teams) collapse(2) private(var1) + for (int i = 0 ; i < 3 ; i++) { + for (int j = 0 ; j < NNN ; j++) { + var1 += aaa[j]; + } + } + } +} + +void orphan_loop_with_bind() { + #pragma omp loop bind(parallel) + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } +} + +void orphan_loop_no_bind() { + #pragma omp loop // expected-error{{expected 'bind' clause for loop construct without an enclosing OpenMP construct}} + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } +} + +void teams_loop_reduction() { + int total = 0; + + #pragma omp teams + { + #pragma omp loop bind(teams) + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } + + #pragma omp loop bind(teams) reduction(+:total) // expected-error{{reduction clause not handled with '#pragma omp loop bind(teams)'}} + for (int j = 0 ; j < NNN ; j++) { + total+=aaa[j]; + } + } +} + +int main(int argc, char *argv[]) { + parallel_loop(); + teams_loop(); + orphan_loop_with_bind(); + orphan_loop_no_bind(); + teams_loop_reduction(); +} + +#endif Index: clang/test/OpenMP/loop_bind_enclosed.cpp =================================================================== --- /dev/null +++ clang/test/OpenMP/loop_bind_enclosed.cpp @@ -0,0 +1,140 @@ +// expected-no-diagnostics +// RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ %s -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s + +#define NNN 50 +int aaa[NNN]; + +void parallel_taskgroup_loop() { + #pragma omp parallel + { + #pragma omp taskgroup + for (int i = 0 ; i < 2 ; i++) { + #pragma omp loop + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } + } + } +} +// CHECK-LABEL: define {{[^@]+}}@_Z23parallel_taskgroup_loopv +// CHECK-NEXT: entry: +// CHECK-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @1, i32 0, ptr @{{[_A-Za-z0-9]*}}.omp_outlined{{[.]*}}) +// CHECK-NEXT: ret void +// CHECK-LABEL: define {{[^@]+}}@{{[_A-Za-z0-9]*}}.omp_outlined +// CHECK-NEXT: entry: +// CHECK: call void @__kmpc_taskgroup(ptr @1, i32 %1) +// CHECK-LABEL: omp.inner.for.end: +// CHECK-LABEL: for.end: +// CHECK-NEXT: call void @__kmpc_end_taskgroup(ptr @1, i32 %1) +// CHECK-NEXT: ret void + +void parallel_taskwait_loop() { + #pragma omp parallel + { + #pragma omp taskwait + for (int i = 0 ; i < 2 ; i++) { + #pragma omp loop + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } + } + } +} +// CHECK-LABEL: define {{[^@]+}}@_Z22parallel_taskwait_loopv +// CHECK-NEXT: entry: +// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @1, i32 0, ptr @{{[_A-Za-z0-9]*}}.omp_outlined{{[.]*}}) +// CHECK-NEXT: ret void +// CHECK-LABEL: define {{[^@]+}}@{{[_A-Za-z0-9]*}}.omp_outlined +// CHECK: call i32 @__kmpc_omp_taskwait(ptr @1, i32 %1) +// CHECK-LABEL: omp.loop.exit: +// CHECK-NEXT: call void @__kmpc_for_static_fini(ptr @2, i32 %1) +// CHECK-NEXT: call void @__kmpc_barrier(ptr @3, i32 %1) +// CHECK-LABEL: for.end: +// CHECK-NEXT: ret void + +void parallel_single_loop() { + #pragma omp parallel + { + for (int i = 0 ; i < 2 ; i++) { + #pragma omp single + #pragma omp loop + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } + } + } +} +// CHECK-LABEL: define {{[^@]+}}@_Z20parallel_single_loopv +// CHECK-NEXT: entry: +// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @1, i32 0, ptr @{{[_A-Za-z0-9]*}}.omp_outlined{{[.]*}}) +// CHECK-NEXT: ret void +// CHECK-LABEL: define {{[^@]+}}@{{[_A-Za-z0-9]*}}.omp_outlined +// CHECK-NEXT: entry: +// CHECK: [[TMP:%.*]] = call i32 @__kmpc_single(ptr @1, i32 %2) +// CHECK-LABEL: omp.inner.for.end: +// CHECK: call void @__kmpc_end_single(ptr @1, i32 %2) +// CHECK-LABEL: for.end: +// CHECK-NEXT: ret void + +void parallel_order_loop() { + #pragma omp parallel + { + #pragma omp for order(concurrent) + { + for (int i = 0 ; i < 2 ; i++) { + #pragma omp loop + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } + } + } + } +} +// CHECK-LABEL: define {{[^@]+}}@_Z19parallel_order_loopv +// CHECK-NEXT: entry: +// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @1, i32 0, ptr @{{[_A-Za-z0-9]*}}.omp_outlined{{[.]*}}) +// CHECK-NEXT: ret void +// CHECK-LABEL: define {{[^@]+}}@{{[_A-Za-z0-9]*}}.omp_outlined +// CHECK-NEXT: entry: +// CHECK: call void @__kmpc_for_static_fini(ptr @2, i32 %1) +// CHECK-NEXT: call void @__kmpc_barrier(ptr @3, i32 %1) +// CHECK-NEXT: ret void + + +void parallel_cancel_loop(bool flag) { + #pragma omp ordered + for (int i = 0 ; i < 2 ; i++) { + #pragma omp parallel + { + #pragma omp cancel parallel if(flag) + aaa[0] = 0; + #pragma omp loop bind(parallel) + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } + } + } +} +// CHECK-LABEL: define {{[^@]+}}@_Z20parallel_cancel_loopb +// CHECK-NEXT: entry: +// CHECK-NEXT: [[FLAG_DOTADDR:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK: call void @__kmpc_ordered(ptr @1, i32 %0) +// CHECK-LABEL: for.body: +// CHECK-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @1, i32 1, ptr @_Z20parallel_cancel_loopb.omp_outlined, ptr [[FLAG_DOTADDR]]) +// CHECK-LABEL: for.end: +// CHECK-NEXT: call void @__kmpc_end_ordered(ptr @1, i32 %0) +// CHECK-NEXT: ret void + +int +main(int argc, char *argv[]) { + parallel_taskgroup_loop(); + parallel_taskwait_loop(); + parallel_single_loop(); + parallel_order_loop(); + parallel_cancel_loop(true); + parallel_cancel_loop(false); + + return 0; +} Index: clang/test/OpenMP/loop_bind_codegen.cpp =================================================================== --- /dev/null +++ clang/test/OpenMP/loop_bind_codegen.cpp @@ -0,0 +1,141 @@ +// expected-no-diagnostics +// RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ %s -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s + + +#define NNN 50 +int aaa[NNN]; + +void parallel_loop() { + #pragma omp parallel + { + #pragma omp loop bind(parallel) + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } + } +} + +void parallel_loop_orphan() { + #pragma omp loop bind(parallel) + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } +} + + +void teams_loop() { + #pragma omp teams + { + #pragma omp loop bind(teams) + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } + } +} + +void thread_loop() { + #pragma omp parallel + { + #pragma omp loop bind(thread) + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } + } +} + +void thread_loop_orphan() { + #pragma omp loop bind(thread) + for (int j = 0 ; j < NNN ; j++) { + aaa[j] = j*NNN; + } +} + +int main() { + parallel_loop(); + parallel_loop_orphan(); + teams_loop(); + thread_loop(); + thread_loop_orphan(); + + return 0; +} +// CHECK-LABEL: define {{[^@]+}}@_Z13parallel_loopv +// CHECK-NEXT: entry: +// CHECK-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @3, i32 0, ptr @{{[_A-Za-z0-9]*}}.omp_outlined{{[.]*}}) +// CHECK-LABEL: define {{[^@]+}}@{{[_A-Za-z0-9]*}}.omp_outlined +// CHECK-NEXT: entry: +// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK: call void @__kmpc_for_static_init_4(ptr @1, i32 %1, i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK-LABEL: cond.true: +// CHECK-NEXT: br label [[COND_END:%.*]] +// CHECK-LABEL: cond.false: +// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK-NEXT: br label [[COND_END]] +// CHECK: omp.inner.for.cond: +// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK: omp.inner.for.body: +// CHECK: omp.loop.exit: +// CHECK-NEXT: call void @__kmpc_for_static_fini(ptr @1, i32 %1) +// CHECK-NEXT: call void @__kmpc_barrier(ptr @2, i32 %1) +// CHECK-NEXT: ret void +// +// CHECK-LABEL: define {{[^@]+}}@_Z20parallel_loop_orphanv +// CHECK-NEXT: entry: +// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK: call void @__kmpc_for_static_init_4(ptr @1, i32 %0, i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK-LABEL: cond.true: +// CHECK: omp.inner.for.cond: +// CHECK: omp.loop.exit: +// +// CHECK-LABEL: define {{[^@]+}}@_Z10teams_loopv +// CHECK-NEXT: entry: +// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @3, i32 0, ptr @{{[_A-Za-z0-9]*}}.omp_outlined{{[.1]*}}) +// CHECK-NEXT: ret void +// +// CHECK-LABEL: define {{[^@]+}}@{{[_A-Za-z0-9]*}}.omp_outlined +// CHECK-NEXT: entry: +// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK: call void @__kmpc_for_static_init_4(ptr @4, i32 %1, i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK-LABEL: cond.true: +// CHECK-NEXT: br label [[COND_END:%.*]] +// CHECK-LABEL: cond.false: +// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK-NEXT: br label [[COND_END]] +// CHECK: omp.inner.for.cond: +// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK: omp.inner.for.body: +// CHECK: omp.loop.exit: +// CHECK-NEXT: call void @__kmpc_for_static_fini(ptr @4, i32 %1) +// CHECK-NEXT: ret void +// +// CHECK-LABEL: define {{[^@]+}}@_Z11thread_loopv +// CHECK-NEXT: entry: +// CHECK-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @3, i32 0, ptr @{{[_A-Za-z0-9]*}}.omp_outlined{{[.2]*}}) +// CHECK-LABEL: define {{[^@]+}}@{{[_A-Za-z0-9]*}}.omp_outlined +// CHECK: omp.inner.for.cond: +// +// CHECK-LABEL: define {{[^@]+}}@_Z18thread_loop_orphanv +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK: omp.inner.for.cond: +// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// +// CHECK-LABEL: @main{{.*}} Index: clang/test/OpenMP/generic_loop_ast_print.cpp =================================================================== --- clang/test/OpenMP/generic_loop_ast_print.cpp +++ clang/test/OpenMP/generic_loop_ast_print.cpp @@ -23,7 +23,7 @@ //PRINT: template <typename T, int C> void templ_foo(T t) { //PRINT: T j, z; -//PRINT: #pragma omp loop collapse(C) reduction(+: z) lastprivate(j) bind(thread) +//PRINT: #pragma omp simd collapse(C) reduction(+: z) lastprivate(j) //PRINT: for (T i = 0; i < t; ++i) //PRINT: for (j = 0; j < t; ++j) //PRINT: z += i + j; @@ -31,20 +31,19 @@ //DUMP: FunctionTemplateDecl{{.*}}templ_foo //DUMP: TemplateTypeParmDecl{{.*}}T //DUMP: NonTypeTemplateParmDecl{{.*}}C -//DUMP: OMPGenericLoopDirective +//DUMP: OMPSimdDirective //DUMP: OMPCollapseClause //DUMP: DeclRefExpr{{.*}}'C' 'int' //DUMP: OMPReductionClause //DUMP: DeclRefExpr{{.*}}'z' 'T' //DUMP: OMPLastprivateClause //DUMP: DeclRefExpr{{.*}}'j' 'T' -//DUMP: OMPBindClause //DUMP: ForStmt //DUMP: ForStmt //PRINT: template<> void templ_foo<int, 2>(int t) { //PRINT: int j, z; -//PRINT: #pragma omp loop collapse(2) reduction(+: z) lastprivate(j) bind(thread) +//PRINT: #pragma omp simd collapse(2) reduction(+: z) lastprivate(j) //PRINT: for (int i = 0; i < t; ++i) //PRINT: for (j = 0; j < t; ++j) //PRINT: z += i + j; @@ -53,7 +52,7 @@ //DUMP: TemplateArgument type 'int' //DUMP: TemplateArgument integral 2 //DUMP: ParmVarDecl{{.*}}'int':'int' -//DUMP: OMPGenericLoopDirective +//DUMP: OMPSimdDirective //DUMP: OMPCollapseClause //DUMP: ConstantExpr{{.*}}'int' //DUMP: value: Int 2 @@ -61,7 +60,6 @@ //DUMP: DeclRefExpr{{.*}}'z' 'int':'int' //DUMP: OMPLastprivateClause //DUMP: DeclRefExpr{{.*}}'j' 'int':'int' -//DUMP: OMPBindClause //DUMP: ForStmt template <typename T, int C> void templ_foo(T t) { @@ -82,12 +80,12 @@ int aaa[1000]; //PRINT: #pragma omp target teams distribute parallel for map(tofrom: MTX) - //PRINT: #pragma omp loop + //PRINT: #pragma omp simd //DUMP: OMPTargetTeamsDistributeParallelForDirective //DUMP: CapturedStmt //DUMP: ForStmt //DUMP: CompoundStmt - //DUMP: OMPGenericLoopDirective + //DUMP: OMPSimdDirective #pragma omp target teams distribute parallel for map(MTX) for (auto i = 0; i < N; ++i) { #pragma omp loop @@ -97,11 +95,11 @@ } //PRINT: #pragma omp target teams - //PRINT: #pragma omp loop + //PRINT: #pragma omp distribute //DUMP: OMPTargetTeamsDirective //DUMP: CapturedStmt //DUMP: ForStmt - //DUMP: OMPGenericLoopDirective + //DUMP: OMPDistributeDirective #pragma omp target teams for (int i=0; i<1000; ++i) { #pragma omp loop @@ -111,8 +109,8 @@ } int j, z, z1; - //PRINT: #pragma omp loop collapse(2) private(z) lastprivate(j) order(concurrent) reduction(+: z1) bind(parallel) - //DUMP: OMPGenericLoopDirective + //PRINT: #pragma omp for collapse(2) private(z) lastprivate(j) order(concurrent) reduction(+: z1) + //DUMP: OMPForDirective //DUMP: OMPCollapseClause //DUMP: IntegerLiteral{{.*}}2 //DUMP: OMPPrivateClause @@ -122,7 +120,6 @@ //DUMP: OMPOrderClause //DUMP: OMPReductionClause //DUMP-NEXT: 
DeclRefExpr{{.*}}'z1' - //DUMP: OMPBindClause //DUMP: ForStmt //DUMP: ForStmt #pragma omp loop collapse(2) private(z) lastprivate(j) order(concurrent) \ @@ -136,10 +133,9 @@ } //PRINT: #pragma omp target teams - //PRINT: #pragma omp loop bind(teams) + //PRINT: #pragma omp distribute //DUMP: OMPTargetTeamsDirective - //DUMP: OMPGenericLoopDirective - //DUMP: OMPBindClause + //DUMP: OMPDistributeDirective //DUMP: ForStmt #pragma omp target teams #pragma omp loop bind(teams) @@ -147,11 +143,10 @@ //PRINT: #pragma omp target //PRINT: #pragma omp teams - //PRINT: #pragma omp loop bind(teams) + //PRINT: #pragma omp distribute //DUMP: OMPTargetDirective //DUMP: OMPTeamsDirective - //DUMP: OMPGenericLoopDirective - //DUMP: OMPBindClause + //DUMP: OMPDistributeDirective //DUMP: ForStmt #pragma omp target #pragma omp teams @@ -159,17 +154,6 @@ for (auto i = 0; i < N; ++i) { } } -//PRINT: void nobindingfunc() { -//DUMP: FunctionDecl {{.*}}nobindingfunc 'void ()' -void nobindingfunc() -{ - //PRINT: #pragma omp loop - //DUMP: OMPGenericLoopDirective - //DUMP: ForStmt - #pragma omp loop - for (int i=0; i<10; ++i) { } -} - void bar() { templ_foo<int,2>(8); Index: clang/lib/Sema/SemaOpenMP.cpp =================================================================== --- clang/lib/Sema/SemaOpenMP.cpp +++ clang/lib/Sema/SemaOpenMP.cpp @@ -337,6 +337,11 @@ /// Vector of declare variant construct traits. SmallVector<llvm::omp::TraitProperty, 8> ConstructTraits; + /// GenericLoopDirective with bind clause is mapped to other directives, + /// like for, distribute and simd. Presently, set MappedDirective to OMPLoop. + /// This may also be used in a similar way for other constructs. + OpenMPDirectiveKind MappedDirective = OMPD_unknown; + public: explicit DSAStackTy(Sema &S) : SemaRef(S) {} @@ -636,6 +641,17 @@ const SharingMapTy *Top = getTopOfStackOrNull(); return Top ? Top->Directive : OMPD_unknown; } + OpenMPDirectiveKind getMappedDirective() const { return MappedDirective; } + void setCurrentDirective(OpenMPDirectiveKind NewDK) { + SharingMapTy *Top = getTopOfStackOrNull(); + assert(Top && + "Before calling setCurrentDirective Top of Stack not to be NULL."); + // Store the old into MappedDirective & assign argument NewDK to Directive. + Top->Directive = NewDK; + } + void setMappedDirective(OpenMPDirectiveKind NewDK) { + MappedDirective = NewDK; + } /// Returns directive kind at specified level. OpenMPDirectiveKind getDirective(unsigned Level) const { assert(!isStackEmpty() && "No directive at specified level."); @@ -6102,6 +6118,76 @@ } } +bool Sema::mapLoopConstruct( + llvm::SmallVector<OMPClause *, 8> *ClausesWithoutBind, + ArrayRef<OMPClause *> Clauses, OpenMPBindClauseKind BindKind, + OpenMPDirectiveKind *Kind) { + + bool UseClausesWithoutBind = false; + + // Restricting to "#pragma omp loop bind" + if (getLangOpts().OpenMP >= 50 && *Kind == OMPD_loop) { + if (BindKind == OMPC_BIND_unknown) { + // Setting the enclosing teams or parallel construct for the loop + // directive without bind clause. 
+ BindKind = OMPC_BIND_thread; // Default bind(thread) if binding is unknown + + const OpenMPDirectiveKind ParentDirective = + DSAStack->getParentDirective(); + if (ParentDirective == OMPD_unknown) { + Diag(DSAStack->getDefaultDSALocation(), + diag::err_omp_bind_required_on_loop); + } else if (ParentDirective == OMPD_parallel || + ParentDirective == OMPD_target_parallel) { + BindKind = OMPC_BIND_parallel; + } else if (ParentDirective == OMPD_teams || + ParentDirective == OMPD_target_teams) { + BindKind = OMPC_BIND_teams; + } + } else { + // bind clause is present, so we should set flag indicating to only + // use the clauses that aren't the bind clause for the new directive that + // loop is lowered to. + UseClausesWithoutBind = true; + } + + for (OMPClause *C : Clauses) { + // Spec restriction : bind(teams) and reduction not permitted. + if (BindKind == OMPC_BIND_teams && + C->getClauseKind() == llvm::omp::Clause::OMPC_reduction) + Diag(DSAStack->getDefaultDSALocation(), + diag::err_omp_loop_reduction_clause); + + // A new Vector ClausesWithoutBind, which does not contain the bind + // clause, for passing to new directive. + if (C->getClauseKind() != llvm::omp::Clause::OMPC_bind) + ClausesWithoutBind->push_back(C); + } + + switch (BindKind) { + case OMPC_BIND_parallel: + *Kind = OMPD_for; + DSAStack->setCurrentDirective(OMPD_for); + DSAStack->setMappedDirective(OMPD_loop); + break; + case OMPC_BIND_teams: + *Kind = OMPD_distribute; + DSAStack->setCurrentDirective(OMPD_distribute); + DSAStack->setMappedDirective(OMPD_loop); + break; + case OMPC_BIND_thread: + *Kind = OMPD_simd; + DSAStack->setCurrentDirective(OMPD_simd); + DSAStack->setMappedDirective(OMPD_loop); + break; + case OMPC_BIND_unknown: + break; + } + } + + return UseClausesWithoutBind; +} + StmtResult Sema::ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, @@ -6122,10 +6208,22 @@ isOpenMPTargetDataManagementDirective(Kind))) Diag(StartLoc, diag::warn_hip_omp_target_directives); + llvm::SmallVector<OMPClause *, 8> ClausesWithoutBind; + bool UseClausesWithoutBind = false; + + UseClausesWithoutBind = + mapLoopConstruct(&ClausesWithoutBind, Clauses, BindKind, &Kind); + + llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit; VarsWithInheritedDSAType VarsWithInheritedDSA; bool ErrorFound = false; - ClausesWithImplicit.append(Clauses.begin(), Clauses.end()); + if (getLangOpts().OpenMP >= 50 && UseClausesWithoutBind) { + ClausesWithImplicit.append(ClausesWithoutBind.begin(), + ClausesWithoutBind.end()); + } else { + ClausesWithImplicit.append(Clauses.begin(), Clauses.end()); + } if (AStmt && !CurContext->isDependentContext() && Kind != OMPD_atomic && Kind != OMPD_critical && Kind != OMPD_section && Kind != OMPD_master && Kind != OMPD_masked && !isOpenMPLoopTransformationDirective(Kind)) { @@ -9202,9 +9300,13 @@ auto *CXXFor = dyn_cast_or_null<CXXForRangeStmt>(S); // Ranged for is supported only in OpenMP 5.0. if (!For && (SemaRef.LangOpts.OpenMP <= 45 || !CXXFor)) { + OpenMPDirectiveKind DK = (SemaRef.getLangOpts().OpenMP < 50 || + DSA.getMappedDirective() == OMPD_unknown) + ? 
DKind + : DSA.getMappedDirective(); SemaRef.Diag(S->getBeginLoc(), diag::err_omp_not_for) << (CollapseLoopCountExpr != nullptr || OrderedLoopCountExpr != nullptr) - << getOpenMPDirectiveName(DKind) << TotalNestedLoopCount + << getOpenMPDirectiveName(DK) << TotalNestedLoopCount << (CurrentNestedLoopCount > 0) << CurrentNestedLoopCount; if (TotalNestedLoopCount > 1) { if (CollapseLoopCountExpr && OrderedLoopCountExpr) @@ -10288,6 +10390,24 @@ return false; } +static bool checkGenericLoopLastprivate(Sema &S, ArrayRef<OMPClause *> Clauses, + OpenMPDirectiveKind K, + DSAStackTy *Stack); + +bool Sema::checkLastPrivateForMappedDirectives(ArrayRef<OMPClause *> Clauses) { + + // Check for syntax of lastprivate + // Param of the lastprivate have different meanings in the mapped directives + // e.g. "omp loop" Only loop iteration vars are allowed in lastprivate clause + // "omp for" lastprivate vars must be shared + if (getLangOpts().OpenMP >= 50 && + (DSAStack->getMappedDirective() == OMPD_loop && + checkGenericLoopLastprivate(*this, Clauses, OMPD_loop, DSAStack))) { + return false; + } + return true; +} + StmtResult Sema::ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, @@ -10295,6 +10415,9 @@ if (!AStmt) return StmtError(); + if (!checkLastPrivateForMappedDirectives(Clauses)) + return StmtError(); + assert(isa<CapturedStmt>(AStmt) && "Captured statement expected"); OMPLoopBasedDirective::HelperExprs B; // In presence of clause 'collapse' or 'ordered' with number of loops, it will @@ -10323,8 +10446,8 @@ return StmtError(); setFunctionHasBranchProtectedScope(); - return OMPSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount, - Clauses, AStmt, B); + return OMPSimdDirective::Create( + Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } StmtResult @@ -10334,6 +10457,9 @@ if (!AStmt) return StmtError(); + if (!checkLastPrivateForMappedDirectives(Clauses)) + return StmtError(); + assert(isa<CapturedStmt>(AStmt) && "Captured statement expected"); OMPLoopBasedDirective::HelperExprs B; // In presence of clause 'collapse' or 'ordered' with number of loops, it will @@ -10370,6 +10496,9 @@ if (!AStmt) return StmtError(); + if (!checkLastPrivateForMappedDirectives(Clauses)) + return StmtError(); + assert(isa<CapturedStmt>(AStmt) && "Captured statement expected"); OMPLoopBasedDirective::HelperExprs B; // In presence of clause 'collapse' or 'ordered' with number of loops, it will @@ -13887,6 +14016,9 @@ if (!AStmt) return StmtError(); + if (!checkLastPrivateForMappedDirectives(Clauses)) + return StmtError(); + assert(isa<CapturedStmt>(AStmt) && "Captured statement expected"); OMPLoopBasedDirective::HelperExprs B; // In presence of clause 'collapse' with number of loops, it will @@ -13902,8 +14034,8 @@ "omp for loop exprs were not built"); setFunctionHasBranchProtectedScope(); - return OMPDistributeDirective::Create(Context, StartLoc, EndLoc, - NestedLoopCount, Clauses, AStmt, B); + return OMPDistributeDirective::Create( + Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } StmtResult Sema::ActOnOpenMPDistributeParallelForDirective( Index: clang/include/clang/Sema/Sema.h =================================================================== --- clang/include/clang/Sema/Sema.h +++ clang/include/clang/Sema/Sema.h @@ -11091,6 +11091,12 @@ /// All `omp assumes` we encountered so far. 
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; + bool checkLastPrivateForMappedDirectives(ArrayRef<OMPClause *> Clauses); + bool mapLoopConstruct(llvm::SmallVector<OMPClause *, 8> *ClausesWithoutBind, + ArrayRef<OMPClause *> Clauses, + OpenMPBindClauseKind BindKind, + OpenMPDirectiveKind *Kind); + public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. In this method we create a Index: clang/include/clang/Basic/DiagnosticSemaKinds.td =================================================================== --- clang/include/clang/Basic/DiagnosticSemaKinds.td +++ clang/include/clang/Basic/DiagnosticSemaKinds.td @@ -9816,6 +9816,11 @@ def warn_loop_ctrl_binds_to_inner : Warning< "'%0' is bound to current loop, GCC binds it to the enclosing loop">, InGroup<GccCompat>; +def err_omp_bind_required_on_loop : Error< + "expected 'bind' clause for loop construct without an enclosing OpenMP " + "construct">; +def err_omp_loop_reduction_clause : Error< + "reduction clause not handled with '#pragma omp loop bind(teams)'">; def warn_break_binds_to_switch : Warning< "'break' is bound to loop, GCC binds it to switch">, InGroup<GccCompat>;