doru1004 created this revision.
doru1004 added reviewers: ronl, jdoerfert, jhuber6, carlo.bertolli, 
JonChesterfield, dhruvachak, gregrodgers, ABataev.
doru1004 added a project: OpenMP.
Herald added subscribers: sunshaoce, guansong, yaxunl.
Herald added a project: All.
doru1004 requested review of this revision.
Herald added subscribers: cfe-commits, jplehr, sstefan1.
Herald added a project: clang.

This patch avoids emitting `__kmpc_alloc_shared` allocation calls for 
variables that are implicitly cast via `CK_ArrayToPointerDecay` but do not 
have their address taken explicitly.
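
For illustration, a minimal sketch of the two patterns the escape analysis 
now distinguishes (a simplified restatement of the test added below, 
assuming device compilation of a target region; `take` is a hypothetical 
callee):

```
void take(int *p);

void no_alloc_shared() {
  int buf[64];
  // Subscripting decays buf via CK_ArrayToPointerDecay, but its address
  // is never taken explicitly: with this patch, buf stays a plain alloca.
  buf[0] = 1;
}

void alloc_shared() {
  int buf[64];
  // The address of an element is taken explicitly, so buf still escapes
  // and is allocated with __kmpc_alloc_shared.
  take(&buf[2]);
}
```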

Note: if the condition should be refined rather than removed, I am looking 
for suggestions on how to keep the check for `CK_ArrayToPointerDecay` while 
restricting its applicability with further conditions. It is not clear to me 
what those conditions could be, hence the complete removal of the condition. 
So far, none of the existing lit tests needed to be changed as a consequence 
of this change, and no LLVM/OpenMP tests have failed.

OpenMP-Opt is usually able to transform `__kmpc_alloc_shared` calls emitted 
this way back into allocas, except that in this case the size of the 
allocated local array (256 bytes) exceeds the 128-byte limit, which prevents 
the transformation.
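
The arithmetic behind those numbers, for reference (the array size is taken 
from the test below; a 4-byte `int` is assumed):

```
int stack[64];  // 64 * sizeof(int) = 64 * 4 = 256 bytes
                // 256 > 128 (the OpenMP-Opt limit mentioned above), so
                // the __kmpc_alloc_shared(i64 256) call is not folded
                // back into an alloca
```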


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D148805

Files:
  clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
  clang/test/OpenMP/target_alloc_shared_emission.cpp

Index: clang/test/OpenMP/target_alloc_shared_emission.cpp
===================================================================
--- /dev/null
+++ clang/test/OpenMP/target_alloc_shared_emission.cpp
@@ -0,0 +1,827 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
+// REQUIRES: amdgpu-registered-target
+
+
+// Test target codegen - host bc file has to be created first.
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host-amd.bc
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-target-debug -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host-amd.bc -o - | FileCheck %s --check-prefix=CHECK-AMD
+
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host-nvidia.bc
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-unknown-unknown -emit-llvm %s -fopenmp-target-debug -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host-nvidia.bc -o - | FileCheck %s --check-prefix=CHECK-NVIDIA
+
+// expected-no-diagnostics
+
+#ifndef HEADER
+#define HEADER
+
+void foo(int *stack);
+
+void emits_alloc_shared(const int *localPadding, int *res)
+{
+    int stack[64];
+    int stackptr = 0;
+    stack[stackptr++] = -1;
+    *res = 0;
+
+    do
+    {
+      if(localPadding[0] > 0)
+        stack[stackptr++] = 0;
+      *res = stack[--stackptr];
+      foo(&stack[2]);
+    } while (*res > 0);
+}
+
+void does_not_emit_alloc_shared(const int *localPadding, int *res)
+{
+    int stack[64];
+    int stackptr = 0;
+    stack[stackptr++] = -1;
+    *res = 0;
+
+    do
+    {
+      if(localPadding[0] > 0)
+        stack[stackptr++] = 0;
+      *res = stack[--stackptr];
+    } while (*res > 0);
+}
+
+#define N 1000
+
+int main() {
+    const int maz = 1;
+    const int may = 2;
+    const int max = 3;
+    int res;
+    int localPadding[N];
+#pragma omp target teams distribute parallel for map(tofrom: localPadding[:N],maz, may, max)
+
+    for (int pi = 0; pi < N; pi++)
+    {
+        for (int hz = 0; hz <= maz; hz++)
+            for (int hy = 0; hy <= may; hy++)
+                for (int hx = 0; hx <= max; hx++) {
+                    emits_alloc_shared(localPadding, &res);
+                    does_not_emit_alloc_shared(localPadding, &res);
+                }
+        localPadding[pi] = res;
+    }
+    return 0;
+}
+
+#endif
+// CHECK-AMD-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l58
+// CHECK-AMD-SAME: (ptr noundef nonnull align 4 dereferenceable(4000) [[LOCALPADDING:%.*]], i64 noundef [[RES:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-AMD-NEXT:  entry:
+// CHECK-AMD-NEXT:    [[LOCALPADDING_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[RES_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[RES_CASTED:%.*]] = alloca i64, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[LOCALPADDING_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[LOCALPADDING_ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[RES_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES_ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[RES_CASTED_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES_CASTED]] to ptr
+// CHECK-AMD-NEXT:    [[DOTZERO_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTZERO_ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[DOTTHREADID_TEMP__ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTTHREADID_TEMP_]] to ptr
+// CHECK-AMD-NEXT:    store ptr [[LOCALPADDING]], ptr [[LOCALPADDING_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store i64 [[RES]], ptr [[RES_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[LOCALPADDING_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @[[GLOB1:[0-9]+]] to ptr), i8 2, i1 false)
+// CHECK-AMD-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// CHECK-AMD-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK-AMD:       user_code.entry:
+// CHECK-AMD-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr))
+// CHECK-AMD-NEXT:    [[TMP3:%.*]] = load i32, ptr [[RES_ADDR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    store i32 [[TMP3]], ptr [[RES_CASTED_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP4:%.*]] = load i64, ptr [[RES_CASTED_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store i32 0, ptr [[DOTZERO_ADDR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP__ASCAST]], align 4
+// CHECK-AMD-NEXT:    call void @__omp_outlined__(ptr [[DOTTHREADID_TEMP__ASCAST]], ptr [[DOTZERO_ADDR_ASCAST]], ptr [[TMP0]], i64 [[TMP4]]) #[[ATTR3:[0-9]+]]
+// CHECK-AMD-NEXT:    call void @__kmpc_target_deinit(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i8 2)
+// CHECK-AMD-NEXT:    ret void
+// CHECK-AMD:       worker.exit:
+// CHECK-AMD-NEXT:    ret void
+//
+//
+// CHECK-AMD-LABEL: define {{[^@]+}}@__omp_outlined__
+// CHECK-AMD-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[LOCALPADDING:%.*]], i64 noundef [[RES:%.*]]) #[[ATTR1:[0-9]+]] {
+// CHECK-AMD-NEXT:  entry:
+// CHECK-AMD-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[LOCALPADDING_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[RES_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[TMP:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[PI:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[RES_CASTED:%.*]] = alloca i64, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x ptr], align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[LOCALPADDING_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[LOCALPADDING_ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[RES_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES_ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[DOTOMP_IV_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IV]] to ptr
+// CHECK-AMD-NEXT:    [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
+// CHECK-AMD-NEXT:    [[DOTOMP_COMB_LB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_COMB_LB]] to ptr
+// CHECK-AMD-NEXT:    [[DOTOMP_COMB_UB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_COMB_UB]] to ptr
+// CHECK-AMD-NEXT:    [[DOTOMP_STRIDE_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_STRIDE]] to ptr
+// CHECK-AMD-NEXT:    [[DOTOMP_IS_LAST_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IS_LAST]] to ptr
+// CHECK-AMD-NEXT:    [[PI_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[PI]] to ptr
+// CHECK-AMD-NEXT:    [[RES_CASTED_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES_CASTED]] to ptr
+// CHECK-AMD-NEXT:    [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// CHECK-AMD-NEXT:    store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store ptr [[LOCALPADDING]], ptr [[LOCALPADDING_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store i64 [[RES]], ptr [[RES_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[LOCALPADDING_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store i32 0, ptr [[DOTOMP_COMB_LB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    store i32 999, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    store i32 1, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
+// CHECK-AMD-NEXT:    store i32 0, ptr [[DOTOMP_IS_LAST_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
+// CHECK-AMD-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK-AMD-NEXT:    call void @__kmpc_distribute_static_init_4(ptr addrspacecast (ptr addrspace(1) @[[GLOB2:[0-9]+]] to ptr), i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST_ASCAST]], ptr [[DOTOMP_COMB_LB_ASCAST]], ptr [[DOTOMP_COMB_UB_ASCAST]], ptr [[DOTOMP_STRIDE_ASCAST]], i32 1, i32 [[NVPTX_NUM_THREADS]])
+// CHECK-AMD-NEXT:    [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 999
+// CHECK-AMD-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK-AMD:       cond.true:
+// CHECK-AMD-NEXT:    br label [[COND_END:%.*]]
+// CHECK-AMD:       cond.false:
+// CHECK-AMD-NEXT:    [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    br label [[COND_END]]
+// CHECK-AMD:       cond.end:
+// CHECK-AMD-NEXT:    [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK-AMD-NEXT:    store i32 [[COND]], ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    store i32 [[TMP5]], ptr [[DOTOMP_IV_ASCAST]], align 4
+// CHECK-AMD-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK-AMD:       omp.inner.for.cond:
+// CHECK-AMD-NEXT:    [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 1000
+// CHECK-AMD-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK-AMD:       omp.inner.for.body:
+// CHECK-AMD-NEXT:    [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK-AMD-NEXT:    [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK-AMD-NEXT:    [[TMP11:%.*]] = load i32, ptr [[RES_ADDR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    store i32 [[TMP11]], ptr [[RES_CASTED_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP12:%.*]] = load i64, ptr [[RES_CASTED_ASCAST]], align 8
+// CHECK-AMD-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// CHECK-AMD-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP8]] to ptr
+// CHECK-AMD-NEXT:    store ptr [[TMP14]], ptr [[TMP13]], align 8
+// CHECK-AMD-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
+// CHECK-AMD-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP10]] to ptr
+// CHECK-AMD-NEXT:    store ptr [[TMP16]], ptr [[TMP15]], align 8
+// CHECK-AMD-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 2
+// CHECK-AMD-NEXT:    store ptr [[TMP0]], ptr [[TMP17]], align 8
+// CHECK-AMD-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 3
+// CHECK-AMD-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP12]] to ptr
+// CHECK-AMD-NEXT:    store ptr [[TMP19]], ptr [[TMP18]], align 8
+// CHECK-AMD-NEXT:    call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__.1, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 4)
+// CHECK-AMD-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK-AMD:       omp.inner.for.inc:
+// CHECK-AMD-NEXT:    [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
+// CHECK-AMD-NEXT:    store i32 [[ADD]], ptr [[DOTOMP_IV_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP22:%.*]] = load i32, ptr [[DOTOMP_COMB_LB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
+// CHECK-AMD-NEXT:    store i32 [[ADD2]], ptr [[DOTOMP_COMB_LB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP24:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP25:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
+// CHECK-AMD-NEXT:    store i32 [[ADD3]], ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP26:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP26]], 999
+// CHECK-AMD-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
+// CHECK-AMD:       cond.true5:
+// CHECK-AMD-NEXT:    br label [[COND_END7:%.*]]
+// CHECK-AMD:       cond.false6:
+// CHECK-AMD-NEXT:    [[TMP27:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    br label [[COND_END7]]
+// CHECK-AMD:       cond.end7:
+// CHECK-AMD-NEXT:    [[COND8:%.*]] = phi i32 [ 999, [[COND_TRUE5]] ], [ [[TMP27]], [[COND_FALSE6]] ]
+// CHECK-AMD-NEXT:    store i32 [[COND8]], ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP28:%.*]] = load i32, ptr [[DOTOMP_COMB_LB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    store i32 [[TMP28]], ptr [[DOTOMP_IV_ASCAST]], align 4
+// CHECK-AMD-NEXT:    br label [[OMP_INNER_FOR_COND]]
+// CHECK-AMD:       omp.inner.for.end:
+// CHECK-AMD-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK-AMD:       omp.loop.exit:
+// CHECK-AMD-NEXT:    call void @__kmpc_distribute_static_fini(ptr addrspacecast (ptr addrspace(1) @[[GLOB2]] to ptr), i32 [[TMP2]])
+// CHECK-AMD-NEXT:    ret void
+//
+//
+// CHECK-AMD-LABEL: define {{[^@]+}}@__omp_outlined__.1
+// CHECK-AMD-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[LOCALPADDING:%.*]], i64 noundef [[RES:%.*]]) #[[ATTR1]] {
+// CHECK-AMD-NEXT:  entry:
+// CHECK-AMD-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[LOCALPADDING_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[RES_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[TMP:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[PI:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[HZ:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[HY:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[HX:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[DOTPREVIOUS_LB__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTPREVIOUS_LB__ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[DOTPREVIOUS_UB__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTPREVIOUS_UB__ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[LOCALPADDING_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[LOCALPADDING_ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[RES_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES_ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[DOTOMP_IV_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IV]] to ptr
+// CHECK-AMD-NEXT:    [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
+// CHECK-AMD-NEXT:    [[DOTOMP_LB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_LB]] to ptr
+// CHECK-AMD-NEXT:    [[DOTOMP_UB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_UB]] to ptr
+// CHECK-AMD-NEXT:    [[DOTOMP_STRIDE_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_STRIDE]] to ptr
+// CHECK-AMD-NEXT:    [[DOTOMP_IS_LAST_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IS_LAST]] to ptr
+// CHECK-AMD-NEXT:    [[PI_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[PI]] to ptr
+// CHECK-AMD-NEXT:    [[HZ_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[HZ]] to ptr
+// CHECK-AMD-NEXT:    [[HY_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[HY]] to ptr
+// CHECK-AMD-NEXT:    [[HX_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[HX]] to ptr
+// CHECK-AMD-NEXT:    store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store ptr [[LOCALPADDING]], ptr [[LOCALPADDING_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store i64 [[RES]], ptr [[RES_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[LOCALPADDING_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store i32 0, ptr [[DOTOMP_LB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    store i32 999, ptr [[DOTOMP_UB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK-AMD-NEXT:    [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
+// CHECK-AMD-NEXT:    store i32 [[CONV]], ptr [[DOTOMP_LB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    store i32 [[CONV1]], ptr [[DOTOMP_UB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    store i32 1, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
+// CHECK-AMD-NEXT:    store i32 0, ptr [[DOTOMP_IS_LAST_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
+// CHECK-AMD-NEXT:    call void @__kmpc_for_static_init_4(ptr addrspacecast (ptr addrspace(1) @[[GLOB3:[0-9]+]] to ptr), i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST_ASCAST]], ptr [[DOTOMP_LB_ASCAST]], ptr [[DOTOMP_UB_ASCAST]], ptr [[DOTOMP_STRIDE_ASCAST]], i32 1, i32 1)
+// CHECK-AMD-NEXT:    [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB_ASCAST]], align 4
+// CHECK-AMD-NEXT:    store i32 [[TMP5]], ptr [[DOTOMP_IV_ASCAST]], align 4
+// CHECK-AMD-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK-AMD:       omp.inner.for.cond:
+// CHECK-AMD-NEXT:    [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[CONV2:%.*]] = sext i32 [[TMP6]] to i64
+// CHECK-AMD-NEXT:    [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP7]]
+// CHECK-AMD-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK-AMD:       omp.inner.for.body:
+// CHECK-AMD-NEXT:    [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
+// CHECK-AMD-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK-AMD-NEXT:    store i32 [[ADD]], ptr [[PI_ASCAST]], align 4
+// CHECK-AMD-NEXT:    store i32 0, ptr [[HZ_ASCAST]], align 4
+// CHECK-AMD-NEXT:    br label [[FOR_COND:%.*]]
+// CHECK-AMD:       for.cond:
+// CHECK-AMD-NEXT:    [[TMP9:%.*]] = load i32, ptr [[HZ_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP9]], 1
+// CHECK-AMD-NEXT:    br i1 [[CMP3]], label [[FOR_BODY:%.*]], label [[FOR_END16:%.*]]
+// CHECK-AMD:       for.body:
+// CHECK-AMD-NEXT:    store i32 0, ptr [[HY_ASCAST]], align 4
+// CHECK-AMD-NEXT:    br label [[FOR_COND4:%.*]]
+// CHECK-AMD:       for.cond4:
+// CHECK-AMD-NEXT:    [[TMP10:%.*]] = load i32, ptr [[HY_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP10]], 2
+// CHECK-AMD-NEXT:    br i1 [[CMP5]], label [[FOR_BODY6:%.*]], label [[FOR_END13:%.*]]
+// CHECK-AMD:       for.body6:
+// CHECK-AMD-NEXT:    store i32 0, ptr [[HX_ASCAST]], align 4
+// CHECK-AMD-NEXT:    br label [[FOR_COND7:%.*]]
+// CHECK-AMD:       for.cond7:
+// CHECK-AMD-NEXT:    [[TMP11:%.*]] = load i32, ptr [[HX_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[CMP8:%.*]] = icmp sle i32 [[TMP11]], 3
+// CHECK-AMD-NEXT:    br i1 [[CMP8]], label [[FOR_BODY9:%.*]], label [[FOR_END:%.*]]
+// CHECK-AMD:       for.body9:
+// CHECK-AMD-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 0
+// CHECK-AMD-NEXT:    call void @_Z18emits_alloc_sharedPKiPi(ptr noundef [[ARRAYDECAY]], ptr noundef [[RES_ADDR_ASCAST]]) #[[ATTR8:[0-9]+]]
+// CHECK-AMD-NEXT:    [[ARRAYDECAY10:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 0
+// CHECK-AMD-NEXT:    call void @_Z26does_not_emit_alloc_sharedPKiPi(ptr noundef [[ARRAYDECAY10]], ptr noundef [[RES_ADDR_ASCAST]]) #[[ATTR8]]
+// CHECK-AMD-NEXT:    br label [[FOR_INC:%.*]]
+// CHECK-AMD:       for.inc:
+// CHECK-AMD-NEXT:    [[TMP12:%.*]] = load i32, ptr [[HX_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP12]], 1
+// CHECK-AMD-NEXT:    store i32 [[INC]], ptr [[HX_ASCAST]], align 4
+// CHECK-AMD-NEXT:    br label [[FOR_COND7]], !llvm.loop [[LOOP7:![0-9]+]]
+// CHECK-AMD:       for.end:
+// CHECK-AMD-NEXT:    br label [[FOR_INC11:%.*]]
+// CHECK-AMD:       for.inc11:
+// CHECK-AMD-NEXT:    [[TMP13:%.*]] = load i32, ptr [[HY_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[INC12:%.*]] = add nsw i32 [[TMP13]], 1
+// CHECK-AMD-NEXT:    store i32 [[INC12]], ptr [[HY_ASCAST]], align 4
+// CHECK-AMD-NEXT:    br label [[FOR_COND4]], !llvm.loop [[LOOP9:![0-9]+]]
+// CHECK-AMD:       for.end13:
+// CHECK-AMD-NEXT:    br label [[FOR_INC14:%.*]]
+// CHECK-AMD:       for.inc14:
+// CHECK-AMD-NEXT:    [[TMP14:%.*]] = load i32, ptr [[HZ_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[INC15:%.*]] = add nsw i32 [[TMP14]], 1
+// CHECK-AMD-NEXT:    store i32 [[INC15]], ptr [[HZ_ASCAST]], align 4
+// CHECK-AMD-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
+// CHECK-AMD:       for.end16:
+// CHECK-AMD-NEXT:    [[TMP15:%.*]] = load i32, ptr [[RES_ADDR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP16:%.*]] = load i32, ptr [[PI_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
+// CHECK-AMD-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
+// CHECK-AMD-NEXT:    store i32 [[TMP15]], ptr [[ARRAYIDX]], align 4
+// CHECK-AMD-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK-AMD:       omp.body.continue:
+// CHECK-AMD-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK-AMD:       omp.inner.for.inc:
+// CHECK-AMD-NEXT:    [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
+// CHECK-AMD-NEXT:    store i32 [[ADD17]], ptr [[DOTOMP_IV_ASCAST]], align 4
+// CHECK-AMD-NEXT:    br label [[OMP_INNER_FOR_COND]]
+// CHECK-AMD:       omp.inner.for.end:
+// CHECK-AMD-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK-AMD:       omp.loop.exit:
+// CHECK-AMD-NEXT:    call void @__kmpc_distribute_static_fini(ptr addrspacecast (ptr addrspace(1) @[[GLOB2]] to ptr), i32 [[TMP4]])
+// CHECK-AMD-NEXT:    ret void
+//
+//
+// CHECK-AMD-LABEL: define {{[^@]+}}@_Z18emits_alloc_sharedPKiPi
+// CHECK-AMD-SAME: (ptr noundef [[LOCALPADDING:%.*]], ptr noundef [[RES:%.*]]) #[[ATTR2:[0-9]+]] {
+// CHECK-AMD-NEXT:  entry:
+// CHECK-AMD-NEXT:    [[LOCALPADDING_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[RES_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[STACKPTR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[LOCALPADDING_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[LOCALPADDING_ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[RES_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES_ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[STACKPTR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[STACKPTR]] to ptr
+// CHECK-AMD-NEXT:    [[STACK:%.*]] = call align 8 ptr @__kmpc_alloc_shared(i64 256)
+// CHECK-AMD-NEXT:    store ptr [[LOCALPADDING]], ptr [[LOCALPADDING_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store ptr [[RES]], ptr [[RES_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store i32 0, ptr [[STACKPTR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP0:%.*]] = load i32, ptr [[STACKPTR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
+// CHECK-AMD-NEXT:    store i32 [[INC]], ptr [[STACKPTR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP0]] to i64
+// CHECK-AMD-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i32], ptr [[STACK]], i64 0, i64 [[IDXPROM]]
+// CHECK-AMD-NEXT:    store i32 -1, ptr [[ARRAYIDX]], align 4
+// CHECK-AMD-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[RES_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store i32 0, ptr [[TMP1]], align 4
+// CHECK-AMD-NEXT:    br label [[DO_BODY:%.*]]
+// CHECK-AMD:       do.body:
+// CHECK-AMD-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[LOCALPADDING_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 0
+// CHECK-AMD-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
+// CHECK-AMD-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 0
+// CHECK-AMD-NEXT:    br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+// CHECK-AMD:       if.then:
+// CHECK-AMD-NEXT:    [[TMP4:%.*]] = load i32, ptr [[STACKPTR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[INC2:%.*]] = add nsw i32 [[TMP4]], 1
+// CHECK-AMD-NEXT:    store i32 [[INC2]], ptr [[STACKPTR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[IDXPROM3:%.*]] = sext i32 [[TMP4]] to i64
+// CHECK-AMD-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [64 x i32], ptr [[STACK]], i64 0, i64 [[IDXPROM3]]
+// CHECK-AMD-NEXT:    store i32 0, ptr [[ARRAYIDX4]], align 4
+// CHECK-AMD-NEXT:    br label [[IF_END]]
+// CHECK-AMD:       if.end:
+// CHECK-AMD-NEXT:    [[TMP5:%.*]] = load i32, ptr [[STACKPTR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[DEC:%.*]] = add nsw i32 [[TMP5]], -1
+// CHECK-AMD-NEXT:    store i32 [[DEC]], ptr [[STACKPTR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[DEC]] to i64
+// CHECK-AMD-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [64 x i32], ptr [[STACK]], i64 0, i64 [[IDXPROM5]]
+// CHECK-AMD-NEXT:    [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
+// CHECK-AMD-NEXT:    [[TMP7:%.*]] = load ptr, ptr [[RES_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store i32 [[TMP6]], ptr [[TMP7]], align 4
+// CHECK-AMD-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds [64 x i32], ptr [[STACK]], i64 0, i64 2
+// CHECK-AMD-NEXT:    call void @_Z3fooPi(ptr noundef [[ARRAYIDX7]]) #[[ATTR8]]
+// CHECK-AMD-NEXT:    br label [[DO_COND:%.*]]
+// CHECK-AMD:       do.cond:
+// CHECK-AMD-NEXT:    [[TMP8:%.*]] = load ptr, ptr [[RES_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+// CHECK-AMD-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP9]], 0
+// CHECK-AMD-NEXT:    br i1 [[CMP8]], label [[DO_BODY]], label [[DO_END:%.*]], !llvm.loop [[LOOP11:![0-9]+]]
+// CHECK-AMD:       do.end:
+// CHECK-AMD-NEXT:    call void @__kmpc_free_shared(ptr [[STACK]], i64 256)
+// CHECK-AMD-NEXT:    ret void
+//
+//
+// CHECK-AMD-LABEL: define {{[^@]+}}@_Z26does_not_emit_alloc_sharedPKiPi
+// CHECK-AMD-SAME: (ptr noundef [[LOCALPADDING:%.*]], ptr noundef [[RES:%.*]]) #[[ATTR2]] {
+// CHECK-AMD-NEXT:  entry:
+// CHECK-AMD-NEXT:    [[LOCALPADDING_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[RES_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-AMD-NEXT:    [[STACK:%.*]] = alloca [64 x i32], align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[STACKPTR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-AMD-NEXT:    [[LOCALPADDING_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[LOCALPADDING_ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[RES_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES_ADDR]] to ptr
+// CHECK-AMD-NEXT:    [[STACK_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[STACK]] to ptr
+// CHECK-AMD-NEXT:    [[STACKPTR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[STACKPTR]] to ptr
+// CHECK-AMD-NEXT:    store ptr [[LOCALPADDING]], ptr [[LOCALPADDING_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store ptr [[RES]], ptr [[RES_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store i32 0, ptr [[STACKPTR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[TMP0:%.*]] = load i32, ptr [[STACKPTR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
+// CHECK-AMD-NEXT:    store i32 [[INC]], ptr [[STACKPTR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP0]] to i64
+// CHECK-AMD-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i32], ptr [[STACK_ASCAST]], i64 0, i64 [[IDXPROM]]
+// CHECK-AMD-NEXT:    store i32 -1, ptr [[ARRAYIDX]], align 4
+// CHECK-AMD-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[RES_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store i32 0, ptr [[TMP1]], align 4
+// CHECK-AMD-NEXT:    br label [[DO_BODY:%.*]]
+// CHECK-AMD:       do.body:
+// CHECK-AMD-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[LOCALPADDING_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 0
+// CHECK-AMD-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
+// CHECK-AMD-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 0
+// CHECK-AMD-NEXT:    br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+// CHECK-AMD:       if.then:
+// CHECK-AMD-NEXT:    [[TMP4:%.*]] = load i32, ptr [[STACKPTR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[INC2:%.*]] = add nsw i32 [[TMP4]], 1
+// CHECK-AMD-NEXT:    store i32 [[INC2]], ptr [[STACKPTR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[IDXPROM3:%.*]] = sext i32 [[TMP4]] to i64
+// CHECK-AMD-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [64 x i32], ptr [[STACK_ASCAST]], i64 0, i64 [[IDXPROM3]]
+// CHECK-AMD-NEXT:    store i32 0, ptr [[ARRAYIDX4]], align 4
+// CHECK-AMD-NEXT:    br label [[IF_END]]
+// CHECK-AMD:       if.end:
+// CHECK-AMD-NEXT:    [[TMP5:%.*]] = load i32, ptr [[STACKPTR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[DEC:%.*]] = add nsw i32 [[TMP5]], -1
+// CHECK-AMD-NEXT:    store i32 [[DEC]], ptr [[STACKPTR_ASCAST]], align 4
+// CHECK-AMD-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[DEC]] to i64
+// CHECK-AMD-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [64 x i32], ptr [[STACK_ASCAST]], i64 0, i64 [[IDXPROM5]]
+// CHECK-AMD-NEXT:    [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
+// CHECK-AMD-NEXT:    [[TMP7:%.*]] = load ptr, ptr [[RES_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    store i32 [[TMP6]], ptr [[TMP7]], align 4
+// CHECK-AMD-NEXT:    br label [[DO_COND:%.*]]
+// CHECK-AMD:       do.cond:
+// CHECK-AMD-NEXT:    [[TMP8:%.*]] = load ptr, ptr [[RES_ADDR_ASCAST]], align 8
+// CHECK-AMD-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+// CHECK-AMD-NEXT:    [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], 0
+// CHECK-AMD-NEXT:    br i1 [[CMP7]], label [[DO_BODY]], label [[DO_END:%.*]], !llvm.loop [[LOOP12:![0-9]+]]
+// CHECK-AMD:       do.end:
+// CHECK-AMD-NEXT:    ret void
+//
+//
+// CHECK-NVIDIA-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l58
+// CHECK-NVIDIA-SAME: (ptr noundef nonnull align 4 dereferenceable(4000) [[LOCALPADDING:%.*]], i64 noundef [[RES:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NVIDIA-NEXT:  entry:
+// CHECK-NVIDIA-NEXT:    [[LOCALPADDING_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NVIDIA-NEXT:    [[RES_ADDR:%.*]] = alloca i64, align 8
+// CHECK-NVIDIA-NEXT:    [[RES_CASTED:%.*]] = alloca i64, align 8
+// CHECK-NVIDIA-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    store ptr [[LOCALPADDING]], ptr [[LOCALPADDING_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store i64 [[RES]], ptr [[RES_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[LOCALPADDING_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @[[GLOB1:[0-9]+]], i8 2, i1 false)
+// CHECK-NVIDIA-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// CHECK-NVIDIA-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK-NVIDIA:       user_code.entry:
+// CHECK-NVIDIA-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK-NVIDIA-NEXT:    [[TMP3:%.*]] = load i32, ptr [[RES_ADDR]], align 4
+// CHECK-NVIDIA-NEXT:    store i32 [[TMP3]], ptr [[RES_CASTED]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP4:%.*]] = load i64, ptr [[RES_CASTED]], align 8
+// CHECK-NVIDIA-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
+// CHECK-NVIDIA-NEXT:    store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
+// CHECK-NVIDIA-NEXT:    call void @__omp_outlined__(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]], i64 [[TMP4]]) #[[ATTR3:[0-9]+]]
+// CHECK-NVIDIA-NEXT:    call void @__kmpc_target_deinit(ptr @[[GLOB1]], i8 2)
+// CHECK-NVIDIA-NEXT:    ret void
+// CHECK-NVIDIA:       worker.exit:
+// CHECK-NVIDIA-NEXT:    ret void
+//
+//
+// CHECK-NVIDIA-LABEL: define {{[^@]+}}@__omp_outlined__
+// CHECK-NVIDIA-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[LOCALPADDING:%.*]], i64 noundef [[RES:%.*]]) #[[ATTR1:[0-9]+]] {
+// CHECK-NVIDIA-NEXT:  entry:
+// CHECK-NVIDIA-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NVIDIA-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NVIDIA-NEXT:    [[LOCALPADDING_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NVIDIA-NEXT:    [[RES_ADDR:%.*]] = alloca i64, align 8
+// CHECK-NVIDIA-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[TMP:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[PI:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[RES_CASTED:%.*]] = alloca i64, align 8
+// CHECK-NVIDIA-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x ptr], align 8
+// CHECK-NVIDIA-NEXT:    store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store ptr [[LOCALPADDING]], ptr [[LOCALPADDING_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store i64 [[RES]], ptr [[RES_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[LOCALPADDING_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK-NVIDIA-NEXT:    store i32 999, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK-NVIDIA-NEXT:    store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK-NVIDIA-NEXT:    store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK-NVIDIA-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
+// CHECK-NVIDIA-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK-NVIDIA-NEXT:    call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
+// CHECK-NVIDIA-NEXT:    [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK-NVIDIA-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 999
+// CHECK-NVIDIA-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK-NVIDIA:       cond.true:
+// CHECK-NVIDIA-NEXT:    br label [[COND_END:%.*]]
+// CHECK-NVIDIA:       cond.false:
+// CHECK-NVIDIA-NEXT:    [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[COND_END]]
+// CHECK-NVIDIA:       cond.end:
+// CHECK-NVIDIA-NEXT:    [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK-NVIDIA-NEXT:    store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK-NVIDIA-NEXT:    store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK-NVIDIA:       omp.inner.for.cond:
+// CHECK-NVIDIA-NEXT:    [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK-NVIDIA-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 1000
+// CHECK-NVIDIA-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK-NVIDIA:       omp.inner.for.body:
+// CHECK-NVIDIA-NEXT:    [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK-NVIDIA-NEXT:    [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK-NVIDIA-NEXT:    [[TMP11:%.*]] = load i32, ptr [[RES_ADDR]], align 4
+// CHECK-NVIDIA-NEXT:    store i32 [[TMP11]], ptr [[RES_CASTED]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP12:%.*]] = load i64, ptr [[RES_CASTED]], align 8
+// CHECK-NVIDIA-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
+// CHECK-NVIDIA-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP8]] to ptr
+// CHECK-NVIDIA-NEXT:    store ptr [[TMP14]], ptr [[TMP13]], align 8
+// CHECK-NVIDIA-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
+// CHECK-NVIDIA-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP10]] to ptr
+// CHECK-NVIDIA-NEXT:    store ptr [[TMP16]], ptr [[TMP15]], align 8
+// CHECK-NVIDIA-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
+// CHECK-NVIDIA-NEXT:    store ptr [[TMP0]], ptr [[TMP17]], align 8
+// CHECK-NVIDIA-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
+// CHECK-NVIDIA-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP12]] to ptr
+// CHECK-NVIDIA-NEXT:    store ptr [[TMP19]], ptr [[TMP18]], align 8
+// CHECK-NVIDIA-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__1, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 4)
+// CHECK-NVIDIA-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK-NVIDIA:       omp.inner.for.inc:
+// CHECK-NVIDIA-NEXT:    [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK-NVIDIA-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
+// CHECK-NVIDIA-NEXT:    store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP22:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK-NVIDIA-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
+// CHECK-NVIDIA-NEXT:    store i32 [[ADD2]], ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP24:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP25:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK-NVIDIA-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
+// CHECK-NVIDIA-NEXT:    store i32 [[ADD3]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP26:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK-NVIDIA-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP26]], 999
+// CHECK-NVIDIA-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
+// CHECK-NVIDIA:       cond.true5:
+// CHECK-NVIDIA-NEXT:    br label [[COND_END7:%.*]]
+// CHECK-NVIDIA:       cond.false6:
+// CHECK-NVIDIA-NEXT:    [[TMP27:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[COND_END7]]
+// CHECK-NVIDIA:       cond.end7:
+// CHECK-NVIDIA-NEXT:    [[COND8:%.*]] = phi i32 [ 999, [[COND_TRUE5]] ], [ [[TMP27]], [[COND_FALSE6]] ]
+// CHECK-NVIDIA-NEXT:    store i32 [[COND8]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP28:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK-NVIDIA-NEXT:    store i32 [[TMP28]], ptr [[DOTOMP_IV]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[OMP_INNER_FOR_COND]]
+// CHECK-NVIDIA:       omp.inner.for.end:
+// CHECK-NVIDIA-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK-NVIDIA:       omp.loop.exit:
+// CHECK-NVIDIA-NEXT:    call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
+// CHECK-NVIDIA-NEXT:    ret void
+//
+//
+// CHECK-NVIDIA-LABEL: define {{[^@]+}}@__omp_outlined__1
+// CHECK-NVIDIA-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[LOCALPADDING:%.*]], i64 noundef [[RES:%.*]]) #[[ATTR1]] {
+// CHECK-NVIDIA-NEXT:  entry:
+// CHECK-NVIDIA-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NVIDIA-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NVIDIA-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK-NVIDIA-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK-NVIDIA-NEXT:    [[LOCALPADDING_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NVIDIA-NEXT:    [[RES_ADDR:%.*]] = alloca i64, align 8
+// CHECK-NVIDIA-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[TMP:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[PI:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[HZ:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[HY:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[HX:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store ptr [[LOCALPADDING]], ptr [[LOCALPADDING_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store i64 [[RES]], ptr [[RES_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[LOCALPADDING_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK-NVIDIA-NEXT:    store i32 999, ptr [[DOTOMP_UB]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK-NVIDIA-NEXT:    [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
+// CHECK-NVIDIA-NEXT:    store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK-NVIDIA-NEXT:    store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK-NVIDIA-NEXT:    store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK-NVIDIA-NEXT:    store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
+// CHECK-NVIDIA-NEXT:    call void @__kmpc_for_static_init_4(ptr @[[GLOB3:[0-9]+]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK-NVIDIA-NEXT:    [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK-NVIDIA-NEXT:    store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK-NVIDIA:       omp.inner.for.cond:
+// CHECK-NVIDIA-NEXT:    [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK-NVIDIA-NEXT:    [[CONV2:%.*]] = sext i32 [[TMP6]] to i64
+// CHECK-NVIDIA-NEXT:    [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP7]]
+// CHECK-NVIDIA-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK-NVIDIA:       omp.inner.for.body:
+// CHECK-NVIDIA-NEXT:    [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK-NVIDIA-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
+// CHECK-NVIDIA-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK-NVIDIA-NEXT:    store i32 [[ADD]], ptr [[PI]], align 4
+// CHECK-NVIDIA-NEXT:    store i32 0, ptr [[HZ]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[FOR_COND:%.*]]
+// CHECK-NVIDIA:       for.cond:
+// CHECK-NVIDIA-NEXT:    [[TMP9:%.*]] = load i32, ptr [[HZ]], align 4
+// CHECK-NVIDIA-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP9]], 1
+// CHECK-NVIDIA-NEXT:    br i1 [[CMP3]], label [[FOR_BODY:%.*]], label [[FOR_END16:%.*]]
+// CHECK-NVIDIA:       for.body:
+// CHECK-NVIDIA-NEXT:    store i32 0, ptr [[HY]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[FOR_COND4:%.*]]
+// CHECK-NVIDIA:       for.cond4:
+// CHECK-NVIDIA-NEXT:    [[TMP10:%.*]] = load i32, ptr [[HY]], align 4
+// CHECK-NVIDIA-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP10]], 2
+// CHECK-NVIDIA-NEXT:    br i1 [[CMP5]], label [[FOR_BODY6:%.*]], label [[FOR_END13:%.*]]
+// CHECK-NVIDIA:       for.body6:
+// CHECK-NVIDIA-NEXT:    store i32 0, ptr [[HX]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[FOR_COND7:%.*]]
+// CHECK-NVIDIA:       for.cond7:
+// CHECK-NVIDIA-NEXT:    [[TMP11:%.*]] = load i32, ptr [[HX]], align 4
+// CHECK-NVIDIA-NEXT:    [[CMP8:%.*]] = icmp sle i32 [[TMP11]], 3
+// CHECK-NVIDIA-NEXT:    br i1 [[CMP8]], label [[FOR_BODY9:%.*]], label [[FOR_END:%.*]]
+// CHECK-NVIDIA:       for.body9:
+// CHECK-NVIDIA-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 0
+// CHECK-NVIDIA-NEXT:    call void @_Z18emits_alloc_sharedPKiPi(ptr noundef [[ARRAYDECAY]], ptr noundef [[RES_ADDR]]) #[[ATTR8:[0-9]+]]
+// CHECK-NVIDIA-NEXT:    [[ARRAYDECAY10:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 0
+// CHECK-NVIDIA-NEXT:    call void @_Z26does_not_emit_alloc_sharedPKiPi(ptr noundef [[ARRAYDECAY10]], ptr noundef [[RES_ADDR]]) #[[ATTR8]]
+// CHECK-NVIDIA-NEXT:    br label [[FOR_INC:%.*]]
+// CHECK-NVIDIA:       for.inc:
+// CHECK-NVIDIA-NEXT:    [[TMP12:%.*]] = load i32, ptr [[HX]], align 4
+// CHECK-NVIDIA-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP12]], 1
+// CHECK-NVIDIA-NEXT:    store i32 [[INC]], ptr [[HX]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[FOR_COND7]], !llvm.loop [[LOOP6:![0-9]+]]
+// CHECK-NVIDIA:       for.end:
+// CHECK-NVIDIA-NEXT:    br label [[FOR_INC11:%.*]]
+// CHECK-NVIDIA:       for.inc11:
+// CHECK-NVIDIA-NEXT:    [[TMP13:%.*]] = load i32, ptr [[HY]], align 4
+// CHECK-NVIDIA-NEXT:    [[INC12:%.*]] = add nsw i32 [[TMP13]], 1
+// CHECK-NVIDIA-NEXT:    store i32 [[INC12]], ptr [[HY]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[FOR_COND4]], !llvm.loop [[LOOP8:![0-9]+]]
+// CHECK-NVIDIA:       for.end13:
+// CHECK-NVIDIA-NEXT:    br label [[FOR_INC14:%.*]]
+// CHECK-NVIDIA:       for.inc14:
+// CHECK-NVIDIA-NEXT:    [[TMP14:%.*]] = load i32, ptr [[HZ]], align 4
+// CHECK-NVIDIA-NEXT:    [[INC15:%.*]] = add nsw i32 [[TMP14]], 1
+// CHECK-NVIDIA-NEXT:    store i32 [[INC15]], ptr [[HZ]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
+// CHECK-NVIDIA:       for.end16:
+// CHECK-NVIDIA-NEXT:    [[TMP15:%.*]] = load i32, ptr [[RES_ADDR]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP16:%.*]] = load i32, ptr [[PI]], align 4
+// CHECK-NVIDIA-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
+// CHECK-NVIDIA-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
+// CHECK-NVIDIA-NEXT:    store i32 [[TMP15]], ptr [[ARRAYIDX]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK-NVIDIA:       omp.body.continue:
+// CHECK-NVIDIA-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK-NVIDIA:       omp.inner.for.inc:
+// CHECK-NVIDIA-NEXT:    [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK-NVIDIA-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
+// CHECK-NVIDIA-NEXT:    store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[OMP_INNER_FOR_COND]]
+// CHECK-NVIDIA:       omp.inner.for.end:
+// CHECK-NVIDIA-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK-NVIDIA:       omp.loop.exit:
+// CHECK-NVIDIA-NEXT:    call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
+// CHECK-NVIDIA-NEXT:    ret void
+//
+//
+// CHECK-NVIDIA-LABEL: define {{[^@]+}}@_Z18emits_alloc_sharedPKiPi
+// CHECK-NVIDIA-SAME: (ptr noundef [[LOCALPADDING:%.*]], ptr noundef [[RES:%.*]]) #[[ATTR2:[0-9]+]] {
+// CHECK-NVIDIA-NEXT:  entry:
+// CHECK-NVIDIA-NEXT:    [[LOCALPADDING_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NVIDIA-NEXT:    [[RES_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NVIDIA-NEXT:    [[STACKPTR:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    [[STACK:%.*]] = call align 8 ptr @__kmpc_alloc_shared(i64 256)
+// CHECK-NVIDIA-NEXT:    store ptr [[LOCALPADDING]], ptr [[LOCALPADDING_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store ptr [[RES]], ptr [[RES_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store i32 0, ptr [[STACKPTR]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP0:%.*]] = load i32, ptr [[STACKPTR]], align 4
+// CHECK-NVIDIA-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
+// CHECK-NVIDIA-NEXT:    store i32 [[INC]], ptr [[STACKPTR]], align 4
+// CHECK-NVIDIA-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP0]] to i64
+// CHECK-NVIDIA-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i32], ptr [[STACK]], i64 0, i64 [[IDXPROM]]
+// CHECK-NVIDIA-NEXT:    store i32 -1, ptr [[ARRAYIDX]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[RES_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store i32 0, ptr [[TMP1]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[DO_BODY:%.*]]
+// CHECK-NVIDIA:       do.body:
+// CHECK-NVIDIA-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[LOCALPADDING_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 0
+// CHECK-NVIDIA-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
+// CHECK-NVIDIA-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 0
+// CHECK-NVIDIA-NEXT:    br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+// CHECK-NVIDIA:       if.then:
+// CHECK-NVIDIA-NEXT:    [[TMP4:%.*]] = load i32, ptr [[STACKPTR]], align 4
+// CHECK-NVIDIA-NEXT:    [[INC2:%.*]] = add nsw i32 [[TMP4]], 1
+// CHECK-NVIDIA-NEXT:    store i32 [[INC2]], ptr [[STACKPTR]], align 4
+// CHECK-NVIDIA-NEXT:    [[IDXPROM3:%.*]] = sext i32 [[TMP4]] to i64
+// CHECK-NVIDIA-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [64 x i32], ptr [[STACK]], i64 0, i64 [[IDXPROM3]]
+// CHECK-NVIDIA-NEXT:    store i32 0, ptr [[ARRAYIDX4]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[IF_END]]
+// CHECK-NVIDIA:       if.end:
+// CHECK-NVIDIA-NEXT:    [[TMP5:%.*]] = load i32, ptr [[STACKPTR]], align 4
+// CHECK-NVIDIA-NEXT:    [[DEC:%.*]] = add nsw i32 [[TMP5]], -1
+// CHECK-NVIDIA-NEXT:    store i32 [[DEC]], ptr [[STACKPTR]], align 4
+// CHECK-NVIDIA-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[DEC]] to i64
+// CHECK-NVIDIA-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [64 x i32], ptr [[STACK]], i64 0, i64 [[IDXPROM5]]
+// CHECK-NVIDIA-NEXT:    [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP7:%.*]] = load ptr, ptr [[RES_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store i32 [[TMP6]], ptr [[TMP7]], align 4
+// CHECK-NVIDIA-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds [64 x i32], ptr [[STACK]], i64 0, i64 2
+// CHECK-NVIDIA-NEXT:    call void @_Z3fooPi(ptr noundef [[ARRAYIDX7]]) #[[ATTR8]]
+// CHECK-NVIDIA-NEXT:    br label [[DO_COND:%.*]]
+// CHECK-NVIDIA:       do.cond:
+// CHECK-NVIDIA-NEXT:    [[TMP8:%.*]] = load ptr, ptr [[RES_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+// CHECK-NVIDIA-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP9]], 0
+// CHECK-NVIDIA-NEXT:    br i1 [[CMP8]], label [[DO_BODY]], label [[DO_END:%.*]], !llvm.loop [[LOOP10:![0-9]+]]
+// CHECK-NVIDIA:       do.end:
+// CHECK-NVIDIA-NEXT:    call void @__kmpc_free_shared(ptr [[STACK]], i64 256)
+// CHECK-NVIDIA-NEXT:    ret void
+//
+//
+// CHECK-NVIDIA-LABEL: define {{[^@]+}}@_Z26does_not_emit_alloc_sharedPKiPi
+// CHECK-NVIDIA-SAME: (ptr noundef [[LOCALPADDING:%.*]], ptr noundef [[RES:%.*]]) #[[ATTR2]] {
+// CHECK-NVIDIA-NEXT:  entry:
+// CHECK-NVIDIA-NEXT:    [[LOCALPADDING_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NVIDIA-NEXT:    [[RES_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NVIDIA-NEXT:    [[STACK:%.*]] = alloca [64 x i32], align 4
+// CHECK-NVIDIA-NEXT:    [[STACKPTR:%.*]] = alloca i32, align 4
+// CHECK-NVIDIA-NEXT:    store ptr [[LOCALPADDING]], ptr [[LOCALPADDING_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store ptr [[RES]], ptr [[RES_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store i32 0, ptr [[STACKPTR]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP0:%.*]] = load i32, ptr [[STACKPTR]], align 4
+// CHECK-NVIDIA-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
+// CHECK-NVIDIA-NEXT:    store i32 [[INC]], ptr [[STACKPTR]], align 4
+// CHECK-NVIDIA-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP0]] to i64
+// CHECK-NVIDIA-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i32], ptr [[STACK]], i64 0, i64 [[IDXPROM]]
+// CHECK-NVIDIA-NEXT:    store i32 -1, ptr [[ARRAYIDX]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[RES_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store i32 0, ptr [[TMP1]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[DO_BODY:%.*]]
+// CHECK-NVIDIA:       do.body:
+// CHECK-NVIDIA-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[LOCALPADDING_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 0
+// CHECK-NVIDIA-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
+// CHECK-NVIDIA-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 0
+// CHECK-NVIDIA-NEXT:    br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+// CHECK-NVIDIA:       if.then:
+// CHECK-NVIDIA-NEXT:    [[TMP4:%.*]] = load i32, ptr [[STACKPTR]], align 4
+// CHECK-NVIDIA-NEXT:    [[INC2:%.*]] = add nsw i32 [[TMP4]], 1
+// CHECK-NVIDIA-NEXT:    store i32 [[INC2]], ptr [[STACKPTR]], align 4
+// CHECK-NVIDIA-NEXT:    [[IDXPROM3:%.*]] = sext i32 [[TMP4]] to i64
+// CHECK-NVIDIA-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [64 x i32], ptr [[STACK]], i64 0, i64 [[IDXPROM3]]
+// CHECK-NVIDIA-NEXT:    store i32 0, ptr [[ARRAYIDX4]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[IF_END]]
+// CHECK-NVIDIA:       if.end:
+// CHECK-NVIDIA-NEXT:    [[TMP5:%.*]] = load i32, ptr [[STACKPTR]], align 4
+// CHECK-NVIDIA-NEXT:    [[DEC:%.*]] = add nsw i32 [[TMP5]], -1
+// CHECK-NVIDIA-NEXT:    store i32 [[DEC]], ptr [[STACKPTR]], align 4
+// CHECK-NVIDIA-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[DEC]] to i64
+// CHECK-NVIDIA-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [64 x i32], ptr [[STACK]], i64 0, i64 [[IDXPROM5]]
+// CHECK-NVIDIA-NEXT:    [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
+// CHECK-NVIDIA-NEXT:    [[TMP7:%.*]] = load ptr, ptr [[RES_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    store i32 [[TMP6]], ptr [[TMP7]], align 4
+// CHECK-NVIDIA-NEXT:    br label [[DO_COND:%.*]]
+// CHECK-NVIDIA:       do.cond:
+// CHECK-NVIDIA-NEXT:    [[TMP8:%.*]] = load ptr, ptr [[RES_ADDR]], align 8
+// CHECK-NVIDIA-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+// CHECK-NVIDIA-NEXT:    [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], 0
+// CHECK-NVIDIA-NEXT:    br i1 [[CMP7]], label [[DO_BODY]], label [[DO_END:%.*]], !llvm.loop [[LOOP11:![0-9]+]]
+// CHECK-NVIDIA:       do.end:
+// CHECK-NVIDIA-NEXT:    ret void
+//
Index: clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
===================================================================
--- clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -445,14 +445,7 @@
   void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
     if (!E)
       return;
-    if (E->getCastKind() == CK_ArrayToPointerDecay) {
-      const bool SavedAllEscaped = AllEscaped;
-      AllEscaped = true;
-      Visit(E->getSubExpr());
-      AllEscaped = SavedAllEscaped;
-    } else {
-      Visit(E->getSubExpr());
-    }
+    Visit(E->getSubExpr());
   }
   void VisitExpr(const Expr *E) {
     if (!E)