// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK45-64
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK45-32
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK45-32-EX
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK-64
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK-32
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK-32-EX
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// Check that the execution mode of all 4 target regions on the gpu is set to NonSPMD Mode.
#define N 1000
template<typename tx>
tx ftemplate(int n) { // Device-side test payload: four `target simd` regions exercised per check prefix. NOTE(review): trailing comments only — inserting/removing lines would shift the source line numbers embedded in the autogenerated kernel names (the `_lNN` suffixes in the CHECK-LABELs below).
tx a[N]; // written by region 1; a[0] is mapped back via the return below
short aa[N]; // read-modify-written by region 2 (exercises i16 widen/trunc codegen)
tx b[10]; // read-modify-written by regions 3 and 4
#pragma omp target simd
for(int i = 0; i < n; i++) { // region 1: runtime trip count -> simd.if.then/end guard in IR
a[i] = 1;
}
#pragma omp target simd
for (int i = 0; i < n; i++) { // region 2: same shape as region 1, short element type
aa[i] += 1;
}
#pragma omp target simd
for(int i = 0; i < 10; i++) { // region 3: constant trip count -> no precondition guard
b[i] += 1;
}
#pragma omp target simd reduction(+:n)
for(int i = 0; i < 10; i++) { // region 4: adds a scalar reduction on n (private copy N1 merged on exit)
b[i] += 1;
}
return a[0];
}
// Host-visible driver: instantiates ftemplate<int>, which triggers emission of
// all four offloaded kernels checked below. (Safe to add lines here — every
// target pragma, whose source line is baked into the kernel names, is above.)
int bar(int n){
int a = 0; // accumulator for the instantiation's result
a += ftemplate<int>(n);
return a;
}
#endif
// CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
// CHECK45-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK45-64-NEXT: entry:
// CHECK45-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK45-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK45-64-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK45-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK45-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
// CHECK45-64-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK45-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29_kernel_environment, ptr [[DYN_PTR]])
// CHECK45-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK45-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK45-64: user_code.entry:
// CHECK45-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK45-64-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK45-64-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK45-64-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK45-64-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK45-64-NEXT: store i32 0, ptr [[I]], align 4
// CHECK45-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-64-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK45-64-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK45-64: simd.if.then:
// CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK45-64: omp.inner.for.cond:
// CHECK45-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
// CHECK45-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK45-64-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
// CHECK45-64-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK45-64: omp.inner.for.body:
// CHECK45-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK45-64-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
// CHECK45-64-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP8]] to i64
// CHECK45-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK45-64-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK45-64: omp.body.continue:
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK45-64: omp.inner.for.inc:
// CHECK45-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-64-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK45-64-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK45-64: worker.exit:
// CHECK45-64-NEXT: ret void
// CHECK45-64: omp.inner.for.end:
// CHECK45-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-64-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP10]], 0
// CHECK45-64-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
// CHECK45-64-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
// CHECK45-64-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
// CHECK45-64-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
// CHECK45-64-NEXT: br label [[SIMD_IF_END]]
// CHECK45-64: simd.if.end:
// CHECK45-64-NEXT: call void @__kmpc_target_deinit()
// CHECK45-64-NEXT: ret void
//
//
// CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
// CHECK45-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
// CHECK45-64-NEXT: entry:
// CHECK45-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK45-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK45-64-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
// CHECK45-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK45-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
// CHECK45-64-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
// CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
// CHECK45-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
// CHECK45-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK45-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK45-64: user_code.entry:
// CHECK45-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK45-64-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK45-64-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK45-64-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK45-64-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK45-64-NEXT: store i32 0, ptr [[I]], align 4
// CHECK45-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-64-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK45-64-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK45-64: simd.if.then:
// CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK45-64: omp.inner.for.cond:
// CHECK45-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
// CHECK45-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK45-64-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
// CHECK45-64-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK45-64: omp.inner.for.body:
// CHECK45-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK45-64-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
// CHECK45-64-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP8]] to i64
// CHECK45-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK45-64-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
// CHECK45-64-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
// CHECK45-64-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], 1
// CHECK45-64-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK45-64-NEXT: store i16 [[CONV7]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
// CHECK45-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK45-64: omp.body.continue:
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK45-64: omp.inner.for.inc:
// CHECK45-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-64-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK45-64-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
// CHECK45-64: worker.exit:
// CHECK45-64-NEXT: ret void
// CHECK45-64: omp.inner.for.end:
// CHECK45-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-64-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP11]], 0
// CHECK45-64-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
// CHECK45-64-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
// CHECK45-64-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
// CHECK45-64-NEXT: store i32 [[ADD12]], ptr [[I3]], align 4
// CHECK45-64-NEXT: br label [[SIMD_IF_END]]
// CHECK45-64: simd.if.end:
// CHECK45-64-NEXT: call void @__kmpc_target_deinit()
// CHECK45-64-NEXT: ret void
//
//
// CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
// CHECK45-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK45-64-NEXT: entry:
// CHECK45-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK45-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK45-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK45-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK45-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39_kernel_environment, ptr [[DYN_PTR]])
// CHECK45-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK45-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK45-64: user_code.entry:
// CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK45-64: omp.inner.for.cond:
// CHECK45-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
// CHECK45-64-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// CHECK45-64-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK45-64: omp.inner.for.body:
// CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK45-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK45-64-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64
// CHECK45-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK45-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-64-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK45-64-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK45-64: omp.body.continue:
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK45-64: omp.inner.for.inc:
// CHECK45-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-64-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK45-64-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
// CHECK45-64: worker.exit:
// CHECK45-64-NEXT: ret void
// CHECK45-64: omp.inner.for.end:
// CHECK45-64-NEXT: store i32 10, ptr [[I]], align 4
// CHECK45-64-NEXT: call void @__kmpc_target_deinit()
// CHECK45-64-NEXT: ret void
//
//
// CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
// CHECK45-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]]) #[[ATTR0]] {
// CHECK45-64-NEXT: entry:
// CHECK45-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK45-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK45-64-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
// CHECK45-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: [[N1:%.*]] = alloca i32, align 4
// CHECK45-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK45-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK45-64-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
// CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK45-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[N_ADDR]], align 8
// CHECK45-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44_kernel_environment, ptr [[DYN_PTR]])
// CHECK45-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK45-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK45-64: user_code.entry:
// CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK45-64-NEXT: store i32 0, ptr [[N1]], align 4
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK45-64: omp.inner.for.cond:
// CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
// CHECK45-64-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP3]], 10
// CHECK45-64-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK45-64: omp.inner.for.body:
// CHECK45-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK45-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK45-64-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64
// CHECK45-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK45-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-64-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK45-64-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK45-64: omp.body.continue:
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK45-64: omp.inner.for.inc:
// CHECK45-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-64-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK45-64-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
// CHECK45-64: worker.exit:
// CHECK45-64-NEXT: ret void
// CHECK45-64: omp.inner.for.end:
// CHECK45-64-NEXT: store i32 10, ptr [[I]], align 4
// CHECK45-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK45-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[N1]], align 4
// CHECK45-64-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK45-64-NEXT: store i32 [[ADD4]], ptr [[TMP1]], align 4
// CHECK45-64-NEXT: call void @__kmpc_target_deinit()
// CHECK45-64-NEXT: ret void
//
//
// CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
// CHECK45-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK45-32-NEXT: entry:
// CHECK45-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK45-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK45-32-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK45-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29_kernel_environment, ptr [[DYN_PTR]])
// CHECK45-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK45-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK45-32: user_code.entry:
// CHECK45-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK45-32-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK45-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK45-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK45-32-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK45-32-NEXT: store i32 0, ptr [[I]], align 4
// CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK45-32-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK45-32: simd.if.then:
// CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK45-32: omp.inner.for.cond:
// CHECK45-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
// CHECK45-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK45-32-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
// CHECK45-32-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK45-32: omp.inner.for.body:
// CHECK45-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK45-32-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
// CHECK45-32-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP8]]
// CHECK45-32-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK45-32: omp.body.continue:
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK45-32: omp.inner.for.inc:
// CHECK45-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-32-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK45-32-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK45-32: worker.exit:
// CHECK45-32-NEXT: ret void
// CHECK45-32: omp.inner.for.end:
// CHECK45-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP10]], 0
// CHECK45-32-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
// CHECK45-32-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
// CHECK45-32-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
// CHECK45-32-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
// CHECK45-32-NEXT: br label [[SIMD_IF_END]]
// CHECK45-32: simd.if.end:
// CHECK45-32-NEXT: call void @__kmpc_target_deinit()
// CHECK45-32-NEXT: ret void
//
//
// CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
// CHECK45-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
// CHECK45-32-NEXT: entry:
// CHECK45-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK45-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK45-32-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
// CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
// CHECK45-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
// CHECK45-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK45-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK45-32: user_code.entry:
// CHECK45-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK45-32-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK45-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK45-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK45-32-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK45-32-NEXT: store i32 0, ptr [[I]], align 4
// CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK45-32-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK45-32: simd.if.then:
// CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK45-32: omp.inner.for.cond:
// CHECK45-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
// CHECK45-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK45-32-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
// CHECK45-32-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK45-32: omp.inner.for.body:
// CHECK45-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK45-32-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
// CHECK45-32-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i32 0, i32 [[TMP8]]
// CHECK45-32-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
// CHECK45-32-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], 1
// CHECK45-32-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK45-32-NEXT: store i16 [[CONV7]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK45-32: omp.body.continue:
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK45-32: omp.inner.for.inc:
// CHECK45-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK45-32-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
// CHECK45-32: worker.exit:
// CHECK45-32-NEXT: ret void
// CHECK45-32: omp.inner.for.end:
// CHECK45-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP11]], 0
// CHECK45-32-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
// CHECK45-32-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
// CHECK45-32-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
// CHECK45-32-NEXT: store i32 [[ADD12]], ptr [[I3]], align 4
// CHECK45-32-NEXT: br label [[SIMD_IF_END]]
// CHECK45-32: simd.if.end:
// CHECK45-32-NEXT: call void @__kmpc_target_deinit()
// CHECK45-32-NEXT: ret void
//
//
// CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
// CHECK45-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK45-32-NEXT: entry:
// CHECK45-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK45-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK45-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39_kernel_environment, ptr [[DYN_PTR]])
// CHECK45-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK45-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK45-32: user_code.entry:
// CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK45-32: omp.inner.for.cond:
// CHECK45-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
// CHECK45-32-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// CHECK45-32-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK45-32: omp.inner.for.body:
// CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK45-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK45-32-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP4]]
// CHECK45-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-32-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK45-32-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK45-32: omp.body.continue:
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK45-32: omp.inner.for.inc:
// CHECK45-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-32-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK45-32-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
// CHECK45-32: worker.exit:
// CHECK45-32-NEXT: ret void
// CHECK45-32: omp.inner.for.end:
// CHECK45-32-NEXT: store i32 10, ptr [[I]], align 4
// CHECK45-32-NEXT: call void @__kmpc_target_deinit()
// CHECK45-32-NEXT: ret void
//
//
// CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
// CHECK45-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]]) #[[ATTR0]] {
// CHECK45-32-NEXT: entry:
// CHECK45-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: [[N1:%.*]] = alloca i32, align 4
// CHECK45-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK45-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK45-32-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
// CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK45-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[N_ADDR]], align 4
// CHECK45-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44_kernel_environment, ptr [[DYN_PTR]])
// CHECK45-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK45-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK45-32: user_code.entry:
// CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK45-32-NEXT: store i32 0, ptr [[N1]], align 4
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK45-32: omp.inner.for.cond:
// CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
// CHECK45-32-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP3]], 10
// CHECK45-32-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK45-32: omp.inner.for.body:
// CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK45-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK45-32-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP5]]
// CHECK45-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-32-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK45-32-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK45-32: omp.body.continue:
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK45-32: omp.inner.for.inc:
// CHECK45-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-32-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK45-32-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
// CHECK45-32: worker.exit:
// CHECK45-32-NEXT: ret void
// CHECK45-32: omp.inner.for.end:
// CHECK45-32-NEXT: store i32 10, ptr [[I]], align 4
// CHECK45-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK45-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[N1]], align 4
// CHECK45-32-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK45-32-NEXT: store i32 [[ADD4]], ptr [[TMP1]], align 4
// CHECK45-32-NEXT: call void @__kmpc_target_deinit()
// CHECK45-32-NEXT: ret void
//
//
// CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
// CHECK45-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK45-32-EX-NEXT: entry:
// CHECK45-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK45-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK45-32-EX-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK45-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29_kernel_environment, ptr [[DYN_PTR]])
// CHECK45-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK45-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK45-32-EX: user_code.entry:
// CHECK45-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK45-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-EX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK45-32-EX-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK45-32-EX-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK45-32-EX-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK45-32-EX-NEXT: store i32 0, ptr [[I]], align 4
// CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK45-32-EX-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK45-32-EX: simd.if.then:
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK45-32-EX: omp.inner.for.cond:
// CHECK45-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
// CHECK45-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK45-32-EX-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
// CHECK45-32-EX-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK45-32-EX: omp.inner.for.body:
// CHECK45-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK45-32-EX-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
// CHECK45-32-EX-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP8]]
// CHECK45-32-EX-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK45-32-EX: omp.body.continue:
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK45-32-EX: omp.inner.for.inc:
// CHECK45-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-32-EX-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK45-32-EX-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK45-32-EX: worker.exit:
// CHECK45-32-EX-NEXT: ret void
// CHECK45-32-EX: omp.inner.for.end:
// CHECK45-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-EX-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP10]], 0
// CHECK45-32-EX-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
// CHECK45-32-EX-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
// CHECK45-32-EX-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
// CHECK45-32-EX-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
// CHECK45-32-EX-NEXT: br label [[SIMD_IF_END]]
// CHECK45-32-EX: simd.if.end:
// CHECK45-32-EX-NEXT: call void @__kmpc_target_deinit()
// CHECK45-32-EX-NEXT: ret void
//
//
// CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
// CHECK45-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
// CHECK45-32-EX-NEXT: entry:
// CHECK45-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK45-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK45-32-EX-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
// CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
// CHECK45-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
// CHECK45-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK45-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK45-32-EX: user_code.entry:
// CHECK45-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK45-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-EX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK45-32-EX-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK45-32-EX-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK45-32-EX-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK45-32-EX-NEXT: store i32 0, ptr [[I]], align 4
// CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK45-32-EX-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK45-32-EX: simd.if.then:
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK45-32-EX: omp.inner.for.cond:
// CHECK45-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
// CHECK45-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK45-32-EX-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
// CHECK45-32-EX-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK45-32-EX: omp.inner.for.body:
// CHECK45-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK45-32-EX-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
// CHECK45-32-EX-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i32 0, i32 [[TMP8]]
// CHECK45-32-EX-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-EX-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
// CHECK45-32-EX-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], 1
// CHECK45-32-EX-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK45-32-EX-NEXT: store i16 [[CONV7]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK45-32-EX: omp.body.continue:
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK45-32-EX: omp.inner.for.inc:
// CHECK45-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-EX-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK45-32-EX-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
// CHECK45-32-EX: worker.exit:
// CHECK45-32-EX-NEXT: ret void
// CHECK45-32-EX: omp.inner.for.end:
// CHECK45-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK45-32-EX-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP11]], 0
// CHECK45-32-EX-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
// CHECK45-32-EX-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
// CHECK45-32-EX-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
// CHECK45-32-EX-NEXT: store i32 [[ADD12]], ptr [[I3]], align 4
// CHECK45-32-EX-NEXT: br label [[SIMD_IF_END]]
// CHECK45-32-EX: simd.if.end:
// CHECK45-32-EX-NEXT: call void @__kmpc_target_deinit()
// CHECK45-32-EX-NEXT: ret void
//
//
// CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
// CHECK45-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK45-32-EX-NEXT: entry:
// CHECK45-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK45-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK45-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39_kernel_environment, ptr [[DYN_PTR]])
// CHECK45-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK45-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK45-32-EX: user_code.entry:
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK45-32-EX: omp.inner.for.cond:
// CHECK45-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
// CHECK45-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// CHECK45-32-EX-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK45-32-EX: omp.inner.for.body:
// CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK45-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK45-32-EX-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP4]]
// CHECK45-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-32-EX-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK45-32-EX-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK45-32-EX: omp.body.continue:
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK45-32-EX: omp.inner.for.inc:
// CHECK45-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-32-EX-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK45-32-EX-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
// CHECK45-32-EX: worker.exit:
// CHECK45-32-EX-NEXT: ret void
// CHECK45-32-EX: omp.inner.for.end:
// CHECK45-32-EX-NEXT: store i32 10, ptr [[I]], align 4
// CHECK45-32-EX-NEXT: call void @__kmpc_target_deinit()
// CHECK45-32-EX-NEXT: ret void
//
//
// CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
// CHECK45-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]]) #[[ATTR0]] {
// CHECK45-32-EX-NEXT: entry:
// CHECK45-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-EX-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
// CHECK45-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: [[N1:%.*]] = alloca i32, align 4
// CHECK45-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK45-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK45-32-EX-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
// CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK45-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[N_ADDR]], align 4
// CHECK45-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44_kernel_environment, ptr [[DYN_PTR]])
// CHECK45-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK45-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK45-32-EX: user_code.entry:
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK45-32-EX-NEXT: store i32 0, ptr [[N1]], align 4
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK45-32-EX: omp.inner.for.cond:
// CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
// CHECK45-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP3]], 10
// CHECK45-32-EX-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK45-32-EX: omp.inner.for.body:
// CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK45-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK45-32-EX-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP5]]
// CHECK45-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-32-EX-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK45-32-EX-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK45-32-EX: omp.body.continue:
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK45-32-EX: omp.inner.for.inc:
// CHECK45-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-32-EX-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK45-32-EX-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
// CHECK45-32-EX: worker.exit:
// CHECK45-32-EX-NEXT: ret void
// CHECK45-32-EX: omp.inner.for.end:
// CHECK45-32-EX-NEXT: store i32 10, ptr [[I]], align 4
// CHECK45-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK45-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[N1]], align 4
// CHECK45-32-EX-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK45-32-EX-NEXT: store i32 [[ADD4]], ptr [[TMP1]], align 4
// CHECK45-32-EX-NEXT: call void @__kmpc_target_deinit()
// CHECK45-32-EX-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
// CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
// CHECK-64-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29_kernel_environment, ptr [[DYN_PTR]])
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK-64-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK-64-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-64-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK-64-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-64-NEXT: store i32 0, ptr [[I]], align 4
// CHECK-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-64-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK-64-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK-64: simd.if.then:
// CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-64: omp.inner.for.cond:
// CHECK-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
// CHECK-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-64-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
// CHECK-64-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-64: omp.inner.for.body:
// CHECK-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-64-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-64-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP8]] to i64
// CHECK-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK-64-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-64: omp.body.continue:
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-64: omp.inner.for.inc:
// CHECK-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-64-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-64-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
// CHECK-64: omp.inner.for.end:
// CHECK-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-64-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP10]], 0
// CHECK-64-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
// CHECK-64-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
// CHECK-64-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
// CHECK-64-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
// CHECK-64-NEXT: br label [[SIMD_IF_END]]
// CHECK-64: simd.if.end:
// CHECK-64-NEXT: call void @__kmpc_target_deinit()
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
// CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
// CHECK-64-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK-64-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK-64-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-64-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK-64-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-64-NEXT: store i32 0, ptr [[I]], align 4
// CHECK-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-64-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK-64-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK-64: simd.if.then:
// CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-64: omp.inner.for.cond:
// CHECK-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
// CHECK-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-64-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
// CHECK-64-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-64: omp.inner.for.body:
// CHECK-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-64-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-64-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP8]] to i64
// CHECK-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK-64-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
// CHECK-64-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
// CHECK-64-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], 1
// CHECK-64-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK-64-NEXT: store i16 [[CONV7]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
// CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-64: omp.body.continue:
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-64: omp.inner.for.inc:
// CHECK-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-64-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK-64-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
// CHECK-64: omp.inner.for.end:
// CHECK-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-64-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP11]], 0
// CHECK-64-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
// CHECK-64-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
// CHECK-64-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
// CHECK-64-NEXT: store i32 [[ADD12]], ptr [[I3]], align 4
// CHECK-64-NEXT: br label [[SIMD_IF_END]]
// CHECK-64: simd.if.end:
// CHECK-64-NEXT: call void @__kmpc_target_deinit()
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
// CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39_kernel_environment, ptr [[DYN_PTR]])
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-64: omp.inner.for.cond:
// CHECK-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
// CHECK-64-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// CHECK-64-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-64: omp.inner.for.body:
// CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-64-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64
// CHECK-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-64-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK-64-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-64: omp.body.continue:
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-64: omp.inner.for.inc:
// CHECK-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-64-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-64-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
// CHECK-64: omp.inner.for.end:
// CHECK-64-NEXT: store i32 10, ptr [[I]], align 4
// CHECK-64-NEXT: call void @__kmpc_target_deinit()
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
// CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]]) #[[ATTR0]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[N1:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK-64-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[N_ADDR]], align 8
// CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44_kernel_environment, ptr [[DYN_PTR]])
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: store i32 0, ptr [[N1]], align 4
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-64: omp.inner.for.cond:
// CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
// CHECK-64-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP3]], 10
// CHECK-64-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-64: omp.inner.for.body:
// CHECK-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-64-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64
// CHECK-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-64-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-64-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-64: omp.body.continue:
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-64: omp.inner.for.inc:
// CHECK-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-64-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK-64-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
// CHECK-64: omp.inner.for.end:
// CHECK-64-NEXT: store i32 10, ptr [[I]], align 4
// CHECK-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[N1]], align 4
// CHECK-64-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK-64-NEXT: store i32 [[ADD4]], ptr [[TMP1]], align 4
// CHECK-64-NEXT: call void @__kmpc_target_deinit()
// CHECK-64-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
// CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK-32-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29_kernel_environment, ptr [[DYN_PTR]])
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK-32-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK-32-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-32-NEXT: store i32 0, ptr [[I]], align 4
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK-32-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK-32: simd.if.then:
// CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-32: omp.inner.for.cond:
// CHECK-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-32-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
// CHECK-32-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-32: omp.inner.for.body:
// CHECK-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-32-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-32-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP8]]
// CHECK-32-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-32: omp.body.continue:
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-32: omp.inner.for.inc:
// CHECK-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-32-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-32-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
// CHECK-32: omp.inner.for.end:
// CHECK-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP10]], 0
// CHECK-32-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
// CHECK-32-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
// CHECK-32-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
// CHECK-32-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
// CHECK-32-NEXT: br label [[SIMD_IF_END]]
// CHECK-32: simd.if.end:
// CHECK-32-NEXT: call void @__kmpc_target_deinit()
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
// CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK-32-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK-32-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK-32-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-32-NEXT: store i32 0, ptr [[I]], align 4
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK-32-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK-32: simd.if.then:
// CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-32: omp.inner.for.cond:
// CHECK-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-32-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
// CHECK-32-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-32: omp.inner.for.body:
// CHECK-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-32-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-32-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i32 0, i32 [[TMP8]]
// CHECK-32-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
// CHECK-32-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], 1
// CHECK-32-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK-32-NEXT: store i16 [[CONV7]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-32: omp.body.continue:
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-32: omp.inner.for.inc:
// CHECK-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK-32-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
// CHECK-32: omp.inner.for.end:
// CHECK-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP11]], 0
// CHECK-32-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
// CHECK-32-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
// CHECK-32-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
// CHECK-32-NEXT: store i32 [[ADD12]], ptr [[I3]], align 4
// CHECK-32-NEXT: br label [[SIMD_IF_END]]
// CHECK-32: simd.if.end:
// CHECK-32-NEXT: call void @__kmpc_target_deinit()
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
// CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39_kernel_environment, ptr [[DYN_PTR]])
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-32: omp.inner.for.cond:
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
// CHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// CHECK-32-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-32: omp.inner.for.body:
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-32-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP4]]
// CHECK-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-32-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK-32-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-32: omp.body.continue:
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-32: omp.inner.for.inc:
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-32-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-32-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
// CHECK-32: omp.inner.for.end:
// CHECK-32-NEXT: store i32 10, ptr [[I]], align 4
// CHECK-32-NEXT: call void @__kmpc_target_deinit()
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
// CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]]) #[[ATTR0]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[N1:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK-32-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[N_ADDR]], align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44_kernel_environment, ptr [[DYN_PTR]])
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: store i32 0, ptr [[N1]], align 4
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-32: omp.inner.for.cond:
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
// CHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP3]], 10
// CHECK-32-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-32: omp.inner.for.body:
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-32-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP5]]
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-32-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-32-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-32: omp.body.continue:
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-32: omp.inner.for.inc:
// CHECK-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-32-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK-32-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
// CHECK-32: omp.inner.for.end:
// CHECK-32-NEXT: store i32 10, ptr [[I]], align 4
// CHECK-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[N1]], align 4
// CHECK-32-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK-32-NEXT: store i32 [[ADD4]], ptr [[TMP1]], align 4
// CHECK-32-NEXT: call void @__kmpc_target_deinit()
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
// CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29_kernel_environment, ptr [[DYN_PTR]])
// CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32-EX: user_code.entry:
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-EX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK-32-EX-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-32-EX-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK-32-EX-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-32-EX-NEXT: store i32 0, ptr [[I]], align 4
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK-32-EX-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK-32-EX: simd.if.then:
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-32-EX: omp.inner.for.cond:
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-32-EX-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
// CHECK-32-EX-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-32-EX: omp.inner.for.body:
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-32-EX-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-32-EX-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP8]]
// CHECK-32-EX-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-32-EX: omp.body.continue:
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-32-EX: omp.inner.for.inc:
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-32-EX-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-32-EX-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK-32-EX: worker.exit:
// CHECK-32-EX-NEXT: ret void
// CHECK-32-EX: omp.inner.for.end:
// CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-EX-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP10]], 0
// CHECK-32-EX-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
// CHECK-32-EX-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
// CHECK-32-EX-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
// CHECK-32-EX-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
// CHECK-32-EX-NEXT: br label [[SIMD_IF_END]]
// CHECK-32-EX: simd.if.end:
// CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
// CHECK-32-EX-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
// CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
// CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32-EX: user_code.entry:
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-EX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK-32-EX-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-32-EX-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK-32-EX-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-32-EX-NEXT: store i32 0, ptr [[I]], align 4
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK-32-EX-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK-32-EX: simd.if.then:
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-32-EX: omp.inner.for.cond:
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-32-EX-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
// CHECK-32-EX-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-32-EX: omp.inner.for.body:
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-32-EX-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-32-EX-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i32 0, i32 [[TMP8]]
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-EX-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
// CHECK-32-EX-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], 1
// CHECK-32-EX-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK-32-EX-NEXT: store i16 [[CONV7]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-32-EX: omp.body.continue:
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-32-EX: omp.inner.for.inc:
// CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-EX-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK-32-EX-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
// CHECK-32-EX: worker.exit:
// CHECK-32-EX-NEXT: ret void
// CHECK-32-EX: omp.inner.for.end:
// CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-EX-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP11]], 0
// CHECK-32-EX-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
// CHECK-32-EX-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
// CHECK-32-EX-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
// CHECK-32-EX-NEXT: store i32 [[ADD12]], ptr [[I3]], align 4
// CHECK-32-EX-NEXT: br label [[SIMD_IF_END]]
// CHECK-32-EX: simd.if.end:
// CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
// CHECK-32-EX-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
// CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39_kernel_environment, ptr [[DYN_PTR]])
// CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32-EX: user_code.entry:
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-32-EX: omp.inner.for.cond:
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
// CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
// CHECK-32-EX-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-32-EX: omp.inner.for.body:
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-32-EX-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP4]]
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-32-EX-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK-32-EX-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-32-EX: omp.body.continue:
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-32-EX: omp.inner.for.inc:
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-32-EX-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-32-EX-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
// CHECK-32-EX: worker.exit:
// CHECK-32-EX-NEXT: ret void
// CHECK-32-EX: omp.inner.for.end:
// CHECK-32-EX-NEXT: store i32 10, ptr [[I]], align 4
// CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
// CHECK-32-EX-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
// CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]]) #[[ATTR0]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[N1:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[N_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44_kernel_environment, ptr [[DYN_PTR]])
// CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32-EX: user_code.entry:
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
// CHECK-32-EX-NEXT: store i32 0, ptr [[N1]], align 4
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-32-EX: omp.inner.for.cond:
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
// CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP3]], 10
// CHECK-32-EX-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-32-EX: omp.inner.for.body:
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-32-EX-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP5]]
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-32-EX-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-32-EX-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-32-EX: omp.body.continue:
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-32-EX: omp.inner.for.inc:
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-32-EX-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK-32-EX-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
// CHECK-32-EX: worker.exit:
// CHECK-32-EX-NEXT: ret void
// CHECK-32-EX: omp.inner.for.end:
// CHECK-32-EX-NEXT: store i32 10, ptr [[I]], align 4
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[N1]], align 4
// CHECK-32-EX-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK-32-EX-NEXT: store i32 [[ADD4]], ptr [[TMP1]], align 4
// CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
// CHECK-32-EX-NEXT: ret void
//