// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// expected-no-diagnostics
// Check code generation
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -std=c++20 -fclang-abi-compat=latest -fopenmp -fopenmp-version=60 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// Check same results after serialization round-trip
// FIXME: They should be exactly the same but currently differ in function order
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -std=c++20 -fclang-abi-compat=latest -fopenmp -fopenmp-version=60 -emit-pch -o %t %s
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -std=c++20 -fclang-abi-compat=latest -fopenmp -fopenmp-version=60 -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK2
#ifndef HEADER
#define HEADER
// placeholder for loop body code.
// Variadic no-op sink; each test calls it with the induction variables so the
// CHECK lines can verify the values (and order) produced by the permuted loops.
extern "C" void body(...) {}
// Standalone interchange with fully runtime bounds/steps: trip counts are
// precomputed into .capture_expr. temporaries, then the generated nest runs
// the j-loop outermost and the i-loop innermost (see .permuted_0.iv.j /
// .permuted_1.iv.i in the CHECK lines below).
extern "C" void foo2(int start1, int start2, int end1, int end2, int step1, int step2) {
#pragma omp interchange
for (int i = start1; i < end1; i += step1)
for (int j = start2; j < end2; j += step2)
body(i, j);
}
// Interchange nested directly inside a worksharing 'omp for': after the
// permutation the j-loop becomes the workshared (outer) loop, while the
// i-loop stays a plain sequential inner loop in each chunk.
extern "C" void foo3() {
#pragma omp for
#pragma omp interchange
for (int i = 7; i < 17; i += 3)
for (int j = 7; j < 17; j += 3)
body(i, j);
}
// collapse(2) over the k-loop and the interchange result: only the outer
// permuted loop (j) is folded into the collapsed iteration space with k;
// the inner permuted loop (i) remains a regular for-loop in the body.
extern "C" void foo4() {
#pragma omp for collapse(2)
for (int k = 7; k < 17; k += 3)
#pragma omp interchange
for (int i = 7; i < 17; i += 3)
for (int j = 7; j < 17; j += 3)
body(i, j);
}
// collapse(4) consuming an interchanged nest: interchange swaps j and k
// (CHECK shows .permuted_0.iv.k outer, .permuted_1.iv.j next), and all four
// levels (i, k, j, l) are flattened into one 4*4*4*4 = 256-iteration space
// recovered via the div/mod chains in the CHECK lines.
extern "C" void foo6() {
#pragma omp for collapse(4)
for (int i = 7; i < 17; i += 3)
#pragma omp interchange
for (int j = 7; j < 17; j += 3)
for (int k = 7; k < 17; k += 3)
for (int l = 7; l < 17; l += 3)
body(i, j, k, l);
}
// Interchange of a C++20 range-based for (with init-statement) and a classic
// for-loop: after permutation the i-loop runs outermost and the materialized
// __begin2/__end2 iteration over arr runs innermost, with the iterator
// recomputed from .permuted_1.iv.__begin2 each trip.
extern "C" void foo9() {
double arr[128];
#pragma omp interchange
for (double c = 42; auto && v : arr)
for (int i = 0; i < 42; i += 2)
body(c, v, i);
}
// collapse(4) over the i-loop plus an interchanged pair of range-based fors
// (over A and over B) and the trailing j-loop; interchange swaps the two
// range-for levels before the collapse flattens the nest.
// NOTE(review): the autogenerated checks for this function extend beyond this
// chunk — any source change here invalidates them.
extern "C" void foo10() {
double A[128], B[16];
#pragma omp for collapse(4)
for (int i = 0; i < 128; ++i)
#pragma omp interchange
for (double c = 42; auto aa : A)
for (double d = 42; auto &bb : B)
for (int j = 0; j < 128; ++j)
body(i, c, aa, d, bb, j);
}
#endif /* HEADER */
// CHECK1-LABEL: define {{[^@]+}}@body
// CHECK1-SAME: (...) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@foo2
// CHECK1-SAME: (i32 noundef [[START1:%.*]], i32 noundef [[START2:%.*]], i32 noundef [[END1:%.*]], i32 noundef [[END2:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[START1_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[START2_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[END1_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[END2_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[STEP1_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[STEP2_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTNEW_STEP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTNEW_STEP7:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPERMUTED_0_IV_J:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPERMUTED_1_IV_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32 [[START1]], ptr [[START1_ADDR]], align 4
// CHECK1-NEXT: store i32 [[START2]], ptr [[START2_ADDR]], align 4
// CHECK1-NEXT: store i32 [[END1]], ptr [[END1_ADDR]], align 4
// CHECK1-NEXT: store i32 [[END2]], ptr [[END2_ADDR]], align 4
// CHECK1-NEXT: store i32 [[STEP1]], ptr [[STEP1_ADDR]], align 4
// CHECK1-NEXT: store i32 [[STEP2]], ptr [[STEP2_ADDR]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[START1_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP0]], ptr [[I]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[START1_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[END1_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[STEP1_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTNEW_STEP]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], [[TMP5]]
// CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], [[TMP6]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP7]]
// CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[START2_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP8]], ptr [[J]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[START2_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP9]], ptr [[DOTCAPTURE_EXPR_5]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[END2_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR_6]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[STEP2_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTNEW_STEP7]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4
// CHECK1-NEXT: [[SUB9:%.*]] = sub i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: [[SUB10:%.*]] = sub i32 [[SUB9]], 1
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTNEW_STEP7]], align 4
// CHECK1-NEXT: [[ADD11:%.*]] = add i32 [[SUB10]], [[TMP14]]
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTNEW_STEP7]], align 4
// CHECK1-NEXT: [[DIV12:%.*]] = udiv i32 [[ADD11]], [[TMP15]]
// CHECK1-NEXT: [[SUB13:%.*]] = sub i32 [[DIV12]], 1
// CHECK1-NEXT: store i32 [[SUB13]], ptr [[DOTCAPTURE_EXPR_8]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK1-NEXT: br label [[FOR_COND:%.*]]
// CHECK1: for.cond:
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_8]], align 4
// CHECK1-NEXT: [[ADD14:%.*]] = add i32 [[TMP17]], 1
// CHECK1-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP16]], [[ADD14]]
// CHECK1-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END24:%.*]]
// CHECK1: for.body:
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTNEW_STEP7]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP19]], [[TMP20]]
// CHECK1-NEXT: [[ADD15:%.*]] = add i32 [[TMP18]], [[MUL]]
// CHECK1-NEXT: store i32 [[ADD15]], ptr [[J]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND16:%.*]]
// CHECK1: for.cond16:
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[ADD17:%.*]] = add i32 [[TMP22]], 1
// CHECK1-NEXT: [[CMP18:%.*]] = icmp ult i32 [[TMP21]], [[ADD17]]
// CHECK1-NEXT: br i1 [[CMP18]], label [[FOR_BODY19:%.*]], label [[FOR_END:%.*]]
// CHECK1: for.body19:
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
// CHECK1-NEXT: [[MUL20:%.*]] = mul i32 [[TMP24]], [[TMP25]]
// CHECK1-NEXT: [[ADD21:%.*]] = add i32 [[TMP23]], [[MUL20]]
// CHECK1-NEXT: store i32 [[ADD21]], ptr [[I]], align 4
// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[I]], align 4
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[J]], align 4
// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP26]], i32 noundef [[TMP27]])
// CHECK1-NEXT: br label [[FOR_INC:%.*]]
// CHECK1: for.inc:
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add i32 [[TMP28]], 1
// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND16]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK1: for.end:
// CHECK1-NEXT: br label [[FOR_INC22:%.*]]
// CHECK1: for.inc22:
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK1-NEXT: [[INC23:%.*]] = add i32 [[TMP29]], 1
// CHECK1-NEXT: store i32 [[INC23]], ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK1: for.end24:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@foo3
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPERMUTED_0_IV_J:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPERMUTED_1_IV_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:[0-9]+]])
// CHECK1-NEXT: store i32 7, ptr [[I]], align 4
// CHECK1-NEXT: store i32 7, ptr [[J]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 3, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], 3
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP2]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP6]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK1-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP7]], 3
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 7, [[MUL2]]
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[J]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND:%.*]]
// CHECK1: for.cond:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP8]], 4
// CHECK1-NEXT: br i1 [[CMP4]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK1: for.body:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[TMP9]], 3
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 7, [[MUL5]]
// CHECK1-NEXT: store i32 [[ADD6]], ptr [[I]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[J]], align 4
// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP10]], i32 noundef [[TMP11]])
// CHECK1-NEXT: br label [[FOR_INC:%.*]]
// CHECK1: for.inc:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK1: for.end:
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK1-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@foo4
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[K:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPERMUTED_0_IV_J:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPERMUTED_1_IV_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
// CHECK1-NEXT: store i32 7, ptr [[I]], align 4
// CHECK1-NEXT: store i32 7, ptr [[J]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 15, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], 15
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 15, [[COND_TRUE]] ], [ [[TMP2]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP6]], 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 3
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 7, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[K]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP8]], 4
// CHECK1-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], [[MUL4]]
// CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]]
// CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK1-NEXT: [[MUL7:%.*]] = mul nsw i32 [[TMP9]], 3
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 7, [[MUL7]]
// CHECK1-NEXT: store i32 [[ADD8]], ptr [[J]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND:%.*]]
// CHECK1: for.cond:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: [[CMP9:%.*]] = icmp slt i32 [[TMP10]], 4
// CHECK1-NEXT: br i1 [[CMP9]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK1: for.body:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP11]], 3
// CHECK1-NEXT: [[ADD11:%.*]] = add nsw i32 7, [[MUL10]]
// CHECK1-NEXT: store i32 [[ADD11]], ptr [[I]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[J]], align 4
// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP12]], i32 noundef [[TMP13]])
// CHECK1-NEXT: br label [[FOR_INC:%.*]]
// CHECK1: for.inc:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK1: for.end:
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK1-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@foo6
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[K:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPERMUTED_0_IV_K:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPERMUTED_1_IV_J:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[L:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
// CHECK1-NEXT: store i32 7, ptr [[J]], align 4
// CHECK1-NEXT: store i32 7, ptr [[K]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 255, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], 255
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 255, [[COND_TRUE]] ], [ [[TMP2]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
// CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP6]], 64
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 3
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 7, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[DIV5:%.*]] = sdiv i32 [[TMP8]], 64
// CHECK1-NEXT: [[MUL6:%.*]] = mul nsw i32 [[DIV5]], 64
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], [[MUL6]]
// CHECK1-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB]], 16
// CHECK1-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
// CHECK1-NEXT: store i32 [[ADD9]], ptr [[DOTPERMUTED_0_IV_K]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[DIV10:%.*]] = sdiv i32 [[TMP10]], 64
// CHECK1-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 64
// CHECK1-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP9]], [[MUL11]]
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[DIV13:%.*]] = sdiv i32 [[TMP12]], 64
// CHECK1-NEXT: [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 64
// CHECK1-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP11]], [[MUL14]]
// CHECK1-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 16
// CHECK1-NEXT: [[MUL17:%.*]] = mul nsw i32 [[DIV16]], 16
// CHECK1-NEXT: [[SUB18:%.*]] = sub nsw i32 [[SUB12]], [[MUL17]]
// CHECK1-NEXT: [[DIV19:%.*]] = sdiv i32 [[SUB18]], 4
// CHECK1-NEXT: [[MUL20:%.*]] = mul nsw i32 [[DIV19]], 1
// CHECK1-NEXT: [[ADD21:%.*]] = add nsw i32 0, [[MUL20]]
// CHECK1-NEXT: store i32 [[ADD21]], ptr [[DOTPERMUTED_1_IV_J]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[DIV22:%.*]] = sdiv i32 [[TMP14]], 64
// CHECK1-NEXT: [[MUL23:%.*]] = mul nsw i32 [[DIV22]], 64
// CHECK1-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP13]], [[MUL23]]
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[DIV25:%.*]] = sdiv i32 [[TMP16]], 64
// CHECK1-NEXT: [[MUL26:%.*]] = mul nsw i32 [[DIV25]], 64
// CHECK1-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP15]], [[MUL26]]
// CHECK1-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 16
// CHECK1-NEXT: [[MUL29:%.*]] = mul nsw i32 [[DIV28]], 16
// CHECK1-NEXT: [[SUB30:%.*]] = sub nsw i32 [[SUB24]], [[MUL29]]
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[DIV31:%.*]] = sdiv i32 [[TMP18]], 64
// CHECK1-NEXT: [[MUL32:%.*]] = mul nsw i32 [[DIV31]], 64
// CHECK1-NEXT: [[SUB33:%.*]] = sub nsw i32 [[TMP17]], [[MUL32]]
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[DIV34:%.*]] = sdiv i32 [[TMP20]], 64
// CHECK1-NEXT: [[MUL35:%.*]] = mul nsw i32 [[DIV34]], 64
// CHECK1-NEXT: [[SUB36:%.*]] = sub nsw i32 [[TMP19]], [[MUL35]]
// CHECK1-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 16
// CHECK1-NEXT: [[MUL38:%.*]] = mul nsw i32 [[DIV37]], 16
// CHECK1-NEXT: [[SUB39:%.*]] = sub nsw i32 [[SUB33]], [[MUL38]]
// CHECK1-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 4
// CHECK1-NEXT: [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 4
// CHECK1-NEXT: [[SUB42:%.*]] = sub nsw i32 [[SUB30]], [[MUL41]]
// CHECK1-NEXT: [[MUL43:%.*]] = mul nsw i32 [[SUB42]], 3
// CHECK1-NEXT: [[ADD44:%.*]] = add nsw i32 7, [[MUL43]]
// CHECK1-NEXT: store i32 [[ADD44]], ptr [[L]], align 4
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_K]], align 4
// CHECK1-NEXT: [[MUL45:%.*]] = mul nsw i32 [[TMP21]], 3
// CHECK1-NEXT: [[ADD46:%.*]] = add nsw i32 7, [[MUL45]]
// CHECK1-NEXT: store i32 [[ADD46]], ptr [[K]], align 4
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_J]], align 4
// CHECK1-NEXT: [[MUL47:%.*]] = mul nsw i32 [[TMP22]], 3
// CHECK1-NEXT: [[ADD48:%.*]] = add nsw i32 7, [[MUL47]]
// CHECK1-NEXT: store i32 [[ADD48]], ptr [[J]], align 4
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[J]], align 4
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[K]], align 4
// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[L]], align 4
// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP23]], i32 noundef [[TMP24]], i32 noundef [[TMP25]], i32 noundef [[TMP26]])
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD49:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK1-NEXT: store i32 [[ADD49]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@foo9
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[ARR:%.*]] = alloca [128 x double], align 16
// CHECK1-NEXT: [[C:%.*]] = alloca double, align 8
// CHECK1-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[__END2:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPERMUTED_0_IV_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPERMUTED_1_IV___BEGIN2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[V:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store double 4.200000e+01, ptr [[C]], align 8
// CHECK1-NEXT: store ptr [[ARR]], ptr [[__RANGE2]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[__RANGE2]], align 8
// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [128 x double], ptr [[TMP0]], i64 0, i64 0
// CHECK1-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 128
// CHECK1-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__RANGE2]], align 8
// CHECK1-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [128 x double], ptr [[TMP1]], i64 0, i64 0
// CHECK1-NEXT: store ptr [[ARRAYDECAY1]], ptr [[__BEGIN2]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__RANGE2]], align 8
// CHECK1-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [128 x double], ptr [[TMP2]], i64 0, i64 0
// CHECK1-NEXT: store ptr [[ARRAYDECAY2]], ptr [[DOTCAPTURE_EXPR_]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[__END2]], align 8
// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTCAPTURE_EXPR_3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
// CHECK1-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP4]] to i64
// CHECK1-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP5]] to i64
// CHECK1-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]]
// CHECK1-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[SUB]], 1
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i64 [[ADD]], 1
// CHECK1-NEXT: [[SUB5:%.*]] = sub nsw i64 [[DIV]], 1
// CHECK1-NEXT: store i64 [[SUB5]], ptr [[DOTCAPTURE_EXPR_4]], align 8
// CHECK1-NEXT: store i32 0, ptr [[I]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTPERMUTED_0_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND:%.*]]
// CHECK1: for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_I]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], 21
// CHECK1-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END15:%.*]]
// CHECK1: for.body:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_I]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 2
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD6]], ptr [[I]], align 4
// CHECK1-NEXT: store i64 0, ptr [[DOTPERMUTED_1_IV___BEGIN2]], align 8
// CHECK1-NEXT: br label [[FOR_COND7:%.*]]
// CHECK1: for.cond7:
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPERMUTED_1_IV___BEGIN2]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP9]], 1
// CHECK1-NEXT: [[CMP9:%.*]] = icmp slt i64 [[TMP8]], [[ADD8]]
// CHECK1-NEXT: br i1 [[CMP9]], label [[FOR_BODY10:%.*]], label [[FOR_END:%.*]]
// CHECK1: for.body10:
// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTPERMUTED_1_IV___BEGIN2]], align 8
// CHECK1-NEXT: [[MUL11:%.*]] = mul nsw i64 [[TMP11]], 1
// CHECK1-NEXT: [[ADD_PTR12:%.*]] = getelementptr inbounds double, ptr [[TMP10]], i64 [[MUL11]]
// CHECK1-NEXT: store ptr [[ADD_PTR12]], ptr [[__BEGIN2]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__BEGIN2]], align 8
// CHECK1-NEXT: store ptr [[TMP12]], ptr [[V]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[C]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[V]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = load double, ptr [[TMP14]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4
// CHECK1-NEXT: call void (...) @body(double noundef [[TMP13]], double noundef [[TMP15]], i32 noundef [[TMP16]])
// CHECK1-NEXT: br label [[FOR_INC:%.*]]
// CHECK1: for.inc:
// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTPERMUTED_1_IV___BEGIN2]], align 8
// CHECK1-NEXT: [[INC:%.*]] = add nsw i64 [[TMP17]], 1
// CHECK1-NEXT: store i64 [[INC]], ptr [[DOTPERMUTED_1_IV___BEGIN2]], align 8
// CHECK1-NEXT: br label [[FOR_COND7]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK1: for.end:
// CHECK1-NEXT: br label [[FOR_INC13:%.*]]
// CHECK1: for.inc13:
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_I]], align 4
// CHECK1-NEXT: [[INC14:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK1-NEXT: store i32 [[INC14]], ptr [[DOTPERMUTED_0_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
// CHECK1: for.end15:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@foo10
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A:%.*]] = alloca [128 x double], align 16
// CHECK1-NEXT: [[B:%.*]] = alloca [16 x double], align 16
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[_TMP2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[C:%.*]] = alloca double, align 8
// CHECK1-NEXT: [[__RANGE3:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[__END3:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[__BEGIN3:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_7:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[D:%.*]] = alloca double, align 8
// CHECK1-NEXT: [[__RANGE4:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[__END4:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[__BEGIN4:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_14:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_15:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPERMUTED_0_IV___BEGIN4:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPERMUTED_1_IV___BEGIN3:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I37:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPERMUTED_0_IV___BEGIN438:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPERMUTED_1_IV___BEGIN339:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[J40:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[BB:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[AA:%.*]] = alloca double, align 8
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
// CHECK1-NEXT: store double 4.200000e+01, ptr [[C]], align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[__RANGE3]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__RANGE3]], align 8
// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [128 x double], ptr [[TMP1]], i64 0, i64 0
// CHECK1-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 128
// CHECK1-NEXT: store ptr [[ADD_PTR]], ptr [[__END3]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__RANGE3]], align 8
// CHECK1-NEXT: [[ARRAYDECAY4:%.*]] = getelementptr inbounds [128 x double], ptr [[TMP2]], i64 0, i64 0
// CHECK1-NEXT: store ptr [[ARRAYDECAY4]], ptr [[__BEGIN3]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[__RANGE3]], align 8
// CHECK1-NEXT: [[ARRAYDECAY5:%.*]] = getelementptr inbounds [128 x double], ptr [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: store ptr [[ARRAYDECAY5]], ptr [[DOTCAPTURE_EXPR_]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[__END3]], align 8
// CHECK1-NEXT: store ptr [[TMP4]], ptr [[DOTCAPTURE_EXPR_6]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_6]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
// CHECK1-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP5]] to i64
// CHECK1-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP6]] to i64
// CHECK1-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]]
// CHECK1-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[SUB]], 1
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i64 [[ADD]], 1
// CHECK1-NEXT: [[SUB8:%.*]] = sub nsw i64 [[DIV]], 1
// CHECK1-NEXT: store i64 [[SUB8]], ptr [[DOTCAPTURE_EXPR_7]], align 8
// CHECK1-NEXT: store double 4.200000e+01, ptr [[D]], align 8
// CHECK1-NEXT: store ptr [[B]], ptr [[__RANGE4]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[__RANGE4]], align 8
// CHECK1-NEXT: [[ARRAYDECAY9:%.*]] = getelementptr inbounds [16 x double], ptr [[TMP7]], i64 0, i64 0
// CHECK1-NEXT: [[ADD_PTR10:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY9]], i64 16
// CHECK1-NEXT: store ptr [[ADD_PTR10]], ptr [[__END4]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__RANGE4]], align 8
// CHECK1-NEXT: [[ARRAYDECAY11:%.*]] = getelementptr inbounds [16 x double], ptr [[TMP8]], i64 0, i64 0
// CHECK1-NEXT: store ptr [[ARRAYDECAY11]], ptr [[__BEGIN4]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__RANGE4]], align 8
// CHECK1-NEXT: [[ARRAYDECAY13:%.*]] = getelementptr inbounds [16 x double], ptr [[TMP9]], i64 0, i64 0
// CHECK1-NEXT: store ptr [[ARRAYDECAY13]], ptr [[DOTCAPTURE_EXPR_12]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__END4]], align 8
// CHECK1-NEXT: store ptr [[TMP10]], ptr [[DOTCAPTURE_EXPR_14]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_14]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_12]], align 8
// CHECK1-NEXT: [[SUB_PTR_LHS_CAST16:%.*]] = ptrtoint ptr [[TMP11]] to i64
// CHECK1-NEXT: [[SUB_PTR_RHS_CAST17:%.*]] = ptrtoint ptr [[TMP12]] to i64
// CHECK1-NEXT: [[SUB_PTR_SUB18:%.*]] = sub i64 [[SUB_PTR_LHS_CAST16]], [[SUB_PTR_RHS_CAST17]]
// CHECK1-NEXT: [[SUB_PTR_DIV19:%.*]] = sdiv exact i64 [[SUB_PTR_SUB18]], 8
// CHECK1-NEXT: [[SUB20:%.*]] = sub nsw i64 [[SUB_PTR_DIV19]], 1
// CHECK1-NEXT: [[ADD21:%.*]] = add nsw i64 [[SUB20]], 1
// CHECK1-NEXT: [[DIV22:%.*]] = sdiv i64 [[ADD21]], 1
// CHECK1-NEXT: [[SUB23:%.*]] = sub nsw i64 [[DIV22]], 1
// CHECK1-NEXT: store i64 [[SUB23]], ptr [[DOTCAPTURE_EXPR_15]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_15]], align 8
// CHECK1-NEXT: [[ADD25:%.*]] = add nsw i64 [[TMP13]], 1
// CHECK1-NEXT: store i64 [[ADD25]], ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_7]], align 8
// CHECK1-NEXT: [[ADD27:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK1-NEXT: store i64 [[ADD27]], ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB29:%.*]] = sub nsw i64 [[TMP15]], 0
// CHECK1-NEXT: [[DIV30:%.*]] = sdiv i64 [[SUB29]], 1
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 128, [[DIV30]]
// CHECK1-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB31:%.*]] = sub nsw i64 [[TMP16]], 0
// CHECK1-NEXT: [[DIV32:%.*]] = sdiv i64 [[SUB31]], 1
// CHECK1-NEXT: [[MUL33:%.*]] = mul nsw i64 [[MUL]], [[DIV32]]
// CHECK1-NEXT: [[MUL34:%.*]] = mul nsw i64 [[MUL33]], 128
// CHECK1-NEXT: [[SUB35:%.*]] = sub nsw i64 [[MUL34]], 1
// CHECK1-NEXT: store i64 [[SUB35]], ptr [[DOTCAPTURE_EXPR_28]], align 8
// CHECK1-NEXT: store i32 0, ptr [[I]], align 4
// CHECK1-NEXT: store i64 0, ptr [[DOTPERMUTED_0_IV___BEGIN4]], align 8
// CHECK1-NEXT: store i64 0, ptr [[DOTPERMUTED_1_IV___BEGIN3]], align 8
// CHECK1-NEXT: store i32 0, ptr [[J]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i64 0, [[TMP17]]
// CHECK1-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1: land.lhs.true:
// CHECK1-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[CMP36:%.*]] = icmp slt i64 0, [[TMP18]]
// CHECK1-NEXT: br i1 [[CMP36]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.then:
// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_28]], align 8
// CHECK1-NEXT: store i64 [[TMP19]], ptr [[DOTOMP_UB]], align 8
// CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_28]], align 8
// CHECK1-NEXT: [[CMP41:%.*]] = icmp sgt i64 [[TMP20]], [[TMP21]]
// CHECK1-NEXT: br i1 [[CMP41]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_28]], align 8
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ [[TMP22]], [[COND_TRUE]] ], [ [[TMP23]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP24]], ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP42:%.*]] = icmp sle i64 [[TMP25]], [[TMP26]]
// CHECK1-NEXT: br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB43:%.*]] = sub nsw i64 [[TMP28]], 0
// CHECK1-NEXT: [[DIV44:%.*]] = sdiv i64 [[SUB43]], 1
// CHECK1-NEXT: [[MUL45:%.*]] = mul nsw i64 1, [[DIV44]]
// CHECK1-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB46:%.*]] = sub nsw i64 [[TMP29]], 0
// CHECK1-NEXT: [[DIV47:%.*]] = sdiv i64 [[SUB46]], 1
// CHECK1-NEXT: [[MUL48:%.*]] = mul nsw i64 [[MUL45]], [[DIV47]]
// CHECK1-NEXT: [[MUL49:%.*]] = mul nsw i64 [[MUL48]], 128
// CHECK1-NEXT: [[DIV50:%.*]] = sdiv i64 [[TMP27]], [[MUL49]]
// CHECK1-NEXT: [[MUL51:%.*]] = mul nsw i64 [[DIV50]], 1
// CHECK1-NEXT: [[ADD52:%.*]] = add nsw i64 0, [[MUL51]]
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[ADD52]] to i32
// CHECK1-NEXT: store i32 [[CONV]], ptr [[I37]], align 4
// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB53:%.*]] = sub nsw i64 [[TMP32]], 0
// CHECK1-NEXT: [[DIV54:%.*]] = sdiv i64 [[SUB53]], 1
// CHECK1-NEXT: [[MUL55:%.*]] = mul nsw i64 1, [[DIV54]]
// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB56:%.*]] = sub nsw i64 [[TMP33]], 0
// CHECK1-NEXT: [[DIV57:%.*]] = sdiv i64 [[SUB56]], 1
// CHECK1-NEXT: [[MUL58:%.*]] = mul nsw i64 [[MUL55]], [[DIV57]]
// CHECK1-NEXT: [[MUL59:%.*]] = mul nsw i64 [[MUL58]], 128
// CHECK1-NEXT: [[DIV60:%.*]] = sdiv i64 [[TMP31]], [[MUL59]]
// CHECK1-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB61:%.*]] = sub nsw i64 [[TMP34]], 0
// CHECK1-NEXT: [[DIV62:%.*]] = sdiv i64 [[SUB61]], 1
// CHECK1-NEXT: [[MUL63:%.*]] = mul nsw i64 1, [[DIV62]]
// CHECK1-NEXT: [[TMP35:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB64:%.*]] = sub nsw i64 [[TMP35]], 0
// CHECK1-NEXT: [[DIV65:%.*]] = sdiv i64 [[SUB64]], 1
// CHECK1-NEXT: [[MUL66:%.*]] = mul nsw i64 [[MUL63]], [[DIV65]]
// CHECK1-NEXT: [[MUL67:%.*]] = mul nsw i64 [[MUL66]], 128
// CHECK1-NEXT: [[MUL68:%.*]] = mul nsw i64 [[DIV60]], [[MUL67]]
// CHECK1-NEXT: [[SUB69:%.*]] = sub nsw i64 [[TMP30]], [[MUL68]]
// CHECK1-NEXT: [[TMP36:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB70:%.*]] = sub nsw i64 [[TMP36]], 0
// CHECK1-NEXT: [[DIV71:%.*]] = sdiv i64 [[SUB70]], 1
// CHECK1-NEXT: [[MUL72:%.*]] = mul nsw i64 1, [[DIV71]]
// CHECK1-NEXT: [[MUL73:%.*]] = mul nsw i64 [[MUL72]], 128
// CHECK1-NEXT: [[DIV74:%.*]] = sdiv i64 [[SUB69]], [[MUL73]]
// CHECK1-NEXT: [[MUL75:%.*]] = mul nsw i64 [[DIV74]], 1
// CHECK1-NEXT: [[ADD76:%.*]] = add nsw i64 0, [[MUL75]]
// CHECK1-NEXT: store i64 [[ADD76]], ptr [[DOTPERMUTED_0_IV___BEGIN438]], align 8
// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB77:%.*]] = sub nsw i64 [[TMP39]], 0
// CHECK1-NEXT: [[DIV78:%.*]] = sdiv i64 [[SUB77]], 1
// CHECK1-NEXT: [[MUL79:%.*]] = mul nsw i64 1, [[DIV78]]
// CHECK1-NEXT: [[TMP40:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB80:%.*]] = sub nsw i64 [[TMP40]], 0
// CHECK1-NEXT: [[DIV81:%.*]] = sdiv i64 [[SUB80]], 1
// CHECK1-NEXT: [[MUL82:%.*]] = mul nsw i64 [[MUL79]], [[DIV81]]
// CHECK1-NEXT: [[MUL83:%.*]] = mul nsw i64 [[MUL82]], 128
// CHECK1-NEXT: [[DIV84:%.*]] = sdiv i64 [[TMP38]], [[MUL83]]
// CHECK1-NEXT: [[TMP41:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB85:%.*]] = sub nsw i64 [[TMP41]], 0
// CHECK1-NEXT: [[DIV86:%.*]] = sdiv i64 [[SUB85]], 1
// CHECK1-NEXT: [[MUL87:%.*]] = mul nsw i64 1, [[DIV86]]
// CHECK1-NEXT: [[TMP42:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB88:%.*]] = sub nsw i64 [[TMP42]], 0
// CHECK1-NEXT: [[DIV89:%.*]] = sdiv i64 [[SUB88]], 1
// CHECK1-NEXT: [[MUL90:%.*]] = mul nsw i64 [[MUL87]], [[DIV89]]
// CHECK1-NEXT: [[MUL91:%.*]] = mul nsw i64 [[MUL90]], 128
// CHECK1-NEXT: [[MUL92:%.*]] = mul nsw i64 [[DIV84]], [[MUL91]]
// CHECK1-NEXT: [[SUB93:%.*]] = sub nsw i64 [[TMP37]], [[MUL92]]
// CHECK1-NEXT: [[TMP43:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP44:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP45:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB94:%.*]] = sub nsw i64 [[TMP45]], 0
// CHECK1-NEXT: [[DIV95:%.*]] = sdiv i64 [[SUB94]], 1
// CHECK1-NEXT: [[MUL96:%.*]] = mul nsw i64 1, [[DIV95]]
// CHECK1-NEXT: [[TMP46:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB97:%.*]] = sub nsw i64 [[TMP46]], 0
// CHECK1-NEXT: [[DIV98:%.*]] = sdiv i64 [[SUB97]], 1
// CHECK1-NEXT: [[MUL99:%.*]] = mul nsw i64 [[MUL96]], [[DIV98]]
// CHECK1-NEXT: [[MUL100:%.*]] = mul nsw i64 [[MUL99]], 128
// CHECK1-NEXT: [[DIV101:%.*]] = sdiv i64 [[TMP44]], [[MUL100]]
// CHECK1-NEXT: [[TMP47:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB102:%.*]] = sub nsw i64 [[TMP47]], 0
// CHECK1-NEXT: [[DIV103:%.*]] = sdiv i64 [[SUB102]], 1
// CHECK1-NEXT: [[MUL104:%.*]] = mul nsw i64 1, [[DIV103]]
// CHECK1-NEXT: [[TMP48:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB105:%.*]] = sub nsw i64 [[TMP48]], 0
// CHECK1-NEXT: [[DIV106:%.*]] = sdiv i64 [[SUB105]], 1
// CHECK1-NEXT: [[MUL107:%.*]] = mul nsw i64 [[MUL104]], [[DIV106]]
// CHECK1-NEXT: [[MUL108:%.*]] = mul nsw i64 [[MUL107]], 128
// CHECK1-NEXT: [[MUL109:%.*]] = mul nsw i64 [[DIV101]], [[MUL108]]
// CHECK1-NEXT: [[SUB110:%.*]] = sub nsw i64 [[TMP43]], [[MUL109]]
// CHECK1-NEXT: [[TMP49:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB111:%.*]] = sub nsw i64 [[TMP49]], 0
// CHECK1-NEXT: [[DIV112:%.*]] = sdiv i64 [[SUB111]], 1
// CHECK1-NEXT: [[MUL113:%.*]] = mul nsw i64 1, [[DIV112]]
// CHECK1-NEXT: [[MUL114:%.*]] = mul nsw i64 [[MUL113]], 128
// CHECK1-NEXT: [[DIV115:%.*]] = sdiv i64 [[SUB110]], [[MUL114]]
// CHECK1-NEXT: [[TMP50:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB116:%.*]] = sub nsw i64 [[TMP50]], 0
// CHECK1-NEXT: [[DIV117:%.*]] = sdiv i64 [[SUB116]], 1
// CHECK1-NEXT: [[MUL118:%.*]] = mul nsw i64 1, [[DIV117]]
// CHECK1-NEXT: [[MUL119:%.*]] = mul nsw i64 [[MUL118]], 128
// CHECK1-NEXT: [[MUL120:%.*]] = mul nsw i64 [[DIV115]], [[MUL119]]
// CHECK1-NEXT: [[SUB121:%.*]] = sub nsw i64 [[SUB93]], [[MUL120]]
// CHECK1-NEXT: [[DIV122:%.*]] = sdiv i64 [[SUB121]], 128
// CHECK1-NEXT: [[MUL123:%.*]] = mul nsw i64 [[DIV122]], 1
// CHECK1-NEXT: [[ADD124:%.*]] = add nsw i64 0, [[MUL123]]
// CHECK1-NEXT: store i64 [[ADD124]], ptr [[DOTPERMUTED_1_IV___BEGIN339]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB125:%.*]] = sub nsw i64 [[TMP53]], 0
// CHECK1-NEXT: [[DIV126:%.*]] = sdiv i64 [[SUB125]], 1
// CHECK1-NEXT: [[MUL127:%.*]] = mul nsw i64 1, [[DIV126]]
// CHECK1-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB128:%.*]] = sub nsw i64 [[TMP54]], 0
// CHECK1-NEXT: [[DIV129:%.*]] = sdiv i64 [[SUB128]], 1
// CHECK1-NEXT: [[MUL130:%.*]] = mul nsw i64 [[MUL127]], [[DIV129]]
// CHECK1-NEXT: [[MUL131:%.*]] = mul nsw i64 [[MUL130]], 128
// CHECK1-NEXT: [[DIV132:%.*]] = sdiv i64 [[TMP52]], [[MUL131]]
// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB133:%.*]] = sub nsw i64 [[TMP55]], 0
// CHECK1-NEXT: [[DIV134:%.*]] = sdiv i64 [[SUB133]], 1
// CHECK1-NEXT: [[MUL135:%.*]] = mul nsw i64 1, [[DIV134]]
// CHECK1-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB136:%.*]] = sub nsw i64 [[TMP56]], 0
// CHECK1-NEXT: [[DIV137:%.*]] = sdiv i64 [[SUB136]], 1
// CHECK1-NEXT: [[MUL138:%.*]] = mul nsw i64 [[MUL135]], [[DIV137]]
// CHECK1-NEXT: [[MUL139:%.*]] = mul nsw i64 [[MUL138]], 128
// CHECK1-NEXT: [[MUL140:%.*]] = mul nsw i64 [[DIV132]], [[MUL139]]
// CHECK1-NEXT: [[SUB141:%.*]] = sub nsw i64 [[TMP51]], [[MUL140]]
// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB142:%.*]] = sub nsw i64 [[TMP59]], 0
// CHECK1-NEXT: [[DIV143:%.*]] = sdiv i64 [[SUB142]], 1
// CHECK1-NEXT: [[MUL144:%.*]] = mul nsw i64 1, [[DIV143]]
// CHECK1-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB145:%.*]] = sub nsw i64 [[TMP60]], 0
// CHECK1-NEXT: [[DIV146:%.*]] = sdiv i64 [[SUB145]], 1
// CHECK1-NEXT: [[MUL147:%.*]] = mul nsw i64 [[MUL144]], [[DIV146]]
// CHECK1-NEXT: [[MUL148:%.*]] = mul nsw i64 [[MUL147]], 128
// CHECK1-NEXT: [[DIV149:%.*]] = sdiv i64 [[TMP58]], [[MUL148]]
// CHECK1-NEXT: [[TMP61:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB150:%.*]] = sub nsw i64 [[TMP61]], 0
// CHECK1-NEXT: [[DIV151:%.*]] = sdiv i64 [[SUB150]], 1
// CHECK1-NEXT: [[MUL152:%.*]] = mul nsw i64 1, [[DIV151]]
// CHECK1-NEXT: [[TMP62:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB153:%.*]] = sub nsw i64 [[TMP62]], 0
// CHECK1-NEXT: [[DIV154:%.*]] = sdiv i64 [[SUB153]], 1
// CHECK1-NEXT: [[MUL155:%.*]] = mul nsw i64 [[MUL152]], [[DIV154]]
// CHECK1-NEXT: [[MUL156:%.*]] = mul nsw i64 [[MUL155]], 128
// CHECK1-NEXT: [[MUL157:%.*]] = mul nsw i64 [[DIV149]], [[MUL156]]
// CHECK1-NEXT: [[SUB158:%.*]] = sub nsw i64 [[TMP57]], [[MUL157]]
// CHECK1-NEXT: [[TMP63:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB159:%.*]] = sub nsw i64 [[TMP63]], 0
// CHECK1-NEXT: [[DIV160:%.*]] = sdiv i64 [[SUB159]], 1
// CHECK1-NEXT: [[MUL161:%.*]] = mul nsw i64 1, [[DIV160]]
// CHECK1-NEXT: [[MUL162:%.*]] = mul nsw i64 [[MUL161]], 128
// CHECK1-NEXT: [[DIV163:%.*]] = sdiv i64 [[SUB158]], [[MUL162]]
// CHECK1-NEXT: [[TMP64:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB164:%.*]] = sub nsw i64 [[TMP64]], 0
// CHECK1-NEXT: [[DIV165:%.*]] = sdiv i64 [[SUB164]], 1
// CHECK1-NEXT: [[MUL166:%.*]] = mul nsw i64 1, [[DIV165]]
// CHECK1-NEXT: [[MUL167:%.*]] = mul nsw i64 [[MUL166]], 128
// CHECK1-NEXT: [[MUL168:%.*]] = mul nsw i64 [[DIV163]], [[MUL167]]
// CHECK1-NEXT: [[SUB169:%.*]] = sub nsw i64 [[SUB141]], [[MUL168]]
// CHECK1-NEXT: [[TMP65:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP66:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP67:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB170:%.*]] = sub nsw i64 [[TMP67]], 0
// CHECK1-NEXT: [[DIV171:%.*]] = sdiv i64 [[SUB170]], 1
// CHECK1-NEXT: [[MUL172:%.*]] = mul nsw i64 1, [[DIV171]]
// CHECK1-NEXT: [[TMP68:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB173:%.*]] = sub nsw i64 [[TMP68]], 0
// CHECK1-NEXT: [[DIV174:%.*]] = sdiv i64 [[SUB173]], 1
// CHECK1-NEXT: [[MUL175:%.*]] = mul nsw i64 [[MUL172]], [[DIV174]]
// CHECK1-NEXT: [[MUL176:%.*]] = mul nsw i64 [[MUL175]], 128
// CHECK1-NEXT: [[DIV177:%.*]] = sdiv i64 [[TMP66]], [[MUL176]]
// CHECK1-NEXT: [[TMP69:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB178:%.*]] = sub nsw i64 [[TMP69]], 0
// CHECK1-NEXT: [[DIV179:%.*]] = sdiv i64 [[SUB178]], 1
// CHECK1-NEXT: [[MUL180:%.*]] = mul nsw i64 1, [[DIV179]]
// CHECK1-NEXT: [[TMP70:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB181:%.*]] = sub nsw i64 [[TMP70]], 0
// CHECK1-NEXT: [[DIV182:%.*]] = sdiv i64 [[SUB181]], 1
// CHECK1-NEXT: [[MUL183:%.*]] = mul nsw i64 [[MUL180]], [[DIV182]]
// CHECK1-NEXT: [[MUL184:%.*]] = mul nsw i64 [[MUL183]], 128
// CHECK1-NEXT: [[MUL185:%.*]] = mul nsw i64 [[DIV177]], [[MUL184]]
// CHECK1-NEXT: [[SUB186:%.*]] = sub nsw i64 [[TMP65]], [[MUL185]]
// CHECK1-NEXT: [[TMP71:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP72:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP73:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB187:%.*]] = sub nsw i64 [[TMP73]], 0
// CHECK1-NEXT: [[DIV188:%.*]] = sdiv i64 [[SUB187]], 1
// CHECK1-NEXT: [[MUL189:%.*]] = mul nsw i64 1, [[DIV188]]
// CHECK1-NEXT: [[TMP74:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB190:%.*]] = sub nsw i64 [[TMP74]], 0
// CHECK1-NEXT: [[DIV191:%.*]] = sdiv i64 [[SUB190]], 1
// CHECK1-NEXT: [[MUL192:%.*]] = mul nsw i64 [[MUL189]], [[DIV191]]
// CHECK1-NEXT: [[MUL193:%.*]] = mul nsw i64 [[MUL192]], 128
// CHECK1-NEXT: [[DIV194:%.*]] = sdiv i64 [[TMP72]], [[MUL193]]
// CHECK1-NEXT: [[TMP75:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK1-NEXT: [[SUB195:%.*]] = sub nsw i64 [[TMP75]], 0
// CHECK1-NEXT: [[DIV196:%.*]] = sdiv i64 [[SUB195]], 1
// CHECK1-NEXT: [[MUL197:%.*]] = mul nsw i64 1, [[DIV196]]
// CHECK1-NEXT: [[TMP76:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB198:%.*]] = sub nsw i64 [[TMP76]], 0
// CHECK1-NEXT: [[DIV199:%.*]] = sdiv i64 [[SUB198]], 1
// CHECK1-NEXT: [[MUL200:%.*]] = mul nsw i64 [[MUL197]], [[DIV199]]
// CHECK1-NEXT: [[MUL201:%.*]] = mul nsw i64 [[MUL200]], 128
// CHECK1-NEXT: [[MUL202:%.*]] = mul nsw i64 [[DIV194]], [[MUL201]]
// CHECK1-NEXT: [[SUB203:%.*]] = sub nsw i64 [[TMP71]], [[MUL202]]
// CHECK1-NEXT: [[TMP77:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB204:%.*]] = sub nsw i64 [[TMP77]], 0
// CHECK1-NEXT: [[DIV205:%.*]] = sdiv i64 [[SUB204]], 1
// CHECK1-NEXT: [[MUL206:%.*]] = mul nsw i64 1, [[DIV205]]
// CHECK1-NEXT: [[MUL207:%.*]] = mul nsw i64 [[MUL206]], 128
// CHECK1-NEXT: [[DIV208:%.*]] = sdiv i64 [[SUB203]], [[MUL207]]
// CHECK1-NEXT: [[TMP78:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK1-NEXT: [[SUB209:%.*]] = sub nsw i64 [[TMP78]], 0
// CHECK1-NEXT: [[DIV210:%.*]] = sdiv i64 [[SUB209]], 1
// CHECK1-NEXT: [[MUL211:%.*]] = mul nsw i64 1, [[DIV210]]
// CHECK1-NEXT: [[MUL212:%.*]] = mul nsw i64 [[MUL211]], 128
// CHECK1-NEXT: [[MUL213:%.*]] = mul nsw i64 [[DIV208]], [[MUL212]]
// CHECK1-NEXT: [[SUB214:%.*]] = sub nsw i64 [[SUB186]], [[MUL213]]
// CHECK1-NEXT: [[DIV215:%.*]] = sdiv i64 [[SUB214]], 128
// CHECK1-NEXT: [[MUL216:%.*]] = mul nsw i64 [[DIV215]], 128
// CHECK1-NEXT: [[SUB217:%.*]] = sub nsw i64 [[SUB169]], [[MUL216]]
// CHECK1-NEXT: [[MUL218:%.*]] = mul nsw i64 [[SUB217]], 1
// CHECK1-NEXT: [[ADD219:%.*]] = add nsw i64 0, [[MUL218]]
// CHECK1-NEXT: [[CONV220:%.*]] = trunc i64 [[ADD219]] to i32
// CHECK1-NEXT: store i32 [[CONV220]], ptr [[J40]], align 4
// CHECK1-NEXT: [[TMP79:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_12]], align 8
// CHECK1-NEXT: [[TMP80:%.*]] = load i64, ptr [[DOTPERMUTED_0_IV___BEGIN438]], align 8
// CHECK1-NEXT: [[MUL221:%.*]] = mul nsw i64 [[TMP80]], 1
// CHECK1-NEXT: [[ADD_PTR222:%.*]] = getelementptr inbounds double, ptr [[TMP79]], i64 [[MUL221]]
// CHECK1-NEXT: store ptr [[ADD_PTR222]], ptr [[__BEGIN4]], align 8
// CHECK1-NEXT: [[TMP81:%.*]] = load ptr, ptr [[__BEGIN4]], align 8
// CHECK1-NEXT: store ptr [[TMP81]], ptr [[BB]], align 8
// CHECK1-NEXT: [[TMP82:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
// CHECK1-NEXT: [[TMP83:%.*]] = load i64, ptr [[DOTPERMUTED_1_IV___BEGIN339]], align 8
// CHECK1-NEXT: [[MUL223:%.*]] = mul nsw i64 [[TMP83]], 1
// CHECK1-NEXT: [[ADD_PTR224:%.*]] = getelementptr inbounds double, ptr [[TMP82]], i64 [[MUL223]]
// CHECK1-NEXT: store ptr [[ADD_PTR224]], ptr [[__BEGIN3]], align 8
// CHECK1-NEXT: [[TMP84:%.*]] = load ptr, ptr [[__BEGIN3]], align 8
// CHECK1-NEXT: [[TMP85:%.*]] = load double, ptr [[TMP84]], align 8
// CHECK1-NEXT: store double [[TMP85]], ptr [[AA]], align 8
// CHECK1-NEXT: [[TMP86:%.*]] = load i32, ptr [[I37]], align 4
// CHECK1-NEXT: [[TMP87:%.*]] = load double, ptr [[C]], align 8
// CHECK1-NEXT: [[TMP88:%.*]] = load double, ptr [[AA]], align 8
// CHECK1-NEXT: [[TMP89:%.*]] = load double, ptr [[D]], align 8
// CHECK1-NEXT: [[TMP90:%.*]] = load ptr, ptr [[BB]], align 8
// CHECK1-NEXT: [[TMP91:%.*]] = load double, ptr [[TMP90]], align 8
// CHECK1-NEXT: [[TMP92:%.*]] = load i32, ptr [[J40]], align 4
// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP86]], double noundef [[TMP87]], double noundef [[TMP88]], double noundef [[TMP89]], double noundef [[TMP91]], i32 noundef [[TMP92]])
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP93:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[ADD225:%.*]] = add nsw i64 [[TMP93]], 1
// CHECK1-NEXT: store i64 [[ADD225]], ptr [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@body
// CHECK2-SAME: (...) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@foo10
// CHECK2-SAME: () #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A:%.*]] = alloca [128 x double], align 16
// CHECK2-NEXT: [[B:%.*]] = alloca [16 x double], align 16
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[_TMP1:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[_TMP2:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[C:%.*]] = alloca double, align 8
// CHECK2-NEXT: [[__RANGE3:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[__END3:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[__BEGIN3:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_7:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[D:%.*]] = alloca double, align 8
// CHECK2-NEXT: [[__RANGE4:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[__END4:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[__BEGIN4:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_14:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_15:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPERMUTED_0_IV___BEGIN4:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTPERMUTED_1_IV___BEGIN3:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I37:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPERMUTED_0_IV___BEGIN438:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTPERMUTED_1_IV___BEGIN339:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[J40:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[BB:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[AA:%.*]] = alloca double, align 8
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:[0-9]+]])
// CHECK2-NEXT: store double 4.200000e+01, ptr [[C]], align 8
// CHECK2-NEXT: store ptr [[A]], ptr [[__RANGE3]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__RANGE3]], align 8
// CHECK2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [128 x double], ptr [[TMP1]], i64 0, i64 0
// CHECK2-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 128
// CHECK2-NEXT: store ptr [[ADD_PTR]], ptr [[__END3]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__RANGE3]], align 8
// CHECK2-NEXT: [[ARRAYDECAY4:%.*]] = getelementptr inbounds [128 x double], ptr [[TMP2]], i64 0, i64 0
// CHECK2-NEXT: store ptr [[ARRAYDECAY4]], ptr [[__BEGIN3]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[__RANGE3]], align 8
// CHECK2-NEXT: [[ARRAYDECAY5:%.*]] = getelementptr inbounds [128 x double], ptr [[TMP3]], i64 0, i64 0
// CHECK2-NEXT: store ptr [[ARRAYDECAY5]], ptr [[DOTCAPTURE_EXPR_]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load ptr, ptr [[__END3]], align 8
// CHECK2-NEXT: store ptr [[TMP4]], ptr [[DOTCAPTURE_EXPR_6]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_6]], align 8
// CHECK2-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
// CHECK2-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP5]] to i64
// CHECK2-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP6]] to i64
// CHECK2-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]]
// CHECK2-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i64 [[SUB]], 1
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i64 [[ADD]], 1
// CHECK2-NEXT: [[SUB8:%.*]] = sub nsw i64 [[DIV]], 1
// CHECK2-NEXT: store i64 [[SUB8]], ptr [[DOTCAPTURE_EXPR_7]], align 8
// CHECK2-NEXT: store double 4.200000e+01, ptr [[D]], align 8
// CHECK2-NEXT: store ptr [[B]], ptr [[__RANGE4]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[__RANGE4]], align 8
// CHECK2-NEXT: [[ARRAYDECAY9:%.*]] = getelementptr inbounds [16 x double], ptr [[TMP7]], i64 0, i64 0
// CHECK2-NEXT: [[ADD_PTR10:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY9]], i64 16
// CHECK2-NEXT: store ptr [[ADD_PTR10]], ptr [[__END4]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__RANGE4]], align 8
// CHECK2-NEXT: [[ARRAYDECAY11:%.*]] = getelementptr inbounds [16 x double], ptr [[TMP8]], i64 0, i64 0
// CHECK2-NEXT: store ptr [[ARRAYDECAY11]], ptr [[__BEGIN4]], align 8
// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__RANGE4]], align 8
// CHECK2-NEXT: [[ARRAYDECAY13:%.*]] = getelementptr inbounds [16 x double], ptr [[TMP9]], i64 0, i64 0
// CHECK2-NEXT: store ptr [[ARRAYDECAY13]], ptr [[DOTCAPTURE_EXPR_12]], align 8
// CHECK2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__END4]], align 8
// CHECK2-NEXT: store ptr [[TMP10]], ptr [[DOTCAPTURE_EXPR_14]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_14]], align 8
// CHECK2-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_12]], align 8
// CHECK2-NEXT: [[SUB_PTR_LHS_CAST16:%.*]] = ptrtoint ptr [[TMP11]] to i64
// CHECK2-NEXT: [[SUB_PTR_RHS_CAST17:%.*]] = ptrtoint ptr [[TMP12]] to i64
// CHECK2-NEXT: [[SUB_PTR_SUB18:%.*]] = sub i64 [[SUB_PTR_LHS_CAST16]], [[SUB_PTR_RHS_CAST17]]
// CHECK2-NEXT: [[SUB_PTR_DIV19:%.*]] = sdiv exact i64 [[SUB_PTR_SUB18]], 8
// CHECK2-NEXT: [[SUB20:%.*]] = sub nsw i64 [[SUB_PTR_DIV19]], 1
// CHECK2-NEXT: [[ADD21:%.*]] = add nsw i64 [[SUB20]], 1
// CHECK2-NEXT: [[DIV22:%.*]] = sdiv i64 [[ADD21]], 1
// CHECK2-NEXT: [[SUB23:%.*]] = sub nsw i64 [[DIV22]], 1
// CHECK2-NEXT: store i64 [[SUB23]], ptr [[DOTCAPTURE_EXPR_15]], align 8
// CHECK2-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_15]], align 8
// CHECK2-NEXT: [[ADD25:%.*]] = add nsw i64 [[TMP13]], 1
// CHECK2-NEXT: store i64 [[ADD25]], ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_7]], align 8
// CHECK2-NEXT: [[ADD27:%.*]] = add nsw i64 [[TMP14]], 1
// CHECK2-NEXT: store i64 [[ADD27]], ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB29:%.*]] = sub nsw i64 [[TMP15]], 0
// CHECK2-NEXT: [[DIV30:%.*]] = sdiv i64 [[SUB29]], 1
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i64 128, [[DIV30]]
// CHECK2-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB31:%.*]] = sub nsw i64 [[TMP16]], 0
// CHECK2-NEXT: [[DIV32:%.*]] = sdiv i64 [[SUB31]], 1
// CHECK2-NEXT: [[MUL33:%.*]] = mul nsw i64 [[MUL]], [[DIV32]]
// CHECK2-NEXT: [[MUL34:%.*]] = mul nsw i64 [[MUL33]], 128
// CHECK2-NEXT: [[SUB35:%.*]] = sub nsw i64 [[MUL34]], 1
// CHECK2-NEXT: store i64 [[SUB35]], ptr [[DOTCAPTURE_EXPR_28]], align 8
// CHECK2-NEXT: store i32 0, ptr [[I]], align 4
// CHECK2-NEXT: store i64 0, ptr [[DOTPERMUTED_0_IV___BEGIN4]], align 8
// CHECK2-NEXT: store i64 0, ptr [[DOTPERMUTED_1_IV___BEGIN3]], align 8
// CHECK2-NEXT: store i32 0, ptr [[J]], align 4
// CHECK2-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i64 0, [[TMP17]]
// CHECK2-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK2: land.lhs.true:
// CHECK2-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[CMP36:%.*]] = icmp slt i64 0, [[TMP18]]
// CHECK2-NEXT: br i1 [[CMP36]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
// CHECK2: omp.precond.then:
// CHECK2-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
// CHECK2-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_28]], align 8
// CHECK2-NEXT: store i64 [[TMP19]], ptr [[DOTOMP_UB]], align 8
// CHECK2-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK2-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK2-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_28]], align 8
// CHECK2-NEXT: [[CMP41:%.*]] = icmp sgt i64 [[TMP20]], [[TMP21]]
// CHECK2-NEXT: br i1 [[CMP41]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_28]], align 8
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i64 [ [[TMP22]], [[COND_TRUE]] ], [ [[TMP23]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8
// CHECK2-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
// CHECK2-NEXT: store i64 [[TMP24]], ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK2-NEXT: [[CMP42:%.*]] = icmp sle i64 [[TMP25]], [[TMP26]]
// CHECK2-NEXT: br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB43:%.*]] = sub nsw i64 [[TMP28]], 0
// CHECK2-NEXT: [[DIV44:%.*]] = sdiv i64 [[SUB43]], 1
// CHECK2-NEXT: [[MUL45:%.*]] = mul nsw i64 1, [[DIV44]]
// CHECK2-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB46:%.*]] = sub nsw i64 [[TMP29]], 0
// CHECK2-NEXT: [[DIV47:%.*]] = sdiv i64 [[SUB46]], 1
// CHECK2-NEXT: [[MUL48:%.*]] = mul nsw i64 [[MUL45]], [[DIV47]]
// CHECK2-NEXT: [[MUL49:%.*]] = mul nsw i64 [[MUL48]], 128
// CHECK2-NEXT: [[DIV50:%.*]] = sdiv i64 [[TMP27]], [[MUL49]]
// CHECK2-NEXT: [[MUL51:%.*]] = mul nsw i64 [[DIV50]], 1
// CHECK2-NEXT: [[ADD52:%.*]] = add nsw i64 0, [[MUL51]]
// CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[ADD52]] to i32
// CHECK2-NEXT: store i32 [[CONV]], ptr [[I37]], align 4
// CHECK2-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB53:%.*]] = sub nsw i64 [[TMP32]], 0
// CHECK2-NEXT: [[DIV54:%.*]] = sdiv i64 [[SUB53]], 1
// CHECK2-NEXT: [[MUL55:%.*]] = mul nsw i64 1, [[DIV54]]
// CHECK2-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB56:%.*]] = sub nsw i64 [[TMP33]], 0
// CHECK2-NEXT: [[DIV57:%.*]] = sdiv i64 [[SUB56]], 1
// CHECK2-NEXT: [[MUL58:%.*]] = mul nsw i64 [[MUL55]], [[DIV57]]
// CHECK2-NEXT: [[MUL59:%.*]] = mul nsw i64 [[MUL58]], 128
// CHECK2-NEXT: [[DIV60:%.*]] = sdiv i64 [[TMP31]], [[MUL59]]
// CHECK2-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB61:%.*]] = sub nsw i64 [[TMP34]], 0
// CHECK2-NEXT: [[DIV62:%.*]] = sdiv i64 [[SUB61]], 1
// CHECK2-NEXT: [[MUL63:%.*]] = mul nsw i64 1, [[DIV62]]
// CHECK2-NEXT: [[TMP35:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB64:%.*]] = sub nsw i64 [[TMP35]], 0
// CHECK2-NEXT: [[DIV65:%.*]] = sdiv i64 [[SUB64]], 1
// CHECK2-NEXT: [[MUL66:%.*]] = mul nsw i64 [[MUL63]], [[DIV65]]
// CHECK2-NEXT: [[MUL67:%.*]] = mul nsw i64 [[MUL66]], 128
// CHECK2-NEXT: [[MUL68:%.*]] = mul nsw i64 [[DIV60]], [[MUL67]]
// CHECK2-NEXT: [[SUB69:%.*]] = sub nsw i64 [[TMP30]], [[MUL68]]
// CHECK2-NEXT: [[TMP36:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB70:%.*]] = sub nsw i64 [[TMP36]], 0
// CHECK2-NEXT: [[DIV71:%.*]] = sdiv i64 [[SUB70]], 1
// CHECK2-NEXT: [[MUL72:%.*]] = mul nsw i64 1, [[DIV71]]
// CHECK2-NEXT: [[MUL73:%.*]] = mul nsw i64 [[MUL72]], 128
// CHECK2-NEXT: [[DIV74:%.*]] = sdiv i64 [[SUB69]], [[MUL73]]
// CHECK2-NEXT: [[MUL75:%.*]] = mul nsw i64 [[DIV74]], 1
// CHECK2-NEXT: [[ADD76:%.*]] = add nsw i64 0, [[MUL75]]
// CHECK2-NEXT: store i64 [[ADD76]], ptr [[DOTPERMUTED_0_IV___BEGIN438]], align 8
// CHECK2-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP38:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB77:%.*]] = sub nsw i64 [[TMP39]], 0
// CHECK2-NEXT: [[DIV78:%.*]] = sdiv i64 [[SUB77]], 1
// CHECK2-NEXT: [[MUL79:%.*]] = mul nsw i64 1, [[DIV78]]
// CHECK2-NEXT: [[TMP40:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB80:%.*]] = sub nsw i64 [[TMP40]], 0
// CHECK2-NEXT: [[DIV81:%.*]] = sdiv i64 [[SUB80]], 1
// CHECK2-NEXT: [[MUL82:%.*]] = mul nsw i64 [[MUL79]], [[DIV81]]
// CHECK2-NEXT: [[MUL83:%.*]] = mul nsw i64 [[MUL82]], 128
// CHECK2-NEXT: [[DIV84:%.*]] = sdiv i64 [[TMP38]], [[MUL83]]
// CHECK2-NEXT: [[TMP41:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB85:%.*]] = sub nsw i64 [[TMP41]], 0
// CHECK2-NEXT: [[DIV86:%.*]] = sdiv i64 [[SUB85]], 1
// CHECK2-NEXT: [[MUL87:%.*]] = mul nsw i64 1, [[DIV86]]
// CHECK2-NEXT: [[TMP42:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB88:%.*]] = sub nsw i64 [[TMP42]], 0
// CHECK2-NEXT: [[DIV89:%.*]] = sdiv i64 [[SUB88]], 1
// CHECK2-NEXT: [[MUL90:%.*]] = mul nsw i64 [[MUL87]], [[DIV89]]
// CHECK2-NEXT: [[MUL91:%.*]] = mul nsw i64 [[MUL90]], 128
// CHECK2-NEXT: [[MUL92:%.*]] = mul nsw i64 [[DIV84]], [[MUL91]]
// CHECK2-NEXT: [[SUB93:%.*]] = sub nsw i64 [[TMP37]], [[MUL92]]
// CHECK2-NEXT: [[TMP43:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP44:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP45:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB94:%.*]] = sub nsw i64 [[TMP45]], 0
// CHECK2-NEXT: [[DIV95:%.*]] = sdiv i64 [[SUB94]], 1
// CHECK2-NEXT: [[MUL96:%.*]] = mul nsw i64 1, [[DIV95]]
// CHECK2-NEXT: [[TMP46:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB97:%.*]] = sub nsw i64 [[TMP46]], 0
// CHECK2-NEXT: [[DIV98:%.*]] = sdiv i64 [[SUB97]], 1
// CHECK2-NEXT: [[MUL99:%.*]] = mul nsw i64 [[MUL96]], [[DIV98]]
// CHECK2-NEXT: [[MUL100:%.*]] = mul nsw i64 [[MUL99]], 128
// CHECK2-NEXT: [[DIV101:%.*]] = sdiv i64 [[TMP44]], [[MUL100]]
// CHECK2-NEXT: [[TMP47:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB102:%.*]] = sub nsw i64 [[TMP47]], 0
// CHECK2-NEXT: [[DIV103:%.*]] = sdiv i64 [[SUB102]], 1
// CHECK2-NEXT: [[MUL104:%.*]] = mul nsw i64 1, [[DIV103]]
// CHECK2-NEXT: [[TMP48:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB105:%.*]] = sub nsw i64 [[TMP48]], 0
// CHECK2-NEXT: [[DIV106:%.*]] = sdiv i64 [[SUB105]], 1
// CHECK2-NEXT: [[MUL107:%.*]] = mul nsw i64 [[MUL104]], [[DIV106]]
// CHECK2-NEXT: [[MUL108:%.*]] = mul nsw i64 [[MUL107]], 128
// CHECK2-NEXT: [[MUL109:%.*]] = mul nsw i64 [[DIV101]], [[MUL108]]
// CHECK2-NEXT: [[SUB110:%.*]] = sub nsw i64 [[TMP43]], [[MUL109]]
// CHECK2-NEXT: [[TMP49:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB111:%.*]] = sub nsw i64 [[TMP49]], 0
// CHECK2-NEXT: [[DIV112:%.*]] = sdiv i64 [[SUB111]], 1
// CHECK2-NEXT: [[MUL113:%.*]] = mul nsw i64 1, [[DIV112]]
// CHECK2-NEXT: [[MUL114:%.*]] = mul nsw i64 [[MUL113]], 128
// CHECK2-NEXT: [[DIV115:%.*]] = sdiv i64 [[SUB110]], [[MUL114]]
// CHECK2-NEXT: [[TMP50:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB116:%.*]] = sub nsw i64 [[TMP50]], 0
// CHECK2-NEXT: [[DIV117:%.*]] = sdiv i64 [[SUB116]], 1
// CHECK2-NEXT: [[MUL118:%.*]] = mul nsw i64 1, [[DIV117]]
// CHECK2-NEXT: [[MUL119:%.*]] = mul nsw i64 [[MUL118]], 128
// CHECK2-NEXT: [[MUL120:%.*]] = mul nsw i64 [[DIV115]], [[MUL119]]
// CHECK2-NEXT: [[SUB121:%.*]] = sub nsw i64 [[SUB93]], [[MUL120]]
// CHECK2-NEXT: [[DIV122:%.*]] = sdiv i64 [[SUB121]], 128
// CHECK2-NEXT: [[MUL123:%.*]] = mul nsw i64 [[DIV122]], 1
// CHECK2-NEXT: [[ADD124:%.*]] = add nsw i64 0, [[MUL123]]
// CHECK2-NEXT: store i64 [[ADD124]], ptr [[DOTPERMUTED_1_IV___BEGIN339]], align 8
// CHECK2-NEXT: [[TMP51:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP52:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP53:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB125:%.*]] = sub nsw i64 [[TMP53]], 0
// CHECK2-NEXT: [[DIV126:%.*]] = sdiv i64 [[SUB125]], 1
// CHECK2-NEXT: [[MUL127:%.*]] = mul nsw i64 1, [[DIV126]]
// CHECK2-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB128:%.*]] = sub nsw i64 [[TMP54]], 0
// CHECK2-NEXT: [[DIV129:%.*]] = sdiv i64 [[SUB128]], 1
// CHECK2-NEXT: [[MUL130:%.*]] = mul nsw i64 [[MUL127]], [[DIV129]]
// CHECK2-NEXT: [[MUL131:%.*]] = mul nsw i64 [[MUL130]], 128
// CHECK2-NEXT: [[DIV132:%.*]] = sdiv i64 [[TMP52]], [[MUL131]]
// CHECK2-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB133:%.*]] = sub nsw i64 [[TMP55]], 0
// CHECK2-NEXT: [[DIV134:%.*]] = sdiv i64 [[SUB133]], 1
// CHECK2-NEXT: [[MUL135:%.*]] = mul nsw i64 1, [[DIV134]]
// CHECK2-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB136:%.*]] = sub nsw i64 [[TMP56]], 0
// CHECK2-NEXT: [[DIV137:%.*]] = sdiv i64 [[SUB136]], 1
// CHECK2-NEXT: [[MUL138:%.*]] = mul nsw i64 [[MUL135]], [[DIV137]]
// CHECK2-NEXT: [[MUL139:%.*]] = mul nsw i64 [[MUL138]], 128
// CHECK2-NEXT: [[MUL140:%.*]] = mul nsw i64 [[DIV132]], [[MUL139]]
// CHECK2-NEXT: [[SUB141:%.*]] = sub nsw i64 [[TMP51]], [[MUL140]]
// CHECK2-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB142:%.*]] = sub nsw i64 [[TMP59]], 0
// CHECK2-NEXT: [[DIV143:%.*]] = sdiv i64 [[SUB142]], 1
// CHECK2-NEXT: [[MUL144:%.*]] = mul nsw i64 1, [[DIV143]]
// CHECK2-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB145:%.*]] = sub nsw i64 [[TMP60]], 0
// CHECK2-NEXT: [[DIV146:%.*]] = sdiv i64 [[SUB145]], 1
// CHECK2-NEXT: [[MUL147:%.*]] = mul nsw i64 [[MUL144]], [[DIV146]]
// CHECK2-NEXT: [[MUL148:%.*]] = mul nsw i64 [[MUL147]], 128
// CHECK2-NEXT: [[DIV149:%.*]] = sdiv i64 [[TMP58]], [[MUL148]]
// CHECK2-NEXT: [[TMP61:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB150:%.*]] = sub nsw i64 [[TMP61]], 0
// CHECK2-NEXT: [[DIV151:%.*]] = sdiv i64 [[SUB150]], 1
// CHECK2-NEXT: [[MUL152:%.*]] = mul nsw i64 1, [[DIV151]]
// CHECK2-NEXT: [[TMP62:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB153:%.*]] = sub nsw i64 [[TMP62]], 0
// CHECK2-NEXT: [[DIV154:%.*]] = sdiv i64 [[SUB153]], 1
// CHECK2-NEXT: [[MUL155:%.*]] = mul nsw i64 [[MUL152]], [[DIV154]]
// CHECK2-NEXT: [[MUL156:%.*]] = mul nsw i64 [[MUL155]], 128
// CHECK2-NEXT: [[MUL157:%.*]] = mul nsw i64 [[DIV149]], [[MUL156]]
// CHECK2-NEXT: [[SUB158:%.*]] = sub nsw i64 [[TMP57]], [[MUL157]]
// CHECK2-NEXT: [[TMP63:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB159:%.*]] = sub nsw i64 [[TMP63]], 0
// CHECK2-NEXT: [[DIV160:%.*]] = sdiv i64 [[SUB159]], 1
// CHECK2-NEXT: [[MUL161:%.*]] = mul nsw i64 1, [[DIV160]]
// CHECK2-NEXT: [[MUL162:%.*]] = mul nsw i64 [[MUL161]], 128
// CHECK2-NEXT: [[DIV163:%.*]] = sdiv i64 [[SUB158]], [[MUL162]]
// CHECK2-NEXT: [[TMP64:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB164:%.*]] = sub nsw i64 [[TMP64]], 0
// CHECK2-NEXT: [[DIV165:%.*]] = sdiv i64 [[SUB164]], 1
// CHECK2-NEXT: [[MUL166:%.*]] = mul nsw i64 1, [[DIV165]]
// CHECK2-NEXT: [[MUL167:%.*]] = mul nsw i64 [[MUL166]], 128
// CHECK2-NEXT: [[MUL168:%.*]] = mul nsw i64 [[DIV163]], [[MUL167]]
// CHECK2-NEXT: [[SUB169:%.*]] = sub nsw i64 [[SUB141]], [[MUL168]]
// CHECK2-NEXT: [[TMP65:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP66:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP67:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB170:%.*]] = sub nsw i64 [[TMP67]], 0
// CHECK2-NEXT: [[DIV171:%.*]] = sdiv i64 [[SUB170]], 1
// CHECK2-NEXT: [[MUL172:%.*]] = mul nsw i64 1, [[DIV171]]
// CHECK2-NEXT: [[TMP68:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB173:%.*]] = sub nsw i64 [[TMP68]], 0
// CHECK2-NEXT: [[DIV174:%.*]] = sdiv i64 [[SUB173]], 1
// CHECK2-NEXT: [[MUL175:%.*]] = mul nsw i64 [[MUL172]], [[DIV174]]
// CHECK2-NEXT: [[MUL176:%.*]] = mul nsw i64 [[MUL175]], 128
// CHECK2-NEXT: [[DIV177:%.*]] = sdiv i64 [[TMP66]], [[MUL176]]
// CHECK2-NEXT: [[TMP69:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB178:%.*]] = sub nsw i64 [[TMP69]], 0
// CHECK2-NEXT: [[DIV179:%.*]] = sdiv i64 [[SUB178]], 1
// CHECK2-NEXT: [[MUL180:%.*]] = mul nsw i64 1, [[DIV179]]
// CHECK2-NEXT: [[TMP70:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB181:%.*]] = sub nsw i64 [[TMP70]], 0
// CHECK2-NEXT: [[DIV182:%.*]] = sdiv i64 [[SUB181]], 1
// CHECK2-NEXT: [[MUL183:%.*]] = mul nsw i64 [[MUL180]], [[DIV182]]
// CHECK2-NEXT: [[MUL184:%.*]] = mul nsw i64 [[MUL183]], 128
// CHECK2-NEXT: [[MUL185:%.*]] = mul nsw i64 [[DIV177]], [[MUL184]]
// CHECK2-NEXT: [[SUB186:%.*]] = sub nsw i64 [[TMP65]], [[MUL185]]
// CHECK2-NEXT: [[TMP71:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP72:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP73:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB187:%.*]] = sub nsw i64 [[TMP73]], 0
// CHECK2-NEXT: [[DIV188:%.*]] = sdiv i64 [[SUB187]], 1
// CHECK2-NEXT: [[MUL189:%.*]] = mul nsw i64 1, [[DIV188]]
// CHECK2-NEXT: [[TMP74:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB190:%.*]] = sub nsw i64 [[TMP74]], 0
// CHECK2-NEXT: [[DIV191:%.*]] = sdiv i64 [[SUB190]], 1
// CHECK2-NEXT: [[MUL192:%.*]] = mul nsw i64 [[MUL189]], [[DIV191]]
// CHECK2-NEXT: [[MUL193:%.*]] = mul nsw i64 [[MUL192]], 128
// CHECK2-NEXT: [[DIV194:%.*]] = sdiv i64 [[TMP72]], [[MUL193]]
// CHECK2-NEXT: [[TMP75:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_24]], align 8
// CHECK2-NEXT: [[SUB195:%.*]] = sub nsw i64 [[TMP75]], 0
// CHECK2-NEXT: [[DIV196:%.*]] = sdiv i64 [[SUB195]], 1
// CHECK2-NEXT: [[MUL197:%.*]] = mul nsw i64 1, [[DIV196]]
// CHECK2-NEXT: [[TMP76:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB198:%.*]] = sub nsw i64 [[TMP76]], 0
// CHECK2-NEXT: [[DIV199:%.*]] = sdiv i64 [[SUB198]], 1
// CHECK2-NEXT: [[MUL200:%.*]] = mul nsw i64 [[MUL197]], [[DIV199]]
// CHECK2-NEXT: [[MUL201:%.*]] = mul nsw i64 [[MUL200]], 128
// CHECK2-NEXT: [[MUL202:%.*]] = mul nsw i64 [[DIV194]], [[MUL201]]
// CHECK2-NEXT: [[SUB203:%.*]] = sub nsw i64 [[TMP71]], [[MUL202]]
// CHECK2-NEXT: [[TMP77:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB204:%.*]] = sub nsw i64 [[TMP77]], 0
// CHECK2-NEXT: [[DIV205:%.*]] = sdiv i64 [[SUB204]], 1
// CHECK2-NEXT: [[MUL206:%.*]] = mul nsw i64 1, [[DIV205]]
// CHECK2-NEXT: [[MUL207:%.*]] = mul nsw i64 [[MUL206]], 128
// CHECK2-NEXT: [[DIV208:%.*]] = sdiv i64 [[SUB203]], [[MUL207]]
// CHECK2-NEXT: [[TMP78:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_26]], align 8
// CHECK2-NEXT: [[SUB209:%.*]] = sub nsw i64 [[TMP78]], 0
// CHECK2-NEXT: [[DIV210:%.*]] = sdiv i64 [[SUB209]], 1
// CHECK2-NEXT: [[MUL211:%.*]] = mul nsw i64 1, [[DIV210]]
// CHECK2-NEXT: [[MUL212:%.*]] = mul nsw i64 [[MUL211]], 128
// CHECK2-NEXT: [[MUL213:%.*]] = mul nsw i64 [[DIV208]], [[MUL212]]
// CHECK2-NEXT: [[SUB214:%.*]] = sub nsw i64 [[SUB186]], [[MUL213]]
// CHECK2-NEXT: [[DIV215:%.*]] = sdiv i64 [[SUB214]], 128
// CHECK2-NEXT: [[MUL216:%.*]] = mul nsw i64 [[DIV215]], 128
// CHECK2-NEXT: [[SUB217:%.*]] = sub nsw i64 [[SUB169]], [[MUL216]]
// CHECK2-NEXT: [[MUL218:%.*]] = mul nsw i64 [[SUB217]], 1
// CHECK2-NEXT: [[ADD219:%.*]] = add nsw i64 0, [[MUL218]]
// CHECK2-NEXT: [[CONV220:%.*]] = trunc i64 [[ADD219]] to i32
// CHECK2-NEXT: store i32 [[CONV220]], ptr [[J40]], align 4
// CHECK2-NEXT: [[TMP79:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_12]], align 8
// CHECK2-NEXT: [[TMP80:%.*]] = load i64, ptr [[DOTPERMUTED_0_IV___BEGIN438]], align 8
// CHECK2-NEXT: [[MUL221:%.*]] = mul nsw i64 [[TMP80]], 1
// CHECK2-NEXT: [[ADD_PTR222:%.*]] = getelementptr inbounds double, ptr [[TMP79]], i64 [[MUL221]]
// CHECK2-NEXT: store ptr [[ADD_PTR222]], ptr [[__BEGIN4]], align 8
// CHECK2-NEXT: [[TMP81:%.*]] = load ptr, ptr [[__BEGIN4]], align 8
// CHECK2-NEXT: store ptr [[TMP81]], ptr [[BB]], align 8
// CHECK2-NEXT: [[TMP82:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
// CHECK2-NEXT: [[TMP83:%.*]] = load i64, ptr [[DOTPERMUTED_1_IV___BEGIN339]], align 8
// CHECK2-NEXT: [[MUL223:%.*]] = mul nsw i64 [[TMP83]], 1
// CHECK2-NEXT: [[ADD_PTR224:%.*]] = getelementptr inbounds double, ptr [[TMP82]], i64 [[MUL223]]
// CHECK2-NEXT: store ptr [[ADD_PTR224]], ptr [[__BEGIN3]], align 8
// CHECK2-NEXT: [[TMP84:%.*]] = load ptr, ptr [[__BEGIN3]], align 8
// CHECK2-NEXT: [[TMP85:%.*]] = load double, ptr [[TMP84]], align 8
// CHECK2-NEXT: store double [[TMP85]], ptr [[AA]], align 8
// CHECK2-NEXT: [[TMP86:%.*]] = load i32, ptr [[I37]], align 4
// CHECK2-NEXT: [[TMP87:%.*]] = load double, ptr [[C]], align 8
// CHECK2-NEXT: [[TMP88:%.*]] = load double, ptr [[AA]], align 8
// CHECK2-NEXT: [[TMP89:%.*]] = load double, ptr [[D]], align 8
// CHECK2-NEXT: [[TMP90:%.*]] = load ptr, ptr [[BB]], align 8
// CHECK2-NEXT: [[TMP91:%.*]] = load double, ptr [[TMP90]], align 8
// CHECK2-NEXT: [[TMP92:%.*]] = load i32, ptr [[J40]], align 4
// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP86]], double noundef [[TMP87]], double noundef [[TMP88]], double noundef [[TMP89]], double noundef [[TMP91]], i32 noundef [[TMP92]])
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP93:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[ADD225:%.*]] = add nsw i64 [[TMP93]], 1
// CHECK2-NEXT: store i64 [[ADD225]], ptr [[DOTOMP_IV]], align 8
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
// CHECK2: omp.precond.end:
// CHECK2-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@foo2
// CHECK2-SAME: (i32 noundef [[START1:%.*]], i32 noundef [[START2:%.*]], i32 noundef [[END1:%.*]], i32 noundef [[END2:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[START1_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[START2_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[END1_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[END2_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[STEP1_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[STEP2_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTNEW_STEP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTNEW_STEP7:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPERMUTED_0_IV_J:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPERMUTED_1_IV_I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[START1]], ptr [[START1_ADDR]], align 4
// CHECK2-NEXT: store i32 [[START2]], ptr [[START2_ADDR]], align 4
// CHECK2-NEXT: store i32 [[END1]], ptr [[END1_ADDR]], align 4
// CHECK2-NEXT: store i32 [[END2]], ptr [[END2_ADDR]], align 4
// CHECK2-NEXT: store i32 [[STEP1]], ptr [[STEP1_ADDR]], align 4
// CHECK2-NEXT: store i32 [[STEP2]], ptr [[STEP2_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[START1_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP0]], ptr [[I]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[START1_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[END1_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[STEP1_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP3]], ptr [[DOTNEW_STEP]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], [[TMP5]]
// CHECK2-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], [[TMP6]]
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
// CHECK2-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP7]]
// CHECK2-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK2-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[START2_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP8]], ptr [[J]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[START2_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP9]], ptr [[DOTCAPTURE_EXPR_5]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[END2_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR_6]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[STEP2_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP11]], ptr [[DOTNEW_STEP7]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4
// CHECK2-NEXT: [[SUB9:%.*]] = sub i32 [[TMP12]], [[TMP13]]
// CHECK2-NEXT: [[SUB10:%.*]] = sub i32 [[SUB9]], 1
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTNEW_STEP7]], align 4
// CHECK2-NEXT: [[ADD11:%.*]] = add i32 [[SUB10]], [[TMP14]]
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTNEW_STEP7]], align 4
// CHECK2-NEXT: [[DIV12:%.*]] = udiv i32 [[ADD11]], [[TMP15]]
// CHECK2-NEXT: [[SUB13:%.*]] = sub i32 [[DIV12]], 1
// CHECK2-NEXT: store i32 [[SUB13]], ptr [[DOTCAPTURE_EXPR_8]], align 4
// CHECK2-NEXT: store i32 0, ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK2-NEXT: br label [[FOR_COND:%.*]]
// CHECK2: for.cond:
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_8]], align 4
// CHECK2-NEXT: [[ADD14:%.*]] = add i32 [[TMP17]], 1
// CHECK2-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP16]], [[ADD14]]
// CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END24:%.*]]
// CHECK2: for.body:
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTNEW_STEP7]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul i32 [[TMP19]], [[TMP20]]
// CHECK2-NEXT: [[ADD15:%.*]] = add i32 [[TMP18]], [[MUL]]
// CHECK2-NEXT: store i32 [[ADD15]], ptr [[J]], align 4
// CHECK2-NEXT: store i32 0, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: br label [[FOR_COND16:%.*]]
// CHECK2: for.cond16:
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: [[ADD17:%.*]] = add i32 [[TMP22]], 1
// CHECK2-NEXT: [[CMP18:%.*]] = icmp ult i32 [[TMP21]], [[ADD17]]
// CHECK2-NEXT: br i1 [[CMP18]], label [[FOR_BODY19:%.*]], label [[FOR_END:%.*]]
// CHECK2: for.body19:
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
// CHECK2-NEXT: [[MUL20:%.*]] = mul i32 [[TMP24]], [[TMP25]]
// CHECK2-NEXT: [[ADD21:%.*]] = add i32 [[TMP23]], [[MUL20]]
// CHECK2-NEXT: store i32 [[ADD21]], ptr [[I]], align 4
// CHECK2-NEXT: [[TMP26:%.*]] = load i32, ptr [[I]], align 4
// CHECK2-NEXT: [[TMP27:%.*]] = load i32, ptr [[J]], align 4
// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP26]], i32 noundef [[TMP27]])
// CHECK2-NEXT: br label [[FOR_INC:%.*]]
// CHECK2: for.inc:
// CHECK2-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add i32 [[TMP28]], 1
// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: br label [[FOR_COND16]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: br label [[FOR_INC22:%.*]]
// CHECK2: for.inc22:
// CHECK2-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK2-NEXT: [[INC23:%.*]] = add i32 [[TMP29]], 1
// CHECK2-NEXT: store i32 [[INC23]], ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK2: for.end24:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@foo3
// CHECK2-SAME: () #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPERMUTED_0_IV_J:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPERMUTED_1_IV_I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
// CHECK2-NEXT: store i32 7, ptr [[I]], align 4
// CHECK2-NEXT: store i32 7, ptr [[J]], align 4
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 3, ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], 3
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP2]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
// CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP6]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK2-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP7]], 3
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 7, [[MUL2]]
// CHECK2-NEXT: store i32 [[ADD3]], ptr [[J]], align 4
// CHECK2-NEXT: store i32 0, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: br label [[FOR_COND:%.*]]
// CHECK2: for.cond:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP8]], 4
// CHECK2-NEXT: br i1 [[CMP4]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK2: for.body:
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: [[MUL5:%.*]] = mul nsw i32 [[TMP9]], 3
// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 7, [[MUL5]]
// CHECK2-NEXT: store i32 [[ADD6]], ptr [[I]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[J]], align 4
// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP10]], i32 noundef [[TMP11]])
// CHECK2-NEXT: br label [[FOR_INC:%.*]]
// CHECK2: for.inc:
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK2-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@foo4
// CHECK2-SAME: () #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[K:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPERMUTED_0_IV_J:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPERMUTED_1_IV_I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
// CHECK2-NEXT: store i32 7, ptr [[I]], align 4
// CHECK2-NEXT: store i32 7, ptr [[J]], align 4
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 15, ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], 15
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 15, [[COND_TRUE]] ], [ [[TMP2]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
// CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP6]], 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 3
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 7, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], ptr [[K]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP8]], 4
// CHECK2-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 4
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], [[MUL4]]
// CHECK2-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1
// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]]
// CHECK2-NEXT: store i32 [[ADD6]], ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_J]], align 4
// CHECK2-NEXT: [[MUL7:%.*]] = mul nsw i32 [[TMP9]], 3
// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 7, [[MUL7]]
// CHECK2-NEXT: store i32 [[ADD8]], ptr [[J]], align 4
// CHECK2-NEXT: store i32 0, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: br label [[FOR_COND:%.*]]
// CHECK2: for.cond:
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: [[CMP9:%.*]] = icmp slt i32 [[TMP10]], 4
// CHECK2-NEXT: br i1 [[CMP9]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK2: for.body:
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP11]], 3
// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 7, [[MUL10]]
// CHECK2-NEXT: store i32 [[ADD11]], ptr [[I]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[J]], align 4
// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP12]], i32 noundef [[TMP13]])
// CHECK2-NEXT: br label [[FOR_INC:%.*]]
// CHECK2: for.inc:
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTPERMUTED_1_IV_I]], align 4
// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK2-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@foo6
// CHECK2-SAME: () #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[K:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPERMUTED_0_IV_K:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPERMUTED_1_IV_J:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[L:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
// CHECK2-NEXT: store i32 7, ptr [[J]], align 4
// CHECK2-NEXT: store i32 7, ptr [[K]], align 4
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 255, ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], 255
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 255, [[COND_TRUE]] ], [ [[TMP2]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
// CHECK2-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP6]], 64
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 3
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 7, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[DIV5:%.*]] = sdiv i32 [[TMP8]], 64
// CHECK2-NEXT: [[MUL6:%.*]] = mul nsw i32 [[DIV5]], 64
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], [[MUL6]]
// CHECK2-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB]], 16
// CHECK2-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
// CHECK2-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
// CHECK2-NEXT: store i32 [[ADD9]], ptr [[DOTPERMUTED_0_IV_K]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[DIV10:%.*]] = sdiv i32 [[TMP10]], 64
// CHECK2-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 64
// CHECK2-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP9]], [[MUL11]]
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[DIV13:%.*]] = sdiv i32 [[TMP12]], 64
// CHECK2-NEXT: [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 64
// CHECK2-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP11]], [[MUL14]]
// CHECK2-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 16
// CHECK2-NEXT: [[MUL17:%.*]] = mul nsw i32 [[DIV16]], 16
// CHECK2-NEXT: [[SUB18:%.*]] = sub nsw i32 [[SUB12]], [[MUL17]]
// CHECK2-NEXT: [[DIV19:%.*]] = sdiv i32 [[SUB18]], 4
// CHECK2-NEXT: [[MUL20:%.*]] = mul nsw i32 [[DIV19]], 1
// CHECK2-NEXT: [[ADD21:%.*]] = add nsw i32 0, [[MUL20]]
// CHECK2-NEXT: store i32 [[ADD21]], ptr [[DOTPERMUTED_1_IV_J]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[DIV22:%.*]] = sdiv i32 [[TMP14]], 64
// CHECK2-NEXT: [[MUL23:%.*]] = mul nsw i32 [[DIV22]], 64
// CHECK2-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP13]], [[MUL23]]
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[DIV25:%.*]] = sdiv i32 [[TMP16]], 64
// CHECK2-NEXT: [[MUL26:%.*]] = mul nsw i32 [[DIV25]], 64
// CHECK2-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP15]], [[MUL26]]
// CHECK2-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 16
// CHECK2-NEXT: [[MUL29:%.*]] = mul nsw i32 [[DIV28]], 16
// CHECK2-NEXT: [[SUB30:%.*]] = sub nsw i32 [[SUB24]], [[MUL29]]
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[DIV31:%.*]] = sdiv i32 [[TMP18]], 64
// CHECK2-NEXT: [[MUL32:%.*]] = mul nsw i32 [[DIV31]], 64
// CHECK2-NEXT: [[SUB33:%.*]] = sub nsw i32 [[TMP17]], [[MUL32]]
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[DIV34:%.*]] = sdiv i32 [[TMP20]], 64
// CHECK2-NEXT: [[MUL35:%.*]] = mul nsw i32 [[DIV34]], 64
// CHECK2-NEXT: [[SUB36:%.*]] = sub nsw i32 [[TMP19]], [[MUL35]]
// CHECK2-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 16
// CHECK2-NEXT: [[MUL38:%.*]] = mul nsw i32 [[DIV37]], 16
// CHECK2-NEXT: [[SUB39:%.*]] = sub nsw i32 [[SUB33]], [[MUL38]]
// CHECK2-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 4
// CHECK2-NEXT: [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 4
// CHECK2-NEXT: [[SUB42:%.*]] = sub nsw i32 [[SUB30]], [[MUL41]]
// CHECK2-NEXT: [[MUL43:%.*]] = mul nsw i32 [[SUB42]], 3
// CHECK2-NEXT: [[ADD44:%.*]] = add nsw i32 7, [[MUL43]]
// CHECK2-NEXT: store i32 [[ADD44]], ptr [[L]], align 4
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_K]], align 4
// CHECK2-NEXT: [[MUL45:%.*]] = mul nsw i32 [[TMP21]], 3
// CHECK2-NEXT: [[ADD46:%.*]] = add nsw i32 7, [[MUL45]]
// CHECK2-NEXT: store i32 [[ADD46]], ptr [[K]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTPERMUTED_1_IV_J]], align 4
// CHECK2-NEXT: [[MUL47:%.*]] = mul nsw i32 [[TMP22]], 3
// CHECK2-NEXT: [[ADD48:%.*]] = add nsw i32 7, [[MUL47]]
// CHECK2-NEXT: store i32 [[ADD48]], ptr [[J]], align 4
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[J]], align 4
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, ptr [[K]], align 4
// CHECK2-NEXT: [[TMP26:%.*]] = load i32, ptr [[L]], align 4
// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP23]], i32 noundef [[TMP24]], i32 noundef [[TMP25]], i32 noundef [[TMP26]])
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD49:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK2-NEXT: store i32 [[ADD49]], ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@foo9
// CHECK2-SAME: () #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[ARR:%.*]] = alloca [128 x double], align 16
// CHECK2-NEXT: [[C:%.*]] = alloca double, align 8
// CHECK2-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[__END2:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPERMUTED_0_IV_I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPERMUTED_1_IV___BEGIN2:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[V:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: store double 4.200000e+01, ptr [[C]], align 8
// CHECK2-NEXT: store ptr [[ARR]], ptr [[__RANGE2]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[__RANGE2]], align 8
// CHECK2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [128 x double], ptr [[TMP0]], i64 0, i64 0
// CHECK2-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 128
// CHECK2-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__RANGE2]], align 8
// CHECK2-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [128 x double], ptr [[TMP1]], i64 0, i64 0
// CHECK2-NEXT: store ptr [[ARRAYDECAY1]], ptr [[__BEGIN2]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__RANGE2]], align 8
// CHECK2-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [128 x double], ptr [[TMP2]], i64 0, i64 0
// CHECK2-NEXT: store ptr [[ARRAYDECAY2]], ptr [[DOTCAPTURE_EXPR_]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[__END2]], align 8
// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTCAPTURE_EXPR_3]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_3]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
// CHECK2-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP4]] to i64
// CHECK2-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP5]] to i64
// CHECK2-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]]
// CHECK2-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i64 [[SUB]], 1
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i64 [[ADD]], 1
// CHECK2-NEXT: [[SUB5:%.*]] = sub nsw i64 [[DIV]], 1
// CHECK2-NEXT: store i64 [[SUB5]], ptr [[DOTCAPTURE_EXPR_4]], align 8
// CHECK2-NEXT: store i32 0, ptr [[I]], align 4
// CHECK2-NEXT: store i32 0, ptr [[DOTPERMUTED_0_IV_I]], align 4
// CHECK2-NEXT: br label [[FOR_COND:%.*]]
// CHECK2: for.cond:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_I]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], 21
// CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END15:%.*]]
// CHECK2: for.body:
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_I]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 2
// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD6]], ptr [[I]], align 4
// CHECK2-NEXT: store i64 0, ptr [[DOTPERMUTED_1_IV___BEGIN2]], align 8
// CHECK2-NEXT: br label [[FOR_COND7:%.*]]
// CHECK2: for.cond7:
// CHECK2-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPERMUTED_1_IV___BEGIN2]], align 8
// CHECK2-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8
// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP9]], 1
// CHECK2-NEXT: [[CMP9:%.*]] = icmp slt i64 [[TMP8]], [[ADD8]]
// CHECK2-NEXT: br i1 [[CMP9]], label [[FOR_BODY10:%.*]], label [[FOR_END:%.*]]
// CHECK2: for.body10:
// CHECK2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTPERMUTED_1_IV___BEGIN2]], align 8
// CHECK2-NEXT: [[MUL11:%.*]] = mul nsw i64 [[TMP11]], 1
// CHECK2-NEXT: [[ADD_PTR12:%.*]] = getelementptr inbounds double, ptr [[TMP10]], i64 [[MUL11]]
// CHECK2-NEXT: store ptr [[ADD_PTR12]], ptr [[__BEGIN2]], align 8
// CHECK2-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__BEGIN2]], align 8
// CHECK2-NEXT: store ptr [[TMP12]], ptr [[V]], align 8
// CHECK2-NEXT: [[TMP13:%.*]] = load double, ptr [[C]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = load ptr, ptr [[V]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = load double, ptr [[TMP14]], align 8
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4
// CHECK2-NEXT: call void (...) @body(double noundef [[TMP13]], double noundef [[TMP15]], i32 noundef [[TMP16]])
// CHECK2-NEXT: br label [[FOR_INC:%.*]]
// CHECK2: for.inc:
// CHECK2-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTPERMUTED_1_IV___BEGIN2]], align 8
// CHECK2-NEXT: [[INC:%.*]] = add nsw i64 [[TMP17]], 1
// CHECK2-NEXT: store i64 [[INC]], ptr [[DOTPERMUTED_1_IV___BEGIN2]], align 8
// CHECK2-NEXT: br label [[FOR_COND7]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: br label [[FOR_INC13:%.*]]
// CHECK2: for.inc13:
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTPERMUTED_0_IV_I]], align 4
// CHECK2-NEXT: [[INC14:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK2-NEXT: store i32 [[INC14]], ptr [[DOTPERMUTED_0_IV_I]], align 4
// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
// CHECK2: for.end15:
// CHECK2-NEXT: ret void
//