clang/test/OpenMP/irbuilder_unroll_partial_heuristic_for_collapse.c

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics

// REQUIRES: x86-registered-target

#ifndef HEADER
#define HEADER

double sind(double);

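// Check partial unrolling under an enclosing worksharing-loop collapse: the
// inner loop carries "#pragma omp unroll partial" without an explicit factor,
// so the implementation picks one heuristically. Because collapse(2) consumes
// the unrolled loop, the frontend needs its trip count up front; the IR below
// shows a factor of 2, giving 8/2 = 4 logical inner iterations and a collapsed
// iteration space of m * 4. Logically the directives rewrite the nest roughly
// as follows (a sketch for orientation, not compiled code):
//   for (int i = 0; i < m; i++)
//     for (int j_u = 0; j_u < 8; j_u += 2)       // collapsed with i
//       for (int j = j_u; j < j_u + 2 && j < 8; j++)
//         a[i] += sind(b[i]) * c[i] * d[i] * e[i] + offset;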
void unroll_partial_heuristic_for(int m, float *a, float *b, float *c, float *d, float *e, float offset) {
#pragma omp for collapse(2)
  for (int i = 0; i < m; i++) {
#pragma omp unroll partial
    for (int j = 0; j < 8; j++) {
      a[i] += sind(b[i]) * c[i] * d[i] * e[i] + offset;
    }
  }
}

#endif // HEADER
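
// Structure of the generated code below: a zero-trip-count guard
// (omp.precond.*), a statically scheduled worksharing loop over the collapsed
// space (omp.inner.for.*), recomputation of i and the unrolled j IV from the
// collapsed IV, and a guarded factor-2 unroll body loop (for.*).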

// CHECK-LABEL: define {{[^@]+}}@unroll_partial_heuristic_for
// CHECK-SAME: (i32 noundef [[M:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]], ptr noundef [[D:%.*]], ptr noundef [[E:%.*]], float noundef [[OFFSET:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[M_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[E_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[OFFSET_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[J:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[DOTUNROLLED_IV_J:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[I6:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[DOTUNROLLED_IV_J7:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[DOTUNROLL_INNER_IV_J:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[M]], ptr [[M_ADDR]], align 4
// CHECK-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT:    store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK-NEXT:    store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK-NEXT:    store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK-NEXT:    store ptr [[E]], ptr [[E_ADDR]], align 8
// CHECK-NEXT:    store float [[OFFSET]], ptr [[OFFSET_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[M_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-NEXT:    store i32 0, ptr [[J]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
// CHECK-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
// CHECK-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], 4
// CHECK-NEXT:    [[SUB3:%.*]] = sub nsw i64 [[MUL]], 1
// CHECK-NEXT:    store i64 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 8
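// Collapsed trip count: (m - 0) / 1 iterations of i times 4 unrolled-j
// iterations; the captured expression holds the largest collapsed IV, m * 4 - 1.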
// CHECK-NEXT:    store i32 0, ptr [[I]], align 4
// CHECK-NEXT:    store i32 0, ptr [[DOTUNROLLED_IV_J]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP2]]
// CHECK-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
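// Zero-trip-count guard: the worksharing loop runs only if 0 < m.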
// CHECK:       omp.precond.then:
// CHECK-NEXT:    store i64 0, ptr [[DOTOMP_LB]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8
// CHECK-NEXT:    store i64 [[TMP3]], ptr [[DOTOMP_UB]], align 8
// CHECK-NEXT:    store i64 1, ptr [[DOTOMP_STRIDE]], align 8
// CHECK-NEXT:    store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT:    [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]])
// CHECK-NEXT:    call void @__kmpc_for_static_init_8(ptr @[[GLOB1:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1)
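// Schedule type 34 is kmp_sch_static: the runtime rewrites [lb, ub] to this
// thread's contiguous chunk of the collapsed iteration space.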
// CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8
// CHECK-NEXT:    [[CMP8:%.*]] = icmp sgt i64 [[TMP4]], [[TMP5]]
// CHECK-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK:       cond.true:
// CHECK-NEXT:    [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8
// CHECK-NEXT:    br label [[COND_END:%.*]]
// CHECK:       cond.false:
// CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK-NEXT:    br label [[COND_END]]
// CHECK:       cond.end:
// CHECK-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP6]], [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK-NEXT:    store i64 [[COND]], ptr [[DOTOMP_UB]], align 8
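// Clamp this thread's upper bound to the global trip count (min of the two).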
// CHECK-NEXT:    [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
// CHECK-NEXT:    store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8
// CHECK-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK:       omp.inner.for.cond:
// CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK-NEXT:    [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
// CHECK-NEXT:    [[CMP10:%.*]] = icmp sle i64 [[TMP9]], [[TMP10]]
// CHECK-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK:       omp.inner.for.body:
// CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK-NEXT:    [[DIV12:%.*]] = sdiv i64 [[TMP11]], 4
// CHECK-NEXT:    [[MUL13:%.*]] = mul nsw i64 [[DIV12]], 1
// CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL13]]
// CHECK-NEXT:    [[CONV14:%.*]] = trunc i64 [[ADD]] to i32
// CHECK-NEXT:    store i32 [[CONV14]], ptr [[I6]], align 4
// CHECK-NEXT:    [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK-NEXT:    [[DIV15:%.*]] = sdiv i64 [[TMP13]], 4
// CHECK-NEXT:    [[MUL16:%.*]] = mul nsw i64 [[DIV15]], 4
// CHECK-NEXT:    [[SUB17:%.*]] = sub nsw i64 [[TMP12]], [[MUL16]]
// CHECK-NEXT:    [[MUL18:%.*]] = mul nsw i64 [[SUB17]], 2
// CHECK-NEXT:    [[ADD19:%.*]] = add nsw i64 0, [[MUL18]]
// CHECK-NEXT:    [[CONV20:%.*]] = trunc i64 [[ADD19]] to i32
// CHECK-NEXT:    store i32 [[CONV20]], ptr [[DOTUNROLLED_IV_J7]], align 4
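// De-collapse the IV: i = iv / 4, and the unrolled j IV is (iv % 4) * 2, with
// the remainder expanded as iv - (iv / 4) * 4; j thus steps by the unroll
// factor of 2.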
// CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr [[DOTUNROLLED_IV_J7]], align 4
// CHECK-NEXT:    store i32 [[TMP14]], ptr [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT:    br label [[FOR_COND:%.*]]
// CHECK:       for.cond:
// CHECK-NEXT:    [[TMP15:%.*]] = load i32, ptr [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT:    [[TMP16:%.*]] = load i32, ptr [[DOTUNROLLED_IV_J7]], align 4
// CHECK-NEXT:    [[ADD21:%.*]] = add nsw i32 [[TMP16]], 2
// CHECK-NEXT:    [[CMP22:%.*]] = icmp slt i32 [[TMP15]], [[ADD21]]
// CHECK-NEXT:    br i1 [[CMP22]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK:       land.rhs:
// CHECK-NEXT:    [[TMP17:%.*]] = load i32, ptr [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT:    [[CMP24:%.*]] = icmp slt i32 [[TMP17]], 8
// CHECK-NEXT:    br label [[LAND_END]]
// CHECK:       land.end:
// CHECK-NEXT:    [[TMP18:%.*]] = phi i1 [ false, [[FOR_COND]] ], [ [[CMP24]], [[LAND_RHS]] ]
// CHECK-NEXT:    br i1 [[TMP18]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
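// Guarded unroll body: iterate while j < .unrolled.iv.j + 2 AND j < 8. The
// second test protects a possible tail; here 8 happens to be divisible by 2,
// but the guard is emitted rather than proving divisibility.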
// CHECK:       for.body:
// CHECK-NEXT:    [[TMP19:%.*]] = load i32, ptr [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT:    [[MUL26:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK-NEXT:    [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
// CHECK-NEXT:    store i32 [[ADD27]], ptr [[J]], align 4
// CHECK-NEXT:    [[TMP20:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK-NEXT:    [[TMP21:%.*]] = load i32, ptr [[I6]], align 4
// CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM]]
// CHECK-NEXT:    [[TMP22:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK-NEXT:    [[CONV28:%.*]] = fpext float [[TMP22]] to double
// CHECK-NEXT:    [[CALL:%.*]] = call double @sind(double noundef [[CONV28]])
// CHECK-NEXT:    [[TMP23:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK-NEXT:    [[TMP24:%.*]] = load i32, ptr [[I6]], align 4
// CHECK-NEXT:    [[IDXPROM29:%.*]] = sext i32 [[TMP24]] to i64
// CHECK-NEXT:    [[ARRAYIDX30:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM29]]
// CHECK-NEXT:    [[TMP25:%.*]] = load float, ptr [[ARRAYIDX30]], align 4
// CHECK-NEXT:    [[CONV31:%.*]] = fpext float [[TMP25]] to double
// CHECK-NEXT:    [[MUL32:%.*]] = fmul double [[CALL]], [[CONV31]]
// CHECK-NEXT:    [[TMP26:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK-NEXT:    [[TMP27:%.*]] = load i32, ptr [[I6]], align 4
// CHECK-NEXT:    [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
// CHECK-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[IDXPROM33]]
// CHECK-NEXT:    [[TMP28:%.*]] = load float, ptr [[ARRAYIDX34]], align 4
// CHECK-NEXT:    [[CONV35:%.*]] = fpext float [[TMP28]] to double
// CHECK-NEXT:    [[MUL36:%.*]] = fmul double [[MUL32]], [[CONV35]]
// CHECK-NEXT:    [[TMP29:%.*]] = load ptr, ptr [[E_ADDR]], align 8
// CHECK-NEXT:    [[TMP30:%.*]] = load i32, ptr [[I6]], align 4
// CHECK-NEXT:    [[IDXPROM37:%.*]] = sext i32 [[TMP30]] to i64
// CHECK-NEXT:    [[ARRAYIDX38:%.*]] = getelementptr inbounds float, ptr [[TMP29]], i64 [[IDXPROM37]]
// CHECK-NEXT:    [[TMP31:%.*]] = load float, ptr [[ARRAYIDX38]], align 4
// CHECK-NEXT:    [[CONV39:%.*]] = fpext float [[TMP31]] to double
// CHECK-NEXT:    [[MUL40:%.*]] = fmul double [[MUL36]], [[CONV39]]
// CHECK-NEXT:    [[TMP32:%.*]] = load float, ptr [[OFFSET_ADDR]], align 4
// CHECK-NEXT:    [[CONV41:%.*]] = fpext float [[TMP32]] to double
// CHECK-NEXT:    [[ADD42:%.*]] = fadd double [[MUL40]], [[CONV41]]
// CHECK-NEXT:    [[TMP33:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP34:%.*]] = load i32, ptr [[I6]], align 4
// CHECK-NEXT:    [[IDXPROM43:%.*]] = sext i32 [[TMP34]] to i64
// CHECK-NEXT:    [[ARRAYIDX44:%.*]] = getelementptr inbounds float, ptr [[TMP33]], i64 [[IDXPROM43]]
// CHECK-NEXT:    [[TMP35:%.*]] = load float, ptr [[ARRAYIDX44]], align 4
// CHECK-NEXT:    [[CONV45:%.*]] = fpext float [[TMP35]] to double
// CHECK-NEXT:    [[ADD46:%.*]] = fadd double [[CONV45]], [[ADD42]]
// CHECK-NEXT:    [[CONV47:%.*]] = fptrunc double [[ADD46]] to float
// CHECK-NEXT:    store float [[CONV47]], ptr [[ARRAYIDX44]], align 4
// CHECK-NEXT:    br label [[FOR_INC:%.*]]
// CHECK:       for.inc:
// CHECK-NEXT:    [[TMP36:%.*]] = load i32, ptr [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP36]], 1
// CHECK-NEXT:    store i32 [[INC]], ptr [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
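// The back-edge carries !llvm.loop metadata (LOOP3) that directs LLVM's loop
// unroller to expand this factor-2 body; its exact contents are not checked
// in this excerpt.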
// CHECK:       for.end:
// CHECK-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK:       omp.body.continue:
// CHECK-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK:       omp.inner.for.inc:
// CHECK-NEXT:    [[TMP37:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK-NEXT:    [[ADD48:%.*]] = add nsw i64 [[TMP37]], 1
// CHECK-NEXT:    store i64 [[ADD48]], ptr [[DOTOMP_IV]], align 8
// CHECK-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK:       omp.inner.for.end:
// CHECK-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK:       omp.loop.exit:
// CHECK-NEXT:    [[OMP_GLOBAL_THREAD_NUM49:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB5:[0-9]+]])
// CHECK-NEXT:    call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM49]])
// CHECK-NEXT:    br label [[OMP_PRECOND_END]]
// CHECK:       omp.precond.end:
// CHECK-NEXT:    [[OMP_GLOBAL_THREAD_NUM50:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7:[0-9]+]])
// CHECK-NEXT:    call void @__kmpc_barrier(ptr @[[GLOB6:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM50]])
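// Implicit barrier at the end of the worksharing loop (no nowait clause).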
// CHECK-NEXT:    ret void
//