llvm/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-too-many-deps.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt < %s -mattr=+sve2 -passes=loop-vectorize,instcombine -enable-histogram-loop-vectorization -sve-gather-overhead=2 -sve-scatter-overhead=2 -force-vector-interleave=1 -max-dependences=2 -debug-only=loop-vectorize,loop-accesses -S 2>&1 | FileCheck %s
; RUN: opt < %s -mattr=+sve2 -passes=loop-vectorize,instcombine -enable-histogram-loop-vectorization -sve-gather-overhead=2 -sve-scatter-overhead=2 -force-vector-interleave=1 -debug-only=loop-vectorize,loop-accesses -S 2>&1 | FileCheck %s --check-prefix=NORMAL_DEP_LIMIT
; REQUIRES: asserts
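
;; The first RUN line caps the dependence limit with -max-dependences=2 so that
;; LAA stops recording dependences for this loop; the second RUN line uses the
;; default limit and is expected to vectorize the histogram update (see the
;; NORMAL_DEP_LIMIT check lines below).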

target triple = "aarch64-unknown-linux-gnu"

;; Check that we don't crash when LAA hits its dependence limit, gives up on
;; recording dependences, and returns a null pointer instead of a dependence list.
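
;; For reference, a rough C-level sketch of the loop below (an illustration
;; derived from the IR, not taken from any original source):
;;
;;   void many_deps(int *restrict buckets, int *array, int *indices,
;;                  int *other, long N) {
;;     for (long i = 0; i < N; i++) {
;;       buckets[indices[i]]++;   /* histogram update */
;;       array[i] = (int)i;
;;       other[i] += (int)i;
;;     }
;;   }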

; CHECK-LABEL: LAA: Checking a loop in 'many_deps'
; CHECK: Too many dependences, stopped recording
; CHECK: LV: Can't vectorize due to memory conflicts
; CHECK: LV: Not vectorizing: Cannot prove legality.

define void @many_deps(ptr noalias %buckets, ptr %array, ptr %indices, ptr %other, i64 %N) {
; CHECK-LABEL: define void @many_deps(
; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr [[ARRAY:%.*]], ptr [[INDICES:%.*]], ptr [[OTHER:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[GEP_INDICES:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
; CHECK-NEXT:    [[L_IDX:%.*]] = load i32, ptr [[GEP_INDICES]], align 4
; CHECK-NEXT:    [[IDXPROM1:%.*]] = zext i32 [[L_IDX]] to i64
; CHECK-NEXT:    [[GEP_BUCKET:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
; CHECK-NEXT:    [[L_BUCKET:%.*]] = load i32, ptr [[GEP_BUCKET]], align 4
; CHECK-NEXT:    [[INC:%.*]] = add nsw i32 [[L_BUCKET]], 1
; CHECK-NEXT:    store i32 [[INC]], ptr [[GEP_BUCKET]], align 4
; CHECK-NEXT:    [[IDX_ADDR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IV]]
; CHECK-NEXT:    [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32
; CHECK-NEXT:    store i32 [[IV_TRUNC]], ptr [[IDX_ADDR]], align 4
; CHECK-NEXT:    [[GEP_OTHER:%.*]] = getelementptr inbounds i32, ptr [[OTHER]], i64 [[IV]]
; CHECK-NEXT:    [[L_OTHER:%.*]] = load i32, ptr [[GEP_OTHER]], align 4
; CHECK-NEXT:    [[ADD_OTHER:%.*]] = add i32 [[L_OTHER]], [[IV_TRUNC]]
; CHECK-NEXT:    store i32 [[ADD_OTHER]], ptr [[GEP_OTHER]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]]
; CHECK:       for.exit:
; CHECK-NEXT:    ret void
;
; NORMAL_DEP_LIMIT-LABEL: define void @many_deps(
; NORMAL_DEP_LIMIT-SAME: ptr noalias [[BUCKETS:%.*]], ptr [[ARRAY:%.*]], ptr [[INDICES:%.*]], ptr [[OTHER:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; NORMAL_DEP_LIMIT-NEXT:  entry:
; NORMAL_DEP_LIMIT-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; NORMAL_DEP_LIMIT-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
; NORMAL_DEP_LIMIT-NEXT:    [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 8)
; NORMAL_DEP_LIMIT-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
; NORMAL_DEP_LIMIT-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; NORMAL_DEP_LIMIT:       vector.memcheck:
; NORMAL_DEP_LIMIT-NEXT:    [[TMP3:%.*]] = shl i64 [[N]], 2
; NORMAL_DEP_LIMIT-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, ptr [[ARRAY]], i64 [[TMP3]]
; NORMAL_DEP_LIMIT-NEXT:    [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[OTHER]], i64 [[TMP3]]
; NORMAL_DEP_LIMIT-NEXT:    [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[INDICES]], i64 [[TMP3]]
; NORMAL_DEP_LIMIT-NEXT:    [[BOUND0:%.*]] = icmp ult ptr [[ARRAY]], [[SCEVGEP1]]
; NORMAL_DEP_LIMIT-NEXT:    [[BOUND1:%.*]] = icmp ult ptr [[OTHER]], [[SCEVGEP]]
; NORMAL_DEP_LIMIT-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; NORMAL_DEP_LIMIT-NEXT:    [[BOUND03:%.*]] = icmp ult ptr [[ARRAY]], [[SCEVGEP2]]
; NORMAL_DEP_LIMIT-NEXT:    [[BOUND14:%.*]] = icmp ult ptr [[INDICES]], [[SCEVGEP]]
; NORMAL_DEP_LIMIT-NEXT:    [[FOUND_CONFLICT5:%.*]] = and i1 [[BOUND03]], [[BOUND14]]
; NORMAL_DEP_LIMIT-NEXT:    [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]]
; NORMAL_DEP_LIMIT-NEXT:    [[BOUND06:%.*]] = icmp ult ptr [[OTHER]], [[SCEVGEP2]]
; NORMAL_DEP_LIMIT-NEXT:    [[BOUND17:%.*]] = icmp ult ptr [[INDICES]], [[SCEVGEP1]]
; NORMAL_DEP_LIMIT-NEXT:    [[FOUND_CONFLICT8:%.*]] = and i1 [[BOUND06]], [[BOUND17]]
; NORMAL_DEP_LIMIT-NEXT:    [[CONFLICT_RDX9:%.*]] = or i1 [[CONFLICT_RDX]], [[FOUND_CONFLICT8]]
; NORMAL_DEP_LIMIT-NEXT:    br i1 [[CONFLICT_RDX9]], label [[SCALAR_PH]], label [[ENTRY:%.*]]
; NORMAL_DEP_LIMIT:       vector.ph:
; NORMAL_DEP_LIMIT-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; NORMAL_DEP_LIMIT-NEXT:    [[TMP8:%.*]] = shl i64 [[TMP4]], 2
; NORMAL_DEP_LIMIT-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NORMAL_DEP_LIMIT-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; NORMAL_DEP_LIMIT-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NORMAL_DEP_LIMIT-NEXT:    [[TMP6:%.*]] = shl i64 [[TMP5]], 2
; NORMAL_DEP_LIMIT-NEXT:    [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
; NORMAL_DEP_LIMIT-NEXT:    [[TMP9:%.*]] = trunc i64 [[TMP6]] to i32
; NORMAL_DEP_LIMIT-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP9]], i64 0
; NORMAL_DEP_LIMIT-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; NORMAL_DEP_LIMIT-NEXT:    br label [[FOR_BODY:%.*]]
; NORMAL_DEP_LIMIT:       vector.body:
; NORMAL_DEP_LIMIT-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; NORMAL_DEP_LIMIT-NEXT:    [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[TMP7]], [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_BODY]] ]
; NORMAL_DEP_LIMIT-NEXT:    [[GEP_INDICES:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
; NORMAL_DEP_LIMIT-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[GEP_INDICES]], align 4, !alias.scope [[META0:![0-9]+]]
; NORMAL_DEP_LIMIT-NEXT:    [[TMP11:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
; NORMAL_DEP_LIMIT-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP11]]
; NORMAL_DEP_LIMIT-NEXT:    call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP12]], i32 1, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
; NORMAL_DEP_LIMIT-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IV]]
; NORMAL_DEP_LIMIT-NEXT:    store <vscale x 4 x i32> [[VEC_IND]], ptr [[TMP13]], align 4, !alias.scope [[META3:![0-9]+]], !noalias [[META5:![0-9]+]]
; NORMAL_DEP_LIMIT-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[OTHER]], i64 [[IV]]
; NORMAL_DEP_LIMIT-NEXT:    [[WIDE_LOAD10:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4, !alias.scope [[META7:![0-9]+]], !noalias [[META0]]
; NORMAL_DEP_LIMIT-NEXT:    [[TMP15:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD10]], [[VEC_IND]]
; NORMAL_DEP_LIMIT-NEXT:    store <vscale x 4 x i32> [[TMP15]], ptr [[TMP14]], align 4, !alias.scope [[META7]], !noalias [[META0]]
; NORMAL_DEP_LIMIT-NEXT:    [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NORMAL_DEP_LIMIT-NEXT:    [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; NORMAL_DEP_LIMIT-NEXT:    [[TMP16:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; NORMAL_DEP_LIMIT-NEXT:    br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; NORMAL_DEP_LIMIT:       middle.block:
; NORMAL_DEP_LIMIT-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; NORMAL_DEP_LIMIT-NEXT:    br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
; NORMAL_DEP_LIMIT:       scalar.ph:
; NORMAL_DEP_LIMIT-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; NORMAL_DEP_LIMIT-NEXT:    br label [[FOR_BODY1:%.*]]
; NORMAL_DEP_LIMIT:       for.body:
; NORMAL_DEP_LIMIT-NEXT:    [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
; NORMAL_DEP_LIMIT-NEXT:    [[GEP_INDICES1:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV1]]
; NORMAL_DEP_LIMIT-NEXT:    [[L_IDX:%.*]] = load i32, ptr [[GEP_INDICES1]], align 4
; NORMAL_DEP_LIMIT-NEXT:    [[IDXPROM1:%.*]] = zext i32 [[L_IDX]] to i64
; NORMAL_DEP_LIMIT-NEXT:    [[GEP_BUCKET:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
; NORMAL_DEP_LIMIT-NEXT:    [[L_BUCKET:%.*]] = load i32, ptr [[GEP_BUCKET]], align 4
; NORMAL_DEP_LIMIT-NEXT:    [[INC:%.*]] = add nsw i32 [[L_BUCKET]], 1
; NORMAL_DEP_LIMIT-NEXT:    store i32 [[INC]], ptr [[GEP_BUCKET]], align 4
; NORMAL_DEP_LIMIT-NEXT:    [[IDX_ADDR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IV1]]
; NORMAL_DEP_LIMIT-NEXT:    [[IV_TRUNC:%.*]] = trunc i64 [[IV1]] to i32
; NORMAL_DEP_LIMIT-NEXT:    store i32 [[IV_TRUNC]], ptr [[IDX_ADDR]], align 4
; NORMAL_DEP_LIMIT-NEXT:    [[GEP_OTHER:%.*]] = getelementptr inbounds i32, ptr [[OTHER]], i64 [[IV1]]
; NORMAL_DEP_LIMIT-NEXT:    [[L_OTHER:%.*]] = load i32, ptr [[GEP_OTHER]], align 4
; NORMAL_DEP_LIMIT-NEXT:    [[ADD_OTHER:%.*]] = add i32 [[L_OTHER]], [[IV_TRUNC]]
; NORMAL_DEP_LIMIT-NEXT:    store i32 [[ADD_OTHER]], ptr [[GEP_OTHER]], align 4
; NORMAL_DEP_LIMIT-NEXT:    [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
; NORMAL_DEP_LIMIT-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]]
; NORMAL_DEP_LIMIT-NEXT:    br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY1]], !llvm.loop [[LOOP11:![0-9]+]]
; NORMAL_DEP_LIMIT:       for.exit:
; NORMAL_DEP_LIMIT-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %gep.indices = getelementptr inbounds i32, ptr %indices, i64 %iv
  %l.idx = load i32, ptr %gep.indices, align 4
  %idxprom1 = zext i32 %l.idx to i64
  %gep.bucket = getelementptr inbounds i32, ptr %buckets, i64 %idxprom1
  %l.bucket = load i32, ptr %gep.bucket, align 4
  %inc = add nsw i32 %l.bucket, 1
  store i32 %inc, ptr %gep.bucket, align 4
  %idx.addr = getelementptr inbounds i32, ptr %array, i64 %iv
  %iv.trunc = trunc i64 %iv to i32
  store i32 %iv.trunc, ptr %idx.addr, align 4
  %gep.other = getelementptr inbounds i32, ptr %other, i64 %iv
  %l.other = load i32, ptr %gep.other, align 4
  %add.other = add i32 %l.other, %iv.trunc
  store i32 %add.other, ptr %gep.other, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %N
  br i1 %exitcond, label %for.exit, label %for.body

for.exit:
  ret void
}