; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -p loop-vectorize -mtriple=x86_64-apple-macosx -mcpu=penryn -S %s | FileCheck %s
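; Test a loop with two exits where %x.inc, the value feeding the early-exit
; compare, is used outside the loop in %exit.1. The middle block of the
; vectorized loop must branch unconditionally to the scalar epilogue, so the
; early exit and the live-out %x.inc are handled by the scalar loop.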
define i64 @test_value_in_exit_compare_chain_used_outside(ptr %src, i64 %x, i64 range(i64 1, 32) %N) {
; CHECK-LABEL: define i64 @test_value_in_exit_compare_chain_used_outside(
; CHECK-SAME: ptr [[SRC:%.*]], i64 [[X:%.*]], i64 range(i64 1, 32) [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = add nsw i64 [[N]], -1
; CHECK-NEXT: [[TMP1:%.*]] = freeze i64 [[TMP0]]
; CHECK-NEXT: [[UMIN2:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 [[X]])
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[UMIN2]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], 8
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
; CHECK: [[VECTOR_SCEVCHECK]]:
; CHECK-NEXT: [[TMP3:%.*]] = add nsw i64 [[N]], -1
; CHECK-NEXT: [[TMP4:%.*]] = freeze i64 [[TMP3]]
; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[X]])
; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[UMIN]] to i1
; CHECK-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[UMIN]], 1
; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
; CHECK-NEXT: br i1 [[TMP7]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 8, i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP9]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i8> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP29:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 5
; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 6
; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 7
; CHECK-NEXT: [[TMP18:%.*]] = and i64 [[TMP10]], 1
; CHECK-NEXT: [[TMP19:%.*]] = and i64 [[TMP11]], 1
; CHECK-NEXT: [[TMP20:%.*]] = and i64 [[TMP12]], 1
; CHECK-NEXT: [[TMP21:%.*]] = and i64 [[TMP13]], 1
; CHECK-NEXT: [[TMP22:%.*]] = and i64 [[TMP14]], 1
; CHECK-NEXT: [[TMP23:%.*]] = and i64 [[TMP15]], 1
; CHECK-NEXT: [[TMP24:%.*]] = and i64 [[TMP16]], 1
; CHECK-NEXT: [[TMP25:%.*]] = and i64 [[TMP17]], 1
; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP18]]
; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[TMP26]], i32 0
; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[TMP27]], i32 -7
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP28]], align 1
; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <8 x i8> [[WIDE_LOAD]], <8 x i8> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[TMP29]] = xor <8 x i8> [[REVERSE]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP31:%.*]] = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> [[TMP29]])
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i8 [ [[TMP31]], %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[XOR_RED:%.*]] = phi i8 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[XOR_RED_NEXT:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[IV_AND:%.*]] = and i64 [[IV]], 1
; CHECK-NEXT: [[X_INC:%.*]] = add i64 [[IV_AND]], [[X]]
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV_AND]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[X_INC]], 0
; CHECK-NEXT: br i1 [[CMP]], label %[[EXIT_1:.*]], label %[[LOOP_LATCH]]
; CHECK: [[LOOP_LATCH]]:
; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC]], align 1
; CHECK-NEXT: [[XOR_RED_NEXT]] = xor i8 [[L]], [[XOR_RED]]
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT_2:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[EXIT_1]]:
; CHECK-NEXT: [[X_INC_LCSSA:%.*]] = phi i64 [ [[X_INC]], %[[LOOP_HEADER]] ]
; CHECK-NEXT: ret i64 [[X_INC_LCSSA]]
; CHECK: [[EXIT_2]]:
; CHECK-NEXT: [[XOR_RED_NEXT_LCSSA:%.*]] = phi i8 [ [[XOR_RED_NEXT]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[R:%.*]] = zext i8 [[XOR_RED_NEXT_LCSSA]] to i64
; CHECK-NEXT: ret i64 [[R]]
;
entry:
  br label %loop.header

loop.header:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %xor.red = phi i8 [ 0, %entry ], [ %xor.red.next, %loop.latch ]
  %iv.and = and i64 %iv, 1
  %x.inc = add i64 %iv.and, %x
  %gep.src = getelementptr i8, ptr %src, i64 %iv.and
  %cmp = icmp eq i64 %x.inc, 0
  br i1 %cmp, label %exit.1, label %loop.latch

loop.latch:
  %l = load i8, ptr %gep.src, align 1
  %xor.red.next = xor i8 %l, %xor.red
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv.next, %N
  br i1 %ec, label %exit.2, label %loop.header

exit.1:
  ret i64 %x.inc

exit.2:
  %r = zext i8 %xor.red.next to i64
  ret i64 %r
}
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
;.