llvm/test/Transforms/SLPVectorizer/X86/alternate-cmp-swapped-pred.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s

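; The scalar code below mixes `icmp slt %x, 0` with its commuted form
; `icmp sgt 0, %x`. SLP should recognize the swapped predicate as the same
; comparison, vectorize all eight compares as an alternate slt/sgt node
; (blended with a shufflevector), and turn the chain of adds over the
; zext'ed results into an @llvm.vector.reduce.add reduction.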
define i16 @test(i16 %call37) {
; CHECK-LABEL: @test(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = load i16, ptr undef, align 2
; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <8 x i16> <i16 poison, i16 0, i16 0, i16 poison, i16 poison, i16 0, i16 poison, i16 0>, i16 [[CALL37:%.*]], i32 3
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[CALL]], i32 0
; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 3, i32 5, i32 3, i32 7>
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt <8 x i16> [[SHUFFLE]], zeroinitializer
; CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt <8 x i16> [[SHUFFLE]], zeroinitializer
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> [[TMP3]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP5:%.*]] = zext <8 x i1> [[TMP4]] to <8 x i16>
; CHECK-NEXT:    [[TMP6:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[TMP5]])
; CHECK-NEXT:    [[OP_RDX:%.*]] = add i16 [[TMP6]], 0
; CHECK-NEXT:    ret i16 [[OP_RDX]]
;
entry:
  %call = load i16, ptr undef, align 2
  %0 = icmp slt i16 %call, 0
  %cond = zext i1 %0 to i16
  %1 = add i16 %cond, 0
  %2 = icmp slt i16 0, 0
  %cond32 = zext i1 %2 to i16
  %3 = add i16 %1, %cond32
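  ; `icmp sgt i16 0, %call37` is the commuted form of `icmp slt i16 %call37, 0`;
  ; the swapped-predicate compares should land in the slt lanes of the vector node.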
  %.not = icmp sgt i16 0, %call37
  %cond55 = zext i1 %.not to i16
  %4 = icmp sgt i16 %call37, 0
  %cond76 = zext i1 %4 to i16
  %5 = icmp slt i16 0, 0
  %cond97 = zext i1 %5 to i16
  %.not206 = icmp sgt i16 0, %call37
  %cond120 = zext i1 %.not206 to i16
  %6 = icmp sgt i16 0, 0
  %cond141 = zext i1 %6 to i16
  %7 = icmp slt i16 0, 0
  %cond162 = zext i1 %7 to i16
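  ; Together with %1 and %3 above, the adds below reduce all eight zext'ed
  ; compare results; the whole chain should become @llvm.vector.reduce.add.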
  %8 = add i16 %3, %cond97
  %9 = add i16 %8, %cond55
  %10 = add i16 %9, %cond76
  %11 = add i16 %10, %cond162
  %12 = add i16 %11, %cond120
  %13 = add i16 %12, %cond141
  ret i16 %13
}