llvm/test/Transforms/PhaseOrdering/X86/pr48844-br-to-switch-vectorization.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes="default<O2>" -mattr=avx < %s | FileCheck --check-prefix=AVX %s
; RUN: opt -S -passes="default<O2>" -mattr=avx2 < %s | FileCheck --check-prefix=AVX2 %s

target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; Make sure we vectorize when branches are converted to a switch.
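; Roughly, the input IR below models a loop like the following C sketch
; (illustrative only, not the original source):
;
;   for (int *p = start; p != end; ++p)
;     if (*p == 13 || *p == -12)
;       *p = 42;
;
; With plain AVX the checks expect the loop to stay scalar (only the
; branch-to-switch conversion is visible); with AVX2 the checks expect it to be
; vectorized with 8-wide masked stores, interleaved by 4.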
define dso_local void @test(ptr %start, ptr %end) #0 {
;
; AVX-LABEL: @test(
; AVX-NEXT:  entry:
; AVX-NEXT:    [[I11_NOT1:%.*]] = icmp eq ptr [[START:%.*]], [[END:%.*]]
; AVX-NEXT:    br i1 [[I11_NOT1]], label [[EXIT:%.*]], label [[BB12:%.*]]
; AVX:       bb12:
; AVX-NEXT:    [[PTR2:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LATCH:%.*]] ], [ [[START]], [[ENTRY:%.*]] ]
; AVX-NEXT:    [[VAL:%.*]] = load i32, ptr [[PTR2]], align 4
; AVX-NEXT:    switch i32 [[VAL]], label [[LATCH]] [
; AVX-NEXT:      i32 -12, label [[STORE:%.*]]
; AVX-NEXT:      i32 13, label [[STORE]]
; AVX-NEXT:    ]
; AVX:       store:
; AVX-NEXT:    store i32 42, ptr [[PTR2]], align 4
; AVX-NEXT:    br label [[LATCH]]
; AVX:       latch:
; AVX-NEXT:    [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR2]], i64 4
; AVX-NEXT:    [[I11_NOT:%.*]] = icmp eq ptr [[PTR_NEXT]], [[END]]
; AVX-NEXT:    br i1 [[I11_NOT]], label [[EXIT]], label [[BB12]]
; AVX:       exit:
; AVX-NEXT:    ret void
;
; AVX2-LABEL: @test(
; AVX2-NEXT:  entry:
; AVX2-NEXT:    [[I11_NOT1:%.*]] = icmp eq ptr [[START:%.*]], [[END:%.*]]
; AVX2-NEXT:    br i1 [[I11_NOT1]], label [[EXIT:%.*]], label [[BB12_PREHEADER:%.*]]
; AVX2:       bb12.preheader:
; AVX2-NEXT:    [[END3:%.*]] = ptrtoint ptr [[END]] to i64
; AVX2-NEXT:    [[START4:%.*]] = ptrtoint ptr [[START]] to i64
; AVX2-NEXT:    [[TMP0:%.*]] = add i64 [[END3]], -4
; AVX2-NEXT:    [[TMP1:%.*]] = sub i64 [[TMP0]], [[START4]]
; AVX2-NEXT:    [[TMP2:%.*]] = lshr i64 [[TMP1]], 2
; AVX2-NEXT:    [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
; AVX2-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 124
; AVX2-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[BB12_PREHEADER11:%.*]], label [[VECTOR_PH:%.*]]
; AVX2:       vector.ph:
; AVX2-NEXT:    [[N_VEC:%.*]] = and i64 [[TMP3]], 9223372036854775776
; AVX2-NEXT:    [[TMP4:%.*]] = shl i64 [[N_VEC]], 2
; AVX2-NEXT:    [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP4]]
; AVX2-NEXT:    br label [[VECTOR_BODY:%.*]]
; AVX2:       vector.body:
; AVX2-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; AVX2-NEXT:    [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
; AVX2-NEXT:    [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]]
; AVX2-NEXT:    [[TMP5:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 32
; AVX2-NEXT:    [[TMP6:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 64
; AVX2-NEXT:    [[TMP7:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 96
; AVX2-NEXT:    [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[NEXT_GEP]], align 4
; AVX2-NEXT:    [[WIDE_LOAD8:%.*]] = load <8 x i32>, ptr [[TMP5]], align 4
; AVX2-NEXT:    [[WIDE_LOAD9:%.*]] = load <8 x i32>, ptr [[TMP6]], align 4
; AVX2-NEXT:    [[WIDE_LOAD10:%.*]] = load <8 x i32>, ptr [[TMP7]], align 4
; AVX2-NEXT:    [[TMP8:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD]], <i32 -12, i32 -12, i32 -12, i32 -12, i32 -12, i32 -12, i32 -12, i32 -12>
; AVX2-NEXT:    [[TMP9:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD8]], <i32 -12, i32 -12, i32 -12, i32 -12, i32 -12, i32 -12, i32 -12, i32 -12>
; AVX2-NEXT:    [[TMP10:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD9]], <i32 -12, i32 -12, i32 -12, i32 -12, i32 -12, i32 -12, i32 -12, i32 -12>
; AVX2-NEXT:    [[TMP11:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD10]], <i32 -12, i32 -12, i32 -12, i32 -12, i32 -12, i32 -12, i32 -12, i32 -12>
; AVX2-NEXT:    [[TMP12:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD]], <i32 13, i32 13, i32 13, i32 13, i32 13, i32 13, i32 13, i32 13>
; AVX2-NEXT:    [[TMP13:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD8]], <i32 13, i32 13, i32 13, i32 13, i32 13, i32 13, i32 13, i32 13>
; AVX2-NEXT:    [[TMP14:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD9]], <i32 13, i32 13, i32 13, i32 13, i32 13, i32 13, i32 13, i32 13>
; AVX2-NEXT:    [[TMP15:%.*]] = icmp eq <8 x i32> [[WIDE_LOAD10]], <i32 13, i32 13, i32 13, i32 13, i32 13, i32 13, i32 13, i32 13>
; AVX2-NEXT:    [[TMP16:%.*]] = or <8 x i1> [[TMP8]], [[TMP12]]
; AVX2-NEXT:    [[TMP17:%.*]] = or <8 x i1> [[TMP9]], [[TMP13]]
; AVX2-NEXT:    [[TMP18:%.*]] = or <8 x i1> [[TMP10]], [[TMP14]]
; AVX2-NEXT:    [[TMP19:%.*]] = or <8 x i1> [[TMP11]], [[TMP15]]
; AVX2-NEXT:    tail call void @llvm.masked.store.v8i32.p0(<8 x i32> <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>, ptr [[NEXT_GEP]], i32 4, <8 x i1> [[TMP16]])
; AVX2-NEXT:    tail call void @llvm.masked.store.v8i32.p0(<8 x i32> <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>, ptr [[TMP5]], i32 4, <8 x i1> [[TMP17]])
; AVX2-NEXT:    tail call void @llvm.masked.store.v8i32.p0(<8 x i32> <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>, ptr [[TMP6]], i32 4, <8 x i1> [[TMP18]])
; AVX2-NEXT:    tail call void @llvm.masked.store.v8i32.p0(<8 x i32> <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>, ptr [[TMP7]], i32 4, <8 x i1> [[TMP19]])
; AVX2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; AVX2-NEXT:    [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; AVX2-NEXT:    br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; AVX2:       middle.block:
; AVX2-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; AVX2-NEXT:    br i1 [[CMP_N]], label [[EXIT]], label [[BB12_PREHEADER11]]
; AVX2:       bb12.preheader11:
; AVX2-NEXT:    [[PTR2_PH:%.*]] = phi ptr [ [[START]], [[BB12_PREHEADER]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
; AVX2-NEXT:    br label [[BB12:%.*]]
; AVX2:       bb12:
; AVX2-NEXT:    [[PTR2:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LATCH:%.*]] ], [ [[PTR2_PH]], [[BB12_PREHEADER11]] ]
; AVX2-NEXT:    [[VAL:%.*]] = load i32, ptr [[PTR2]], align 4
; AVX2-NEXT:    switch i32 [[VAL]], label [[LATCH]] [
; AVX2-NEXT:      i32 -12, label [[STORE:%.*]]
; AVX2-NEXT:      i32 13, label [[STORE]]
; AVX2-NEXT:    ]
; AVX2:       store:
; AVX2-NEXT:    store i32 42, ptr [[PTR2]], align 4
; AVX2-NEXT:    br label [[LATCH]]
; AVX2:       latch:
; AVX2-NEXT:    [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR2]], i64 4
; AVX2-NEXT:    [[I11_NOT:%.*]] = icmp eq ptr [[PTR_NEXT]], [[END]]
; AVX2-NEXT:    br i1 [[I11_NOT]], label [[EXIT]], label [[BB12]], !llvm.loop [[LOOP3:![0-9]+]]
; AVX2:       exit:
; AVX2-NEXT:    ret void
;
entry:
  br label %header

header:
  %ptr = phi ptr [ %start, %entry ], [ %ptr.next, %latch ]
  %i11 = icmp ne ptr %ptr, %end
  br i1 %i11, label %bb12, label %exit

bb12:
  %val = load i32, ptr %ptr, align 4
  %c1 = icmp eq i32 %val, 13
  %c2 = icmp eq i32 %val, -12
  %c3 = or i1 %c1, %c2
  br i1 %c3, label %store, label %latch
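; The two equality compares combined by the 'or' above are what SimplifyCFG is
; expected to fold into the two-case switch seen in the CHECK lines, which the
; loop vectorizer then turns into masked stores under AVX2.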

store:
  store i32 42, ptr %ptr, align 4
  br label %latch

latch:
  %ptr.next = getelementptr inbounds i32, ptr %ptr, i32 1
  br label %header

exit:
  ret void
}