; llvm/test/Transforms/SLPVectorizer/AArch64/shuffle-vectors-mask-size.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s

; Regression test: a long scalar fmul/fadd dependence chain that SLP
; vectorizes into <4 x double> trees, where one internal shuffle
; ([[TMP5]]) extracts a <2 x i32> mask from two <4 x double> sources —
; i.e. the shuffle mask size differs from the source vector width (see
; the file name). The CHECK lines below are autogenerated (see NOTE at
; the top of the file); regenerate with update_test_checks.py rather
; than editing them by hand. Do not reorder or simplify the scalar IR:
; the exact dependence structure is what reproduces this vectorization
; pattern.
define void @p(double %0) {
; CHECK-LABEL: define void @p(
; CHECK-SAME: double [[TMP0:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x double> <double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double poison>, double [[TMP0]], i32 3
; CHECK-NEXT:    [[TMP2:%.*]] = fmul <4 x double> [[TMP1]], zeroinitializer
; CHECK-NEXT:    [[TMP3:%.*]] = fadd <4 x double> zeroinitializer, [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = fadd <4 x double> [[TMP3]], zeroinitializer
; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x double> [[TMP2]], <4 x double> [[TMP3]], <2 x i32> <i32 1, i32 7>
; CHECK-NEXT:    [[TMP6:%.*]] = fadd <2 x double> zeroinitializer, [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = fmul <2 x double> [[TMP6]], zeroinitializer
; CHECK-NEXT:    [[TMP8:%.*]] = fmul <4 x double> [[TMP4]], zeroinitializer
; CHECK-NEXT:    [[TMP10:%.*]] = call <4 x double> @llvm.vector.insert.v4f64.v2f64(<4 x double> <double 0.000000e+00, double 0.000000e+00, double poison, double poison>, <2 x double> [[TMP7]], i64 2)
; CHECK-NEXT:    [[TMP11:%.*]] = fadd <4 x double> [[TMP8]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = fadd <4 x double> [[TMP11]], zeroinitializer
; CHECK-NEXT:    [[TMP13:%.*]] = fptosi <4 x double> [[TMP12]] to <4 x i32>
; CHECK-NEXT:    store <4 x i32> [[TMP13]], ptr null, align 4
; CHECK-NEXT:    ret void
;
entry:
  ; First layer of the reduction tree: scalar multiply/add chains seeded by
  ; the lone live input %0 (everything else folds against 0.0 constants).
  %mul16.1.i = fmul double %0, 0.000000e+00
  %add21.1.i = fadd double %mul16.1.i, 0.000000e+00
  %add21.2.i = fadd double %add21.1.i, 0.000000e+00
  %mul16.150.i = fmul double 0.000000e+00, 0.000000e+00
  %add21.1.1.i = fadd double 0.000000e+00, %mul16.150.i
  %add21.2.1.i = fadd double %add21.1.1.i, 0.000000e+00
  %mul16.252.i = fmul double 0.000000e+00, 0.000000e+00
  %add21.1.2.i = fadd double 0.000000e+00, %mul16.252.i
  %add21.2.2.i = fadd double %add21.1.2.i, 0.000000e+00
  %add21.2.165.i = fadd double %add21.1.i, 0.000000e+00
  %mul16.150.1.i = fmul double 0.000000e+00, 0.000000e+00
  %add21.1.1.1.i = fadd double %mul16.150.1.i, 0.000000e+00
  %add21.2.1.1.i = fadd double %add21.1.1.1.i, 0.000000e+00
  %add21.2.2.1.i = fadd double 0.000000e+00, %mul16.150.1.i
  ; Second layer: four independent chains, each consuming results of the
  ; first layer. Their cross-links (%add21.2.1.1.i, %add21.2.2.i, etc.)
  ; force the vectorizer's odd-sized shuffle when it builds the 4-wide tree.
  %mul16.1.1.i36 = fmul double %add21.2.1.1.i, 0.000000e+00
  %add21.1.1.i37 = fadd double 0.000000e+00, %mul16.1.1.i36
  %add21.2.1.i40 = fadd double %add21.1.1.i37, 0.000000e+00
  %mul16.252.i43 = fmul double %add21.2.2.i, 0.000000e+00
  %mul16.1.2.i45 = fmul double %add21.2.2.1.i, 0.000000e+00
  %add21.1.2.i46 = fadd double %mul16.252.i43, %mul16.1.2.i45
  %add21.2.2.i49 = fadd double %add21.1.2.i46, 0.000000e+00
  %mul16.157.i51 = fmul double %add21.2.i, 0.000000e+00
  %mul16.1.160.i52 = fmul double %add21.2.165.i, 0.000000e+00
  %add21.1.161.i53 = fadd double %mul16.157.i51, %mul16.1.160.i52
  %add21.2.165.i56 = fadd double %add21.1.161.i53, 0.000000e+00
  %mul16.150.1.i58 = fmul double %add21.2.1.i, 0.000000e+00
  %add21.1.1.1.i60 = fadd double %mul16.150.1.i58, 0.000000e+00
  %add21.2.1.1.i62 = fadd double %add21.1.1.1.i60, 0.000000e+00
  ; Sink: four i32 stores to consecutive slots (indices 1,2,3,0 of a null
  ; base) — the seed that lets SLP vectorize the whole tree into <4 x i32>.
  %conv14.1 = fptosi double %add21.2.1.i40 to i32
  %arrayidx16.1 = getelementptr i32, ptr null, i64 1
  store i32 %conv14.1, ptr %arrayidx16.1, align 4
  %conv14.2 = fptosi double %add21.2.2.i49 to i32
  %arrayidx16.2 = getelementptr i32, ptr null, i64 2
  store i32 %conv14.2, ptr %arrayidx16.2, align 4
  %conv14.3 = fptosi double %add21.2.165.i56 to i32
  %arrayidx16.3 = getelementptr i32, ptr null, i64 3
  store i32 %conv14.3, ptr %arrayidx16.3, align 4
  %conv14.4 = fptosi double %add21.2.1.1.i62 to i32
  store i32 %conv14.4, ptr null, align 4
  ret void
}