llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \
; RUN:   -mattr=+no-sink-splat-operands -riscv-v-vector-bits-min=128 \
; RUN:   | FileCheck -check-prefix=NO-SINK %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \
; RUN:   -mattr=-no-sink-splat-operands -riscv-v-vector-bits-min=128 \
; RUN:   | FileCheck -check-prefix=SINK %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \
; RUN:   -riscv-v-vector-bits-min=128 \
; RUN:   | FileCheck -check-prefix=DEFAULT %s

; Test that we don't sink splat operands when compiling with the
; no-sink-splat-operands feature. Each scalar register access requires an S2V
; (scalar-to-vector) transfer buffer entry; using too many of them limits
; performance.
; FIXME: This is potentially bad for register pressure. Need a better heuristic.
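;
; With sinking disabled (NO-SINK), the splat is materialized once in the
; preheader with vmv.v.x/vfmv.v.f and the loop body uses the vector-vector
; forms (vadd.vv, vfadd.vv). With sinking enabled (SINK, and DEFAULT, which
; sinks by default), the scalar stays in a GPR/FPR and the loop body uses the
; scalar-operand forms (vadd.vx, vfadd.vf), repeating the S2V transfer on
; every iteration.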

define void @sink_splat_add(ptr nocapture %a, i32 signext %x) {
; NO-SINK-LABEL: sink_splat_add:
; NO-SINK:       # %bb.0: # %entry
; NO-SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; NO-SINK-NEXT:    vmv.v.x v8, a1
; NO-SINK-NEXT:    lui a1, 1
; NO-SINK-NEXT:    add a1, a0, a1
; NO-SINK-NEXT:  .LBB0_1: # %vector.body
; NO-SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; NO-SINK-NEXT:    vle32.v v9, (a0)
; NO-SINK-NEXT:    vadd.vv v9, v9, v8
; NO-SINK-NEXT:    vse32.v v9, (a0)
; NO-SINK-NEXT:    addi a0, a0, 16
; NO-SINK-NEXT:    bne a0, a1, .LBB0_1
; NO-SINK-NEXT:  # %bb.2: # %for.cond.cleanup
; NO-SINK-NEXT:    ret
;
; SINK-LABEL: sink_splat_add:
; SINK:       # %bb.0: # %entry
; SINK-NEXT:    lui a2, 1
; SINK-NEXT:    add a2, a0, a2
; SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; SINK-NEXT:  .LBB0_1: # %vector.body
; SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; SINK-NEXT:    vle32.v v8, (a0)
; SINK-NEXT:    vadd.vx v8, v8, a1
; SINK-NEXT:    vse32.v v8, (a0)
; SINK-NEXT:    addi a0, a0, 16
; SINK-NEXT:    bne a0, a2, .LBB0_1
; SINK-NEXT:  # %bb.2: # %for.cond.cleanup
; SINK-NEXT:    ret
;
; DEFAULT-LABEL: sink_splat_add:
; DEFAULT:       # %bb.0: # %entry
; DEFAULT-NEXT:    lui a2, 1
; DEFAULT-NEXT:    add a2, a0, a2
; DEFAULT-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; DEFAULT-NEXT:  .LBB0_1: # %vector.body
; DEFAULT-NEXT:    # =>This Inner Loop Header: Depth=1
; DEFAULT-NEXT:    vle32.v v8, (a0)
; DEFAULT-NEXT:    vadd.vx v8, v8, a1
; DEFAULT-NEXT:    vse32.v v8, (a0)
; DEFAULT-NEXT:    addi a0, a0, 16
; DEFAULT-NEXT:    bne a0, a2, .LBB0_1
; DEFAULT-NEXT:  # %bb.2: # %for.cond.cleanup
; DEFAULT-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, ptr %a, i64 %index
  %1 = bitcast ptr %0 to ptr
  %wide.load = load <4 x i32>, ptr %1, align 4
  %2 = add <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast ptr %0 to ptr
  store <4 x i32> %2, ptr %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

declare i64 @llvm.vscale.i64()

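; Same pattern with scalable vectors (<vscale x 4 x i32>, LMUL=2). The vector
; loop is only entered when the runtime vectorization factor does not exceed
; the trip count, and a scalar remainder loop handles the tail; the splat
; handling in the vector body matches the fixed-length case above.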
define void @sink_splat_add_scalable(ptr nocapture %a, i32 signext %x) {
; NO-SINK-LABEL: sink_splat_add_scalable:
; NO-SINK:       # %bb.0: # %entry
; NO-SINK-NEXT:    csrr a5, vlenb
; NO-SINK-NEXT:    srli a2, a5, 1
; NO-SINK-NEXT:    li a3, 1024
; NO-SINK-NEXT:    bgeu a3, a2, .LBB1_2
; NO-SINK-NEXT:  # %bb.1:
; NO-SINK-NEXT:    li a3, 0
; NO-SINK-NEXT:    j .LBB1_5
; NO-SINK-NEXT:  .LBB1_2: # %vector.ph
; NO-SINK-NEXT:    addi a3, a2, -1
; NO-SINK-NEXT:    andi a4, a3, 1024
; NO-SINK-NEXT:    xori a3, a4, 1024
; NO-SINK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
; NO-SINK-NEXT:    vmv.v.x v8, a1
; NO-SINK-NEXT:    slli a5, a5, 1
; NO-SINK-NEXT:    mv a6, a0
; NO-SINK-NEXT:    mv a7, a3
; NO-SINK-NEXT:  .LBB1_3: # %vector.body
; NO-SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; NO-SINK-NEXT:    vl2re32.v v10, (a6)
; NO-SINK-NEXT:    vadd.vv v10, v10, v8
; NO-SINK-NEXT:    vs2r.v v10, (a6)
; NO-SINK-NEXT:    sub a7, a7, a2
; NO-SINK-NEXT:    add a6, a6, a5
; NO-SINK-NEXT:    bnez a7, .LBB1_3
; NO-SINK-NEXT:  # %bb.4: # %middle.block
; NO-SINK-NEXT:    beqz a4, .LBB1_7
; NO-SINK-NEXT:  .LBB1_5: # %for.body.preheader
; NO-SINK-NEXT:    slli a2, a3, 2
; NO-SINK-NEXT:    add a2, a0, a2
; NO-SINK-NEXT:    lui a3, 1
; NO-SINK-NEXT:    add a0, a0, a3
; NO-SINK-NEXT:  .LBB1_6: # %for.body
; NO-SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; NO-SINK-NEXT:    lw a3, 0(a2)
; NO-SINK-NEXT:    add a3, a3, a1
; NO-SINK-NEXT:    sw a3, 0(a2)
; NO-SINK-NEXT:    addi a2, a2, 4
; NO-SINK-NEXT:    bne a2, a0, .LBB1_6
; NO-SINK-NEXT:  .LBB1_7: # %for.cond.cleanup
; NO-SINK-NEXT:    ret
;
; SINK-LABEL: sink_splat_add_scalable:
; SINK:       # %bb.0: # %entry
; SINK-NEXT:    csrr a5, vlenb
; SINK-NEXT:    srli a2, a5, 1
; SINK-NEXT:    li a3, 1024
; SINK-NEXT:    bgeu a3, a2, .LBB1_2
; SINK-NEXT:  # %bb.1:
; SINK-NEXT:    li a3, 0
; SINK-NEXT:    j .LBB1_5
; SINK-NEXT:  .LBB1_2: # %vector.ph
; SINK-NEXT:    addi a3, a2, -1
; SINK-NEXT:    andi a4, a3, 1024
; SINK-NEXT:    xori a3, a4, 1024
; SINK-NEXT:    slli a5, a5, 1
; SINK-NEXT:    mv a6, a0
; SINK-NEXT:    mv a7, a3
; SINK-NEXT:    vsetvli t0, zero, e32, m2, ta, ma
; SINK-NEXT:  .LBB1_3: # %vector.body
; SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; SINK-NEXT:    vl2re32.v v8, (a6)
; SINK-NEXT:    vadd.vx v8, v8, a1
; SINK-NEXT:    vs2r.v v8, (a6)
; SINK-NEXT:    sub a7, a7, a2
; SINK-NEXT:    add a6, a6, a5
; SINK-NEXT:    bnez a7, .LBB1_3
; SINK-NEXT:  # %bb.4: # %middle.block
; SINK-NEXT:    beqz a4, .LBB1_7
; SINK-NEXT:  .LBB1_5: # %for.body.preheader
; SINK-NEXT:    slli a2, a3, 2
; SINK-NEXT:    add a2, a0, a2
; SINK-NEXT:    lui a3, 1
; SINK-NEXT:    add a0, a0, a3
; SINK-NEXT:  .LBB1_6: # %for.body
; SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; SINK-NEXT:    lw a3, 0(a2)
; SINK-NEXT:    add a3, a3, a1
; SINK-NEXT:    sw a3, 0(a2)
; SINK-NEXT:    addi a2, a2, 4
; SINK-NEXT:    bne a2, a0, .LBB1_6
; SINK-NEXT:  .LBB1_7: # %for.cond.cleanup
; SINK-NEXT:    ret
;
; DEFAULT-LABEL: sink_splat_add_scalable:
; DEFAULT:       # %bb.0: # %entry
; DEFAULT-NEXT:    csrr a5, vlenb
; DEFAULT-NEXT:    srli a2, a5, 1
; DEFAULT-NEXT:    li a3, 1024
; DEFAULT-NEXT:    bgeu a3, a2, .LBB1_2
; DEFAULT-NEXT:  # %bb.1:
; DEFAULT-NEXT:    li a3, 0
; DEFAULT-NEXT:    j .LBB1_5
; DEFAULT-NEXT:  .LBB1_2: # %vector.ph
; DEFAULT-NEXT:    addi a3, a2, -1
; DEFAULT-NEXT:    andi a4, a3, 1024
; DEFAULT-NEXT:    xori a3, a4, 1024
; DEFAULT-NEXT:    slli a5, a5, 1
; DEFAULT-NEXT:    mv a6, a0
; DEFAULT-NEXT:    mv a7, a3
; DEFAULT-NEXT:    vsetvli t0, zero, e32, m2, ta, ma
; DEFAULT-NEXT:  .LBB1_3: # %vector.body
; DEFAULT-NEXT:    # =>This Inner Loop Header: Depth=1
; DEFAULT-NEXT:    vl2re32.v v8, (a6)
; DEFAULT-NEXT:    vadd.vx v8, v8, a1
; DEFAULT-NEXT:    vs2r.v v8, (a6)
; DEFAULT-NEXT:    sub a7, a7, a2
; DEFAULT-NEXT:    add a6, a6, a5
; DEFAULT-NEXT:    bnez a7, .LBB1_3
; DEFAULT-NEXT:  # %bb.4: # %middle.block
; DEFAULT-NEXT:    beqz a4, .LBB1_7
; DEFAULT-NEXT:  .LBB1_5: # %for.body.preheader
; DEFAULT-NEXT:    slli a2, a3, 2
; DEFAULT-NEXT:    add a2, a0, a2
; DEFAULT-NEXT:    lui a3, 1
; DEFAULT-NEXT:    add a0, a0, a3
; DEFAULT-NEXT:  .LBB1_6: # %for.body
; DEFAULT-NEXT:    # =>This Inner Loop Header: Depth=1
; DEFAULT-NEXT:    lw a3, 0(a2)
; DEFAULT-NEXT:    add a3, a3, a1
; DEFAULT-NEXT:    sw a3, 0(a2)
; DEFAULT-NEXT:    addi a2, a2, 4
; DEFAULT-NEXT:    bne a2, a0, .LBB1_6
; DEFAULT-NEXT:  .LBB1_7: # %for.cond.cleanup
; DEFAULT-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, ptr %a, i64 %index
  %7 = bitcast ptr %6 to ptr
  %wide.load = load <vscale x 4 x i32>, ptr %7, align 4
  %8 = add <vscale x 4 x i32> %wide.load, %broadcast.splat
  %9 = bitcast ptr %6 to ptr
  store <vscale x 4 x i32> %8, ptr %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
  %11 = load i32, ptr %arrayidx, align 4
  %add = add i32 %11, %x
  store i32 %add, ptr %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

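; VP (vector-predicated) version: the splat feeds llvm.vp.add with a mask and
; an EVL, and with sinking enabled it still becomes a masked vadd.vx inside
; the loop.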
define void @sink_splat_vp_add(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
; NO-SINK-LABEL: sink_splat_vp_add:
; NO-SINK:       # %bb.0: # %entry
; NO-SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; NO-SINK-NEXT:    vmv.v.x v8, a1
; NO-SINK-NEXT:    lui a1, 1
; NO-SINK-NEXT:    add a1, a0, a1
; NO-SINK-NEXT:  .LBB2_1: # %vector.body
; NO-SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; NO-SINK-NEXT:    vle32.v v9, (a0)
; NO-SINK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; NO-SINK-NEXT:    vadd.vv v9, v9, v8, v0.t
; NO-SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; NO-SINK-NEXT:    vse32.v v9, (a0)
; NO-SINK-NEXT:    addi a0, a0, 16
; NO-SINK-NEXT:    bne a0, a1, .LBB2_1
; NO-SINK-NEXT:  # %bb.2: # %for.cond.cleanup
; NO-SINK-NEXT:    ret
;
; SINK-LABEL: sink_splat_vp_add:
; SINK:       # %bb.0: # %entry
; SINK-NEXT:    lui a3, 1
; SINK-NEXT:    add a3, a0, a3
; SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; SINK-NEXT:  .LBB2_1: # %vector.body
; SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; SINK-NEXT:    vle32.v v8, (a0)
; SINK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; SINK-NEXT:    vadd.vx v8, v8, a1, v0.t
; SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; SINK-NEXT:    vse32.v v8, (a0)
; SINK-NEXT:    addi a0, a0, 16
; SINK-NEXT:    bne a0, a3, .LBB2_1
; SINK-NEXT:  # %bb.2: # %for.cond.cleanup
; SINK-NEXT:    ret
;
; DEFAULT-LABEL: sink_splat_vp_add:
; DEFAULT:       # %bb.0: # %entry
; DEFAULT-NEXT:    lui a3, 1
; DEFAULT-NEXT:    add a3, a0, a3
; DEFAULT-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; DEFAULT-NEXT:  .LBB2_1: # %vector.body
; DEFAULT-NEXT:    # =>This Inner Loop Header: Depth=1
; DEFAULT-NEXT:    vle32.v v8, (a0)
; DEFAULT-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; DEFAULT-NEXT:    vadd.vx v8, v8, a1, v0.t
; DEFAULT-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; DEFAULT-NEXT:    vse32.v v8, (a0)
; DEFAULT-NEXT:    addi a0, a0, 16
; DEFAULT-NEXT:    bne a0, a3, .LBB2_1
; DEFAULT-NEXT:  # %bb.2: # %for.cond.cleanup
; DEFAULT-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, ptr %a, i64 %index
  %1 = bitcast ptr %0 to ptr
  %wide.load = load <4 x i32>, ptr %1, align 4
  %2 = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast ptr %0 to ptr
  store <4 x i32> %2, ptr %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

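; Floating-point version: the sunk form uses vfadd.vf with the scalar in fa0,
; while the non-sunk form splats with vfmv.v.f and uses vfadd.vv.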
define void @sink_splat_fadd(ptr nocapture %a, float %x) {
; NO-SINK-LABEL: sink_splat_fadd:
; NO-SINK:       # %bb.0: # %entry
; NO-SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; NO-SINK-NEXT:    vfmv.v.f v8, fa0
; NO-SINK-NEXT:    lui a1, 1
; NO-SINK-NEXT:    add a1, a0, a1
; NO-SINK-NEXT:  .LBB3_1: # %vector.body
; NO-SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; NO-SINK-NEXT:    vle32.v v9, (a0)
; NO-SINK-NEXT:    vfadd.vv v9, v9, v8
; NO-SINK-NEXT:    vse32.v v9, (a0)
; NO-SINK-NEXT:    addi a0, a0, 16
; NO-SINK-NEXT:    bne a0, a1, .LBB3_1
; NO-SINK-NEXT:  # %bb.2: # %for.cond.cleanup
; NO-SINK-NEXT:    ret
;
; SINK-LABEL: sink_splat_fadd:
; SINK:       # %bb.0: # %entry
; SINK-NEXT:    lui a1, 1
; SINK-NEXT:    add a1, a0, a1
; SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; SINK-NEXT:  .LBB3_1: # %vector.body
; SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; SINK-NEXT:    vle32.v v8, (a0)
; SINK-NEXT:    vfadd.vf v8, v8, fa0
; SINK-NEXT:    vse32.v v8, (a0)
; SINK-NEXT:    addi a0, a0, 16
; SINK-NEXT:    bne a0, a1, .LBB3_1
; SINK-NEXT:  # %bb.2: # %for.cond.cleanup
; SINK-NEXT:    ret
;
; DEFAULT-LABEL: sink_splat_fadd:
; DEFAULT:       # %bb.0: # %entry
; DEFAULT-NEXT:    lui a1, 1
; DEFAULT-NEXT:    add a1, a0, a1
; DEFAULT-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; DEFAULT-NEXT:  .LBB3_1: # %vector.body
; DEFAULT-NEXT:    # =>This Inner Loop Header: Depth=1
; DEFAULT-NEXT:    vle32.v v8, (a0)
; DEFAULT-NEXT:    vfadd.vf v8, v8, fa0
; DEFAULT-NEXT:    vse32.v v8, (a0)
; DEFAULT-NEXT:    addi a0, a0, 16
; DEFAULT-NEXT:    bne a0, a1, .LBB3_1
; DEFAULT-NEXT:  # %bb.2: # %for.cond.cleanup
; DEFAULT-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, ptr %a, i64 %index
  %1 = bitcast ptr %0 to ptr
  %wide.load = load <4 x float>, ptr %1, align 4
  %2 = fadd <4 x float> %wide.load, %broadcast.splat
  %3 = bitcast ptr %0 to ptr
  store <4 x float> %2, ptr %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

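; Scalable floating-point version (<vscale x 2 x float>, LMUL=1) with a scalar
; remainder loop, mirroring sink_splat_add_scalable.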
define void @sink_splat_fadd_scalable(ptr nocapture %a, float %x) {
; NO-SINK-LABEL: sink_splat_fadd_scalable:
; NO-SINK:       # %bb.0: # %entry
; NO-SINK-NEXT:    csrr a1, vlenb
; NO-SINK-NEXT:    srli a2, a1, 2
; NO-SINK-NEXT:    li a3, 1024
; NO-SINK-NEXT:    bgeu a3, a2, .LBB4_2
; NO-SINK-NEXT:  # %bb.1:
; NO-SINK-NEXT:    li a3, 0
; NO-SINK-NEXT:    j .LBB4_5
; NO-SINK-NEXT:  .LBB4_2: # %vector.ph
; NO-SINK-NEXT:    addi a3, a2, -1
; NO-SINK-NEXT:    andi a4, a3, 1024
; NO-SINK-NEXT:    xori a3, a4, 1024
; NO-SINK-NEXT:    vsetvli a5, zero, e32, m1, ta, ma
; NO-SINK-NEXT:    vfmv.v.f v8, fa0
; NO-SINK-NEXT:    mv a5, a0
; NO-SINK-NEXT:    mv a6, a3
; NO-SINK-NEXT:  .LBB4_3: # %vector.body
; NO-SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; NO-SINK-NEXT:    vl1re32.v v9, (a5)
; NO-SINK-NEXT:    vfadd.vv v9, v9, v8
; NO-SINK-NEXT:    vs1r.v v9, (a5)
; NO-SINK-NEXT:    sub a6, a6, a2
; NO-SINK-NEXT:    add a5, a5, a1
; NO-SINK-NEXT:    bnez a6, .LBB4_3
; NO-SINK-NEXT:  # %bb.4: # %middle.block
; NO-SINK-NEXT:    beqz a4, .LBB4_7
; NO-SINK-NEXT:  .LBB4_5: # %for.body.preheader
; NO-SINK-NEXT:    slli a1, a3, 2
; NO-SINK-NEXT:    add a1, a0, a1
; NO-SINK-NEXT:    lui a2, 1
; NO-SINK-NEXT:    add a0, a0, a2
; NO-SINK-NEXT:  .LBB4_6: # %for.body
; NO-SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; NO-SINK-NEXT:    flw fa5, 0(a1)
; NO-SINK-NEXT:    fadd.s fa5, fa5, fa0
; NO-SINK-NEXT:    fsw fa5, 0(a1)
; NO-SINK-NEXT:    addi a1, a1, 4
; NO-SINK-NEXT:    bne a1, a0, .LBB4_6
; NO-SINK-NEXT:  .LBB4_7: # %for.cond.cleanup
; NO-SINK-NEXT:    ret
;
; SINK-LABEL: sink_splat_fadd_scalable:
; SINK:       # %bb.0: # %entry
; SINK-NEXT:    csrr a1, vlenb
; SINK-NEXT:    srli a2, a1, 2
; SINK-NEXT:    li a3, 1024
; SINK-NEXT:    bgeu a3, a2, .LBB4_2
; SINK-NEXT:  # %bb.1:
; SINK-NEXT:    li a3, 0
; SINK-NEXT:    j .LBB4_5
; SINK-NEXT:  .LBB4_2: # %vector.ph
; SINK-NEXT:    addi a3, a2, -1
; SINK-NEXT:    andi a4, a3, 1024
; SINK-NEXT:    xori a3, a4, 1024
; SINK-NEXT:    mv a5, a0
; SINK-NEXT:    mv a6, a3
; SINK-NEXT:    vsetvli a7, zero, e32, m1, ta, ma
; SINK-NEXT:  .LBB4_3: # %vector.body
; SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; SINK-NEXT:    vl1re32.v v8, (a5)
; SINK-NEXT:    vfadd.vf v8, v8, fa0
; SINK-NEXT:    vs1r.v v8, (a5)
; SINK-NEXT:    sub a6, a6, a2
; SINK-NEXT:    add a5, a5, a1
; SINK-NEXT:    bnez a6, .LBB4_3
; SINK-NEXT:  # %bb.4: # %middle.block
; SINK-NEXT:    beqz a4, .LBB4_7
; SINK-NEXT:  .LBB4_5: # %for.body.preheader
; SINK-NEXT:    slli a1, a3, 2
; SINK-NEXT:    add a1, a0, a1
; SINK-NEXT:    lui a2, 1
; SINK-NEXT:    add a0, a0, a2
; SINK-NEXT:  .LBB4_6: # %for.body
; SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; SINK-NEXT:    flw fa5, 0(a1)
; SINK-NEXT:    fadd.s fa5, fa5, fa0
; SINK-NEXT:    fsw fa5, 0(a1)
; SINK-NEXT:    addi a1, a1, 4
; SINK-NEXT:    bne a1, a0, .LBB4_6
; SINK-NEXT:  .LBB4_7: # %for.cond.cleanup
; SINK-NEXT:    ret
;
; DEFAULT-LABEL: sink_splat_fadd_scalable:
; DEFAULT:       # %bb.0: # %entry
; DEFAULT-NEXT:    csrr a1, vlenb
; DEFAULT-NEXT:    srli a2, a1, 2
; DEFAULT-NEXT:    li a3, 1024
; DEFAULT-NEXT:    bgeu a3, a2, .LBB4_2
; DEFAULT-NEXT:  # %bb.1:
; DEFAULT-NEXT:    li a3, 0
; DEFAULT-NEXT:    j .LBB4_5
; DEFAULT-NEXT:  .LBB4_2: # %vector.ph
; DEFAULT-NEXT:    addi a3, a2, -1
; DEFAULT-NEXT:    andi a4, a3, 1024
; DEFAULT-NEXT:    xori a3, a4, 1024
; DEFAULT-NEXT:    mv a5, a0
; DEFAULT-NEXT:    mv a6, a3
; DEFAULT-NEXT:    vsetvli a7, zero, e32, m1, ta, ma
; DEFAULT-NEXT:  .LBB4_3: # %vector.body
; DEFAULT-NEXT:    # =>This Inner Loop Header: Depth=1
; DEFAULT-NEXT:    vl1re32.v v8, (a5)
; DEFAULT-NEXT:    vfadd.vf v8, v8, fa0
; DEFAULT-NEXT:    vs1r.v v8, (a5)
; DEFAULT-NEXT:    sub a6, a6, a2
; DEFAULT-NEXT:    add a5, a5, a1
; DEFAULT-NEXT:    bnez a6, .LBB4_3
; DEFAULT-NEXT:  # %bb.4: # %middle.block
; DEFAULT-NEXT:    beqz a4, .LBB4_7
; DEFAULT-NEXT:  .LBB4_5: # %for.body.preheader
; DEFAULT-NEXT:    slli a1, a3, 2
; DEFAULT-NEXT:    add a1, a0, a1
; DEFAULT-NEXT:    lui a2, 1
; DEFAULT-NEXT:    add a0, a0, a2
; DEFAULT-NEXT:  .LBB4_6: # %for.body
; DEFAULT-NEXT:    # =>This Inner Loop Header: Depth=1
; DEFAULT-NEXT:    flw fa5, 0(a1)
; DEFAULT-NEXT:    fadd.s fa5, fa5, fa0
; DEFAULT-NEXT:    fsw fa5, 0(a1)
; DEFAULT-NEXT:    addi a1, a1, 4
; DEFAULT-NEXT:    bne a1, a0, .LBB4_6
; DEFAULT-NEXT:  .LBB4_7: # %for.cond.cleanup
; DEFAULT-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 1
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 1
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 1
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds float, ptr %a, i64 %index
  %7 = bitcast ptr %6 to ptr
  %wide.load = load <vscale x 2 x float>, ptr %7, align 4
  %8 = fadd <vscale x 2 x float> %wide.load, %broadcast.splat
  %9 = bitcast ptr %6 to ptr
  store <vscale x 2 x float> %8, ptr %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %indvars.iv
  %11 = load float, ptr %arrayidx, align 4
  %mul = fadd float %11, %x
  store float %mul, ptr %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

declare <4 x float> @llvm.vp.fadd.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32)

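; VP floating-point version: with sinking enabled the splat becomes a masked
; vfadd.vf inside the loop.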
define void @sink_splat_vp_fadd(ptr nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) {
; NO-SINK-LABEL: sink_splat_vp_fadd:
; NO-SINK:       # %bb.0: # %entry
; NO-SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; NO-SINK-NEXT:    vfmv.v.f v8, fa0
; NO-SINK-NEXT:    lui a2, 1
; NO-SINK-NEXT:    add a2, a0, a2
; NO-SINK-NEXT:  .LBB5_1: # %vector.body
; NO-SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; NO-SINK-NEXT:    vle32.v v9, (a0)
; NO-SINK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; NO-SINK-NEXT:    vfadd.vv v9, v9, v8, v0.t
; NO-SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; NO-SINK-NEXT:    vse32.v v9, (a0)
; NO-SINK-NEXT:    addi a0, a0, 16
; NO-SINK-NEXT:    bne a0, a2, .LBB5_1
; NO-SINK-NEXT:  # %bb.2: # %for.cond.cleanup
; NO-SINK-NEXT:    ret
;
; SINK-LABEL: sink_splat_vp_fadd:
; SINK:       # %bb.0: # %entry
; SINK-NEXT:    lui a2, 1
; SINK-NEXT:    add a2, a0, a2
; SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; SINK-NEXT:  .LBB5_1: # %vector.body
; SINK-NEXT:    # =>This Inner Loop Header: Depth=1
; SINK-NEXT:    vle32.v v8, (a0)
; SINK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; SINK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
; SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; SINK-NEXT:    vse32.v v8, (a0)
; SINK-NEXT:    addi a0, a0, 16
; SINK-NEXT:    bne a0, a2, .LBB5_1
; SINK-NEXT:  # %bb.2: # %for.cond.cleanup
; SINK-NEXT:    ret
;
; DEFAULT-LABEL: sink_splat_vp_fadd:
; DEFAULT:       # %bb.0: # %entry
; DEFAULT-NEXT:    lui a2, 1
; DEFAULT-NEXT:    add a2, a0, a2
; DEFAULT-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; DEFAULT-NEXT:  .LBB5_1: # %vector.body
; DEFAULT-NEXT:    # =>This Inner Loop Header: Depth=1
; DEFAULT-NEXT:    vle32.v v8, (a0)
; DEFAULT-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; DEFAULT-NEXT:    vfadd.vf v8, v8, fa0, v0.t
; DEFAULT-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; DEFAULT-NEXT:    vse32.v v8, (a0)
; DEFAULT-NEXT:    addi a0, a0, 16
; DEFAULT-NEXT:    bne a0, a2, .LBB5_1
; DEFAULT-NEXT:  # %bb.2: # %for.cond.cleanup
; DEFAULT-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, ptr %a, i64 %index
  %1 = bitcast ptr %0 to ptr
  %wide.load = load <4 x float>, ptr %1, align 4
  %2 = call <4 x float> @llvm.vp.fadd.v4i32(<4 x float> %wide.load, <4 x float> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast ptr %0 to ptr
  store <4 x float> %2, ptr %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}