llvm/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py

; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32ZVFH
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+zba,+zbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32ZVFH
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVFH,RV64V
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+rva22u64 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVFH,RVA22U64
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32ZVFHMIN,RV32-NO-ZFHMIN
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVFHMIN,RV64-NO-ZFHMIN
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfhmin,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32ZVFHMIN,RV32-ZFHMIN
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfhmin,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVFHMIN,RV64-ZFHMIN

; Tests that a floating-point build_vector doesn't try to generate a VID
; instruction.
define void @buildvec_no_vid_v4f32(ptr %x) {
; CHECK-LABEL: buildvec_no_vid_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
; CHECK-NEXT:    addi a1, a1, %lo(.LCPI0_0)
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  store <4 x float> <float 0.0, float 4.0, float 0.0, float 2.0>, ptr %x
  ret void
}

; Not all BUILD_VECTORs are successfully lowered by the backend: some are
; expanded into scalarized stack stores. However, this may result in an
; infinite loop in the DAGCombiner, which tries to recombine those stores into
; a BUILD_VECTOR followed by a vector store. The BUILD_VECTOR is then expanded
; again and the loop repeats.
; Until all BUILD_VECTORs are lowered, we disable store-combining after
; legalization for fixed-length vectors.
; This test uses a trick with a shufflevector which can't be lowered to a
; SHUFFLE_VECTOR node: the mask is shorter than the source vectors and the
; shuffle indices aren't located within the same 4-element subvector, so it is
; expanded to 4 EXTRACT_VECTOR_ELTs and a BUILD_VECTOR. This expansion then
; triggers the loop.
define <4 x float> @hang_when_merging_stores_after_legalization(<8 x float> %x, <8 x float> %y) optsize {
; CHECK-LABEL: hang_when_merging_stores_after_legalization:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vmul.vx v14, v12, a0
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v12, v8, v14
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v0, 12
; CHECK-NEXT:    vadd.vi v8, v14, -14
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v12, v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v8, v12
; CHECK-NEXT:    ret
  %z = shufflevector <8 x float> %x, <8 x float> %y, <4 x i32> <i32 0, i32 7, i32 8, i32 15>
  ret <4 x float> %z
}

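; The constant <0.0, 1.0> is materialized with vid.v followed by an
; integer-to-FP convert, avoiding a constant pool load.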
define void @buildvec_dominant0_v2f32(ptr %x) {
; CHECK-LABEL: buildvec_dominant0_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  store <2 x float> <float 0.0, float 1.0>, ptr %x
  ret void
}

; We don't want to lower this to the insertion of two scalar elements as above,
; as each would require its own load from the constant pool.

define void @buildvec_dominant1_v2f32(ptr %x) {
; CHECK-LABEL: buildvec_dominant1_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vi v8, v8, 1
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  store <2 x float> <float 1.0, float 2.0>, ptr %x
  ret void
}

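; The dominant value 2.0 is splatted via its integer bit pattern and the single
; zero element is inserted with a vslideup.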
define void @buildvec_dominant0_v4f32(ptr %x) {
; CHECK-LABEL: buildvec_dominant0_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, 262144
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a1
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 2
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  store <4 x float> <float 2.0, float 2.0, float 0.0, float 2.0>, ptr %x
  ret void
}

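; The dominant value %f is splatted with vfmv.v.f and the zero element is slid
; into index 1.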
define void @buildvec_dominant1_v4f32(ptr %x, float %f) {
; CHECK-LABEL: buildvec_dominant1_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 1
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %v0 = insertelement <4 x float> poison, float %f, i32 0
  %v1 = insertelement <4 x float> %v0, float 0.0, i32 1
  %v2 = insertelement <4 x float> %v1, float %f, i32 2
  %v3 = insertelement <4 x float> %v2, float %f, i32 3
  store <4 x float> %v3, ptr %x
  ret void
}

define void @buildvec_dominant2_v4f32(ptr %x, float %f) {
; CHECK-LABEL: buildvec_dominant2_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, 262144
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a1
; CHECK-NEXT:    vfmv.v.f v9, fa0
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT:    vslideup.vi v9, v8, 1
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    ret
  %v0 = insertelement <4 x float> poison, float %f, i32 0
  %v1 = insertelement <4 x float> %v0, float 2.0, i32 1
  %v2 = insertelement <4 x float> %v1, float %f, i32 2
  %v3 = insertelement <4 x float> %v2, float %f, i32 3
  store <4 x float> %v3, ptr %x
  ret void
}

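; The two constant 2.0 elements are merged into the splat of %f with a masked
; vmerge.vxm.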
define void @buildvec_merge0_v4f32(ptr %x, float %f) {
; CHECK-LABEL: buildvec_merge0_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    vmv.v.i v0, 6
; CHECK-NEXT:    lui a1, 262144
; CHECK-NEXT:    vmerge.vxm v8, v8, a1, v0
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %v0 = insertelement <4 x float> poison, float %f, i32 0
  %v1 = insertelement <4 x float> %v0, float 2.0, i32 1
  %v2 = insertelement <4 x float> %v1, float 2.0, i32 2
  %v3 = insertelement <4 x float> %v2, float %f, i32 3
  store <4 x float> %v3, ptr %x
  ret void
}

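; Splat of the constant-index element 3 of a v4f16, lowered to a single
; vrgather.vi.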
define <4 x half> @splat_c3_v4f16(<4 x half> %v) {
; CHECK-LABEL: splat_c3_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 3
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %x = extractelement <4 x half> %v, i32 3
  %ins = insertelement <4 x half> poison, half %x, i32 0
  %splat = shufflevector <4 x half> %ins, <4 x half> poison, <4 x i32> zeroinitializer
  ret <4 x half> %splat
}

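; Splat of a variable-index f16 element. Configurations without scalar zfhmin
; use vrgather.vx; with +zfhmin the element is currently extracted through the
; stack, including a libcall to scale the index.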
define <4 x half> @splat_idx_v4f16(<4 x half> %v, i64 %idx) {
; RV32ZVFH-LABEL: splat_idx_v4f16:
; RV32ZVFH:       # %bb.0:
; RV32ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV32ZVFH-NEXT:    vrgather.vx v9, v8, a0
; RV32ZVFH-NEXT:    vmv1r.v v8, v9
; RV32ZVFH-NEXT:    ret
;
; RV64ZVFH-LABEL: splat_idx_v4f16:
; RV64ZVFH:       # %bb.0:
; RV64ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV64ZVFH-NEXT:    vrgather.vx v9, v8, a0
; RV64ZVFH-NEXT:    vmv1r.v v8, v9
; RV64ZVFH-NEXT:    ret
;
; RV32-NO-ZFHMIN-LABEL: splat_idx_v4f16:
; RV32-NO-ZFHMIN:       # %bb.0:
; RV32-NO-ZFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV32-NO-ZFHMIN-NEXT:    vrgather.vx v9, v8, a0
; RV32-NO-ZFHMIN-NEXT:    vmv1r.v v8, v9
; RV32-NO-ZFHMIN-NEXT:    ret
;
; RV64-NO-ZFHMIN-LABEL: splat_idx_v4f16:
; RV64-NO-ZFHMIN:       # %bb.0:
; RV64-NO-ZFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV64-NO-ZFHMIN-NEXT:    vrgather.vx v9, v8, a0
; RV64-NO-ZFHMIN-NEXT:    vmv1r.v v8, v9
; RV64-NO-ZFHMIN-NEXT:    ret
;
; RV32-ZFHMIN-LABEL: splat_idx_v4f16:
; RV32-ZFHMIN:       # %bb.0:
; RV32-ZFHMIN-NEXT:    addi sp, sp, -48
; RV32-ZFHMIN-NEXT:    .cfi_def_cfa_offset 48
; RV32-ZFHMIN-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32-ZFHMIN-NEXT:    .cfi_offset ra, -4
; RV32-ZFHMIN-NEXT:    csrr a1, vlenb
; RV32-ZFHMIN-NEXT:    sub sp, sp, a1
; RV32-ZFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 1 * vlenb
; RV32-ZFHMIN-NEXT:    addi a1, sp, 32
; RV32-ZFHMIN-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
; RV32-ZFHMIN-NEXT:    andi a0, a0, 3
; RV32-ZFHMIN-NEXT:    li a1, 2
; RV32-ZFHMIN-NEXT:    call __mulsi3
; RV32-ZFHMIN-NEXT:    addi a1, sp, 16
; RV32-ZFHMIN-NEXT:    add a0, a1, a0
; RV32-ZFHMIN-NEXT:    addi a2, sp, 32
; RV32-ZFHMIN-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
; RV32-ZFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV32-ZFHMIN-NEXT:    vse16.v v8, (a1)
; RV32-ZFHMIN-NEXT:    lh a0, 0(a0)
; RV32-ZFHMIN-NEXT:    vmv.v.x v8, a0
; RV32-ZFHMIN-NEXT:    csrr a0, vlenb
; RV32-ZFHMIN-NEXT:    add sp, sp, a0
; RV32-ZFHMIN-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32-ZFHMIN-NEXT:    addi sp, sp, 48
; RV32-ZFHMIN-NEXT:    ret
;
; RV64-ZFHMIN-LABEL: splat_idx_v4f16:
; RV64-ZFHMIN:       # %bb.0:
; RV64-ZFHMIN-NEXT:    addi sp, sp, -48
; RV64-ZFHMIN-NEXT:    .cfi_def_cfa_offset 48
; RV64-ZFHMIN-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64-ZFHMIN-NEXT:    .cfi_offset ra, -8
; RV64-ZFHMIN-NEXT:    csrr a1, vlenb
; RV64-ZFHMIN-NEXT:    sub sp, sp, a1
; RV64-ZFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 1 * vlenb
; RV64-ZFHMIN-NEXT:    addi a1, sp, 32
; RV64-ZFHMIN-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
; RV64-ZFHMIN-NEXT:    andi a0, a0, 3
; RV64-ZFHMIN-NEXT:    li a1, 2
; RV64-ZFHMIN-NEXT:    call __muldi3
; RV64-ZFHMIN-NEXT:    addi a1, sp, 16
; RV64-ZFHMIN-NEXT:    add a0, a1, a0
; RV64-ZFHMIN-NEXT:    addi a2, sp, 32
; RV64-ZFHMIN-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
; RV64-ZFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV64-ZFHMIN-NEXT:    vse16.v v8, (a1)
; RV64-ZFHMIN-NEXT:    lh a0, 0(a0)
; RV64-ZFHMIN-NEXT:    vmv.v.x v8, a0
; RV64-ZFHMIN-NEXT:    csrr a0, vlenb
; RV64-ZFHMIN-NEXT:    add sp, sp, a0
; RV64-ZFHMIN-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64-ZFHMIN-NEXT:    addi sp, sp, 48
; RV64-ZFHMIN-NEXT:    ret
  %x = extractelement <4 x half> %v, i64 %idx
  %ins = insertelement <4 x half> poison, half %x, i32 0
  %splat = shufflevector <4 x half> %ins, <4 x half> poison, <4 x i32> zeroinitializer
  ret <4 x half> %splat
}

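; Splat of the constant-index element 5 of a v8f32 via vrgather.vi.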
define <8 x float> @splat_c5_v8f32(<8 x float> %v) {
; CHECK-LABEL: splat_c5_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 5
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %x = extractelement <8 x float> %v, i32 5
  %ins = insertelement <8 x float> poison, float %x, i32 0
  %splat = shufflevector <8 x float> %ins, <8 x float> poison, <8 x i32> zeroinitializer
  ret <8 x float> %splat
}

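; Splat of a variable-index f32 element via vrgather.vx.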
define <8 x float> @splat_idx_v8f32(<8 x float> %v, i64 %idx) {
;
; CHECK-LABEL: splat_idx_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %x = extractelement <8 x float> %v, i64 %idx
  %ins = insertelement <8 x float> poison, float %x, i32 0
  %splat = shufflevector <8 x float> %ins, <8 x float> poison, <8 x i32> zeroinitializer
  ret <8 x float> %splat
}

; Test that we pull the vlse of the constant pool value out of the loop.
define dso_local void @splat_load_licm(ptr %0) {
; RV32-LABEL: splat_load_licm:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, 1
; RV32-NEXT:    add a1, a0, a1
; RV32-NEXT:    lui a2, 263168
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v8, a2
; RV32-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
; RV32-NEXT:    vse32.v v8, (a0)
; RV32-NEXT:    addi a0, a0, 16
; RV32-NEXT:    bne a0, a1, .LBB12_1
; RV32-NEXT:  # %bb.2:
; RV32-NEXT:    ret
;
; RV64V-LABEL: splat_load_licm:
; RV64V:       # %bb.0:
; RV64V-NEXT:    lui a1, 1
; RV64V-NEXT:    add a1, a0, a1
; RV64V-NEXT:    lui a2, 263168
; RV64V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64V-NEXT:    vmv.v.x v8, a2
; RV64V-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
; RV64V-NEXT:    vse32.v v8, (a0)
; RV64V-NEXT:    addi a0, a0, 16
; RV64V-NEXT:    bne a0, a1, .LBB12_1
; RV64V-NEXT:  # %bb.2:
; RV64V-NEXT:    ret
;
; RVA22U64-LABEL: splat_load_licm:
; RVA22U64:       # %bb.0:
; RVA22U64-NEXT:    lui a1, 1
; RVA22U64-NEXT:    add a1, a1, a0
; RVA22U64-NEXT:    lui a2, 263168
; RVA22U64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RVA22U64-NEXT:    vmv.v.x v8, a2
; RVA22U64-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
; RVA22U64-NEXT:    vse32.v v8, (a0)
; RVA22U64-NEXT:    addi a0, a0, 16
; RVA22U64-NEXT:    bne a0, a1, .LBB12_1
; RVA22U64-NEXT:  # %bb.2:
; RVA22U64-NEXT:    ret
;
; RV64ZVFHMIN-LABEL: splat_load_licm:
; RV64ZVFHMIN:       # %bb.0:
; RV64ZVFHMIN-NEXT:    lui a1, 1
; RV64ZVFHMIN-NEXT:    add a1, a0, a1
; RV64ZVFHMIN-NEXT:    lui a2, 263168
; RV64ZVFHMIN-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64ZVFHMIN-NEXT:    vmv.v.x v8, a2
; RV64ZVFHMIN-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
; RV64ZVFHMIN-NEXT:    vse32.v v8, (a0)
; RV64ZVFHMIN-NEXT:    addi a0, a0, 16
; RV64ZVFHMIN-NEXT:    bne a0, a1, .LBB12_1
; RV64ZVFHMIN-NEXT:  # %bb.2:
; RV64ZVFHMIN-NEXT:    ret
  br label %2

2:                                                ; preds = %2, %1
  %3 = phi i32 [ 0, %1 ], [ %6, %2 ]
  %4 = getelementptr inbounds float, ptr %0, i32 %3
  %5 = bitcast ptr %4 to ptr
  store <4 x float> <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>, ptr %5, align 4
  %6 = add nuw i32 %3, 4
  %7 = icmp eq i32 %6, 1024
  br i1 %7, label %8, label %2

8:                                                ; preds = %2
  ret void
}

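; Build vectors of scalar FP operands are assembled with vfmv.v.f followed by
; a chain of vfslide1down.vf; without zvfh the f16 elements are first moved to
; GPRs and inserted with vslide1down.vx.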
define <2 x half> @buildvec_v2f16(half %a, half %b) {
; RV32ZVFH-LABEL: buildvec_v2f16:
; RV32ZVFH:       # %bb.0:
; RV32ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV32ZVFH-NEXT:    vfmv.v.f v8, fa0
; RV32ZVFH-NEXT:    vfslide1down.vf v8, v8, fa1
; RV32ZVFH-NEXT:    ret
;
; RV64ZVFH-LABEL: buildvec_v2f16:
; RV64ZVFH:       # %bb.0:
; RV64ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV64ZVFH-NEXT:    vfmv.v.f v8, fa0
; RV64ZVFH-NEXT:    vfslide1down.vf v8, v8, fa1
; RV64ZVFH-NEXT:    ret
;
; RV32-NO-ZFHMIN-LABEL: buildvec_v2f16:
; RV32-NO-ZFHMIN:       # %bb.0:
; RV32-NO-ZFHMIN-NEXT:    fmv.x.w a0, fa1
; RV32-NO-ZFHMIN-NEXT:    fmv.x.w a1, fa0
; RV32-NO-ZFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV32-NO-ZFHMIN-NEXT:    vmv.v.x v8, a1
; RV32-NO-ZFHMIN-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NO-ZFHMIN-NEXT:    ret
;
; RV64-NO-ZFHMIN-LABEL: buildvec_v2f16:
; RV64-NO-ZFHMIN:       # %bb.0:
; RV64-NO-ZFHMIN-NEXT:    fmv.x.w a0, fa1
; RV64-NO-ZFHMIN-NEXT:    fmv.x.w a1, fa0
; RV64-NO-ZFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV64-NO-ZFHMIN-NEXT:    vmv.v.x v8, a1
; RV64-NO-ZFHMIN-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NO-ZFHMIN-NEXT:    ret
;
; RV32-ZFHMIN-LABEL: buildvec_v2f16:
; RV32-ZFHMIN:       # %bb.0:
; RV32-ZFHMIN-NEXT:    fmv.x.h a0, fa1
; RV32-ZFHMIN-NEXT:    fmv.x.h a1, fa0
; RV32-ZFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV32-ZFHMIN-NEXT:    vmv.v.x v8, a1
; RV32-ZFHMIN-NEXT:    vslide1down.vx v8, v8, a0
; RV32-ZFHMIN-NEXT:    ret
;
; RV64-ZFHMIN-LABEL: buildvec_v2f16:
; RV64-ZFHMIN:       # %bb.0:
; RV64-ZFHMIN-NEXT:    fmv.x.h a0, fa1
; RV64-ZFHMIN-NEXT:    fmv.x.h a1, fa0
; RV64-ZFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV64-ZFHMIN-NEXT:    vmv.v.x v8, a1
; RV64-ZFHMIN-NEXT:    vslide1down.vx v8, v8, a0
; RV64-ZFHMIN-NEXT:    ret
  %v1 = insertelement <2 x half> poison, half %a, i64 0
  %v2 = insertelement <2 x half> %v1, half %b, i64 1
  ret <2 x half> %v2
}

define <2 x float> @buildvec_v2f32(float %a, float %b) {
; CHECK-LABEL: buildvec_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa1
; CHECK-NEXT:    ret
  %v1 = insertelement <2 x float> poison, float %a, i64 0
  %v2 = insertelement <2 x float> %v1, float %b, i64 1
  ret <2 x float> %v2
}

define <2 x double> @buildvec_v2f64(double %a, double %b) {
; CHECK-LABEL: buildvec_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa1
; CHECK-NEXT:    ret
  %v1 = insertelement <2 x double> poison, double %a, i64 0
  %v2 = insertelement <2 x double> %v1, double %b, i64 1
  ret <2 x double> %v2
}

define <2 x double> @buildvec_v2f64_b(double %a, double %b) {
; CHECK-LABEL: buildvec_v2f64_b:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa1
; CHECK-NEXT:    ret
  %v1 = insertelement <2 x double> poison, double %b, i64 1
  %v2 = insertelement <2 x double> %v1, double %a, i64 0
  ret <2 x double> %v2
}

define <4 x float> @buildvec_v4f32(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: buildvec_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa1
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa2
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa3
; CHECK-NEXT:    ret
  %v1 = insertelement <4 x float> poison, float %a, i64 0
  %v2 = insertelement <4 x float> %v1, float %b, i64 1
  %v3 = insertelement <4 x float> %v2, float %c, i64 2
  %v4 = insertelement <4 x float> %v3, float %d, i64 3
  ret <4 x float> %v4
}

define <8 x float> @buildvec_v8f32(float %e0, float %e1, float %e2, float %e3, float %e4, float %e5, float %e6, float %e7) {
; CHECK-LABEL: buildvec_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa1
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa2
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa3
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa4
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa5
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa6
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa7
; CHECK-NEXT:    ret
  %v0 = insertelement <8 x float> poison, float %e0, i64 0
  %v1 = insertelement <8 x float> %v0, float %e1, i64 1
  %v2 = insertelement <8 x float> %v1, float %e2, i64 2
  %v3 = insertelement <8 x float> %v2, float %e3, i64 3
  %v4 = insertelement <8 x float> %v3, float %e4, i64 4
  %v5 = insertelement <8 x float> %v4, float %e5, i64 5
  %v6 = insertelement <8 x float> %v5, float %e6, i64 6
  %v7 = insertelement <8 x float> %v6, float %e7, i64 7
  ret <8 x float> %v7
}

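; With 16 scalar operands the build_vector is lowered through a stack
; temporary: the elements are stored to an aligned slot and reloaded with a
; single vle32.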
define <16 x float> @buildvec_v16f32(float %e0, float %e1, float %e2, float %e3, float %e4, float %e5, float %e6, float %e7, float %e8, float %e9, float %e10, float %e11, float %e12, float %e13, float %e14, float %e15) {
; RV32-LABEL: buildvec_v16f32:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -128
; RV32-NEXT:    .cfi_def_cfa_offset 128
; RV32-NEXT:    sw ra, 124(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 120(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    .cfi_offset s0, -8
; RV32-NEXT:    addi s0, sp, 128
; RV32-NEXT:    .cfi_def_cfa s0, 0
; RV32-NEXT:    andi sp, sp, -64
; RV32-NEXT:    sw a7, 60(sp)
; RV32-NEXT:    sw a6, 56(sp)
; RV32-NEXT:    sw a5, 52(sp)
; RV32-NEXT:    sw a4, 48(sp)
; RV32-NEXT:    sw a3, 44(sp)
; RV32-NEXT:    sw a2, 40(sp)
; RV32-NEXT:    sw a1, 36(sp)
; RV32-NEXT:    sw a0, 32(sp)
; RV32-NEXT:    fsw fa7, 28(sp)
; RV32-NEXT:    fsw fa6, 24(sp)
; RV32-NEXT:    fsw fa5, 20(sp)
; RV32-NEXT:    fsw fa4, 16(sp)
; RV32-NEXT:    fsw fa3, 12(sp)
; RV32-NEXT:    fsw fa2, 8(sp)
; RV32-NEXT:    fsw fa1, 4(sp)
; RV32-NEXT:    fsw fa0, 0(sp)
; RV32-NEXT:    mv a0, sp
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    addi sp, s0, -128
; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 128
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_v16f32:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -128
; RV64-NEXT:    .cfi_def_cfa_offset 128
; RV64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    .cfi_offset s0, -16
; RV64-NEXT:    addi s0, sp, 128
; RV64-NEXT:    .cfi_def_cfa s0, 0
; RV64-NEXT:    andi sp, sp, -64
; RV64-NEXT:    fmv.w.x ft0, a0
; RV64-NEXT:    fmv.w.x ft1, a1
; RV64-NEXT:    fmv.w.x ft2, a2
; RV64-NEXT:    fmv.w.x ft3, a3
; RV64-NEXT:    fmv.w.x ft4, a4
; RV64-NEXT:    fmv.w.x ft5, a5
; RV64-NEXT:    fmv.w.x ft6, a6
; RV64-NEXT:    fmv.w.x ft7, a7
; RV64-NEXT:    fsw fa7, 28(sp)
; RV64-NEXT:    fsw fa6, 24(sp)
; RV64-NEXT:    fsw fa5, 20(sp)
; RV64-NEXT:    fsw fa4, 16(sp)
; RV64-NEXT:    fsw fa3, 12(sp)
; RV64-NEXT:    fsw fa2, 8(sp)
; RV64-NEXT:    fsw fa1, 4(sp)
; RV64-NEXT:    fsw fa0, 0(sp)
; RV64-NEXT:    fsw ft7, 60(sp)
; RV64-NEXT:    fsw ft6, 56(sp)
; RV64-NEXT:    fsw ft5, 52(sp)
; RV64-NEXT:    fsw ft4, 48(sp)
; RV64-NEXT:    fsw ft3, 44(sp)
; RV64-NEXT:    fsw ft2, 40(sp)
; RV64-NEXT:    fsw ft1, 36(sp)
; RV64-NEXT:    fsw ft0, 32(sp)
; RV64-NEXT:    mv a0, sp
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    addi sp, s0, -128
; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 128
; RV64-NEXT:    ret
  %v0 = insertelement <16 x float> poison, float %e0, i64 0
  %v1 = insertelement <16 x float> %v0, float %e1, i64 1
  %v2 = insertelement <16 x float> %v1, float %e2, i64 2
  %v3 = insertelement <16 x float> %v2, float %e3, i64 3
  %v4 = insertelement <16 x float> %v3, float %e4, i64 4
  %v5 = insertelement <16 x float> %v4, float %e5, i64 5
  %v6 = insertelement <16 x float> %v5, float %e6, i64 6
  %v7 = insertelement <16 x float> %v6, float %e7, i64 7
  %v8 = insertelement <16 x float> %v7, float %e8, i64 8
  %v9 = insertelement <16 x float> %v8, float %e9, i64 9
  %v10 = insertelement <16 x float> %v9, float %e10, i64 10
  %v11 = insertelement <16 x float> %v10, float %e11, i64 11
  %v12 = insertelement <16 x float> %v11, float %e12, i64 12
  %v13 = insertelement <16 x float> %v12, float %e13, i64 13
  %v14 = insertelement <16 x float> %v13, float %e14, i64 14
  %v15 = insertelement <16 x float> %v14, float %e15, i64 15
  ret <16 x float> %v15
}

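; With 32 f32 operands the first 16 arrive in registers and the rest are
; loaded from the caller's stack; everything goes through a stack temporary and
; is reloaded with a single vle32 at vl=32.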
define <32 x float> @buildvec_v32f32(float %e0, float %e1, float %e2, float %e3, float %e4, float %e5, float %e6, float %e7, float %e8, float %e9, float %e10, float %e11, float %e12, float %e13, float %e14, float %e15, float %e16, float %e17, float %e18, float %e19, float %e20, float %e21, float %e22, float %e23, float %e24, float %e25, float %e26, float %e27, float %e28, float %e29, float %e30, float %e31) {
; RV32-LABEL: buildvec_v32f32:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -256
; RV32-NEXT:    .cfi_def_cfa_offset 256
; RV32-NEXT:    sw ra, 252(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 248(sp) # 4-byte Folded Spill
; RV32-NEXT:    fsd fs0, 240(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs1, 232(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs2, 224(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs3, 216(sp) # 8-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    .cfi_offset s0, -8
; RV32-NEXT:    .cfi_offset fs0, -16
; RV32-NEXT:    .cfi_offset fs1, -24
; RV32-NEXT:    .cfi_offset fs2, -32
; RV32-NEXT:    .cfi_offset fs3, -40
; RV32-NEXT:    addi s0, sp, 256
; RV32-NEXT:    .cfi_def_cfa s0, 0
; RV32-NEXT:    andi sp, sp, -128
; RV32-NEXT:    flw ft0, 0(s0)
; RV32-NEXT:    flw ft1, 4(s0)
; RV32-NEXT:    flw ft2, 8(s0)
; RV32-NEXT:    flw ft3, 12(s0)
; RV32-NEXT:    flw ft4, 16(s0)
; RV32-NEXT:    flw ft5, 20(s0)
; RV32-NEXT:    flw ft6, 24(s0)
; RV32-NEXT:    flw ft7, 28(s0)
; RV32-NEXT:    flw ft8, 32(s0)
; RV32-NEXT:    flw ft9, 36(s0)
; RV32-NEXT:    flw ft10, 40(s0)
; RV32-NEXT:    flw ft11, 44(s0)
; RV32-NEXT:    flw fs0, 60(s0)
; RV32-NEXT:    flw fs1, 56(s0)
; RV32-NEXT:    flw fs2, 52(s0)
; RV32-NEXT:    flw fs3, 48(s0)
; RV32-NEXT:    fsw fs0, 124(sp)
; RV32-NEXT:    fsw fs1, 120(sp)
; RV32-NEXT:    fsw fs2, 116(sp)
; RV32-NEXT:    fsw fs3, 112(sp)
; RV32-NEXT:    fsw ft11, 108(sp)
; RV32-NEXT:    fsw ft10, 104(sp)
; RV32-NEXT:    fsw ft9, 100(sp)
; RV32-NEXT:    fsw ft8, 96(sp)
; RV32-NEXT:    fsw ft7, 92(sp)
; RV32-NEXT:    fsw ft6, 88(sp)
; RV32-NEXT:    fsw ft5, 84(sp)
; RV32-NEXT:    fsw ft4, 80(sp)
; RV32-NEXT:    fsw ft3, 76(sp)
; RV32-NEXT:    fsw ft2, 72(sp)
; RV32-NEXT:    fsw ft1, 68(sp)
; RV32-NEXT:    fsw ft0, 64(sp)
; RV32-NEXT:    sw a7, 60(sp)
; RV32-NEXT:    sw a6, 56(sp)
; RV32-NEXT:    sw a5, 52(sp)
; RV32-NEXT:    sw a4, 48(sp)
; RV32-NEXT:    sw a3, 44(sp)
; RV32-NEXT:    sw a2, 40(sp)
; RV32-NEXT:    sw a1, 36(sp)
; RV32-NEXT:    sw a0, 32(sp)
; RV32-NEXT:    fsw fa7, 28(sp)
; RV32-NEXT:    fsw fa6, 24(sp)
; RV32-NEXT:    fsw fa5, 20(sp)
; RV32-NEXT:    fsw fa4, 16(sp)
; RV32-NEXT:    fsw fa3, 12(sp)
; RV32-NEXT:    fsw fa2, 8(sp)
; RV32-NEXT:    fsw fa1, 4(sp)
; RV32-NEXT:    fsw fa0, 0(sp)
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    mv a1, sp
; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT:    vle32.v v8, (a1)
; RV32-NEXT:    addi sp, s0, -256
; RV32-NEXT:    lw ra, 252(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 248(sp) # 4-byte Folded Reload
; RV32-NEXT:    fld fs0, 240(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs1, 232(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs2, 224(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs3, 216(sp) # 8-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 256
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_v32f32:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -256
; RV64-NEXT:    .cfi_def_cfa_offset 256
; RV64-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs0, 232(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs1, 224(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs2, 216(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs3, 208(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs4, 200(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs5, 192(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs6, 184(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs7, 176(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs8, 168(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs9, 160(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs10, 152(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs11, 144(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    .cfi_offset s0, -16
; RV64-NEXT:    .cfi_offset fs0, -24
; RV64-NEXT:    .cfi_offset fs1, -32
; RV64-NEXT:    .cfi_offset fs2, -40
; RV64-NEXT:    .cfi_offset fs3, -48
; RV64-NEXT:    .cfi_offset fs4, -56
; RV64-NEXT:    .cfi_offset fs5, -64
; RV64-NEXT:    .cfi_offset fs6, -72
; RV64-NEXT:    .cfi_offset fs7, -80
; RV64-NEXT:    .cfi_offset fs8, -88
; RV64-NEXT:    .cfi_offset fs9, -96
; RV64-NEXT:    .cfi_offset fs10, -104
; RV64-NEXT:    .cfi_offset fs11, -112
; RV64-NEXT:    addi s0, sp, 256
; RV64-NEXT:    .cfi_def_cfa s0, 0
; RV64-NEXT:    andi sp, sp, -128
; RV64-NEXT:    fmv.w.x ft0, a0
; RV64-NEXT:    fmv.w.x ft1, a1
; RV64-NEXT:    fmv.w.x ft2, a2
; RV64-NEXT:    fmv.w.x ft3, a3
; RV64-NEXT:    fmv.w.x ft4, a4
; RV64-NEXT:    fmv.w.x ft5, a5
; RV64-NEXT:    fmv.w.x ft6, a6
; RV64-NEXT:    fmv.w.x ft7, a7
; RV64-NEXT:    flw ft8, 0(s0)
; RV64-NEXT:    flw ft9, 8(s0)
; RV64-NEXT:    flw ft10, 16(s0)
; RV64-NEXT:    flw ft11, 24(s0)
; RV64-NEXT:    flw fs0, 32(s0)
; RV64-NEXT:    flw fs1, 40(s0)
; RV64-NEXT:    flw fs2, 48(s0)
; RV64-NEXT:    flw fs3, 56(s0)
; RV64-NEXT:    flw fs4, 64(s0)
; RV64-NEXT:    flw fs5, 72(s0)
; RV64-NEXT:    flw fs6, 80(s0)
; RV64-NEXT:    flw fs7, 88(s0)
; RV64-NEXT:    flw fs8, 120(s0)
; RV64-NEXT:    flw fs9, 112(s0)
; RV64-NEXT:    flw fs10, 104(s0)
; RV64-NEXT:    flw fs11, 96(s0)
; RV64-NEXT:    fsw fs8, 124(sp)
; RV64-NEXT:    fsw fs9, 120(sp)
; RV64-NEXT:    fsw fs10, 116(sp)
; RV64-NEXT:    fsw fs11, 112(sp)
; RV64-NEXT:    fsw fs7, 108(sp)
; RV64-NEXT:    fsw fs6, 104(sp)
; RV64-NEXT:    fsw fs5, 100(sp)
; RV64-NEXT:    fsw fs4, 96(sp)
; RV64-NEXT:    fsw fs3, 92(sp)
; RV64-NEXT:    fsw fs2, 88(sp)
; RV64-NEXT:    fsw fs1, 84(sp)
; RV64-NEXT:    fsw fs0, 80(sp)
; RV64-NEXT:    fsw ft11, 76(sp)
; RV64-NEXT:    fsw ft10, 72(sp)
; RV64-NEXT:    fsw ft9, 68(sp)
; RV64-NEXT:    fsw ft8, 64(sp)
; RV64-NEXT:    fsw fa7, 28(sp)
; RV64-NEXT:    fsw fa6, 24(sp)
; RV64-NEXT:    fsw fa5, 20(sp)
; RV64-NEXT:    fsw fa4, 16(sp)
; RV64-NEXT:    fsw fa3, 12(sp)
; RV64-NEXT:    fsw fa2, 8(sp)
; RV64-NEXT:    fsw fa1, 4(sp)
; RV64-NEXT:    fsw fa0, 0(sp)
; RV64-NEXT:    fsw ft7, 60(sp)
; RV64-NEXT:    fsw ft6, 56(sp)
; RV64-NEXT:    fsw ft5, 52(sp)
; RV64-NEXT:    fsw ft4, 48(sp)
; RV64-NEXT:    fsw ft3, 44(sp)
; RV64-NEXT:    fsw ft2, 40(sp)
; RV64-NEXT:    fsw ft1, 36(sp)
; RV64-NEXT:    fsw ft0, 32(sp)
; RV64-NEXT:    li a0, 32
; RV64-NEXT:    mv a1, sp
; RV64-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; RV64-NEXT:    vle32.v v8, (a1)
; RV64-NEXT:    addi sp, s0, -256
; RV64-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs0, 232(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs1, 224(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs2, 216(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs3, 208(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs4, 200(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs5, 192(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs6, 184(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs7, 176(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs8, 168(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs9, 160(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs10, 152(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs11, 144(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 256
; RV64-NEXT:    ret
  %v0 = insertelement <32 x float> poison, float %e0, i64 0
  %v1 = insertelement <32 x float> %v0, float %e1, i64 1
  %v2 = insertelement <32 x float> %v1, float %e2, i64 2
  %v3 = insertelement <32 x float> %v2, float %e3, i64 3
  %v4 = insertelement <32 x float> %v3, float %e4, i64 4
  %v5 = insertelement <32 x float> %v4, float %e5, i64 5
  %v6 = insertelement <32 x float> %v5, float %e6, i64 6
  %v7 = insertelement <32 x float> %v6, float %e7, i64 7
  %v8 = insertelement <32 x float> %v7, float %e8, i64 8
  %v9 = insertelement <32 x float> %v8, float %e9, i64 9
  %v10 = insertelement <32 x float> %v9, float %e10, i64 10
  %v11 = insertelement <32 x float> %v10, float %e11, i64 11
  %v12 = insertelement <32 x float> %v11, float %e12, i64 12
  %v13 = insertelement <32 x float> %v12, float %e13, i64 13
  %v14 = insertelement <32 x float> %v13, float %e14, i64 14
  %v15 = insertelement <32 x float> %v14, float %e15, i64 15
  %v16 = insertelement <32 x float> %v15, float %e16, i64 16
  %v17 = insertelement <32 x float> %v16, float %e17, i64 17
  %v18 = insertelement <32 x float> %v17, float %e18, i64 18
  %v19 = insertelement <32 x float> %v18, float %e19, i64 19
  %v20 = insertelement <32 x float> %v19, float %e20, i64 20
  %v21 = insertelement <32 x float> %v20, float %e21, i64 21
  %v22 = insertelement <32 x float> %v21, float %e22, i64 22
  %v23 = insertelement <32 x float> %v22, float %e23, i64 23
  %v24 = insertelement <32 x float> %v23, float %e24, i64 24
  %v25 = insertelement <32 x float> %v24, float %e25, i64 25
  %v26 = insertelement <32 x float> %v25, float %e26, i64 26
  %v27 = insertelement <32 x float> %v26, float %e27, i64 27
  %v28 = insertelement <32 x float> %v27, float %e28, i64 28
  %v29 = insertelement <32 x float> %v28, float %e29, i64 29
  %v30 = insertelement <32 x float> %v29, float %e30, i64 30
  %v31 = insertelement <32 x float> %v30, float %e31, i64 31
  ret <32 x float> %v31
}

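; Eight f64 operands all arrive in FPRs and are lowered through a stack
; temporary and a single vle64.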
define <8 x double> @buildvec_v8f64(double %e0, double %e1, double %e2, double %e3, double %e4, double %e5, double %e6, double %e7) {
; RV32-LABEL: buildvec_v8f64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -128
; RV32-NEXT:    .cfi_def_cfa_offset 128
; RV32-NEXT:    sw ra, 124(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 120(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    .cfi_offset s0, -8
; RV32-NEXT:    addi s0, sp, 128
; RV32-NEXT:    .cfi_def_cfa s0, 0
; RV32-NEXT:    andi sp, sp, -64
; RV32-NEXT:    fsd fa7, 56(sp)
; RV32-NEXT:    fsd fa6, 48(sp)
; RV32-NEXT:    fsd fa5, 40(sp)
; RV32-NEXT:    fsd fa4, 32(sp)
; RV32-NEXT:    fsd fa3, 24(sp)
; RV32-NEXT:    fsd fa2, 16(sp)
; RV32-NEXT:    fsd fa1, 8(sp)
; RV32-NEXT:    fsd fa0, 0(sp)
; RV32-NEXT:    mv a0, sp
; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    addi sp, s0, -128
; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 128
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_v8f64:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -128
; RV64-NEXT:    .cfi_def_cfa_offset 128
; RV64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    .cfi_offset s0, -16
; RV64-NEXT:    addi s0, sp, 128
; RV64-NEXT:    .cfi_def_cfa s0, 0
; RV64-NEXT:    andi sp, sp, -64
; RV64-NEXT:    fsd fa7, 56(sp)
; RV64-NEXT:    fsd fa6, 48(sp)
; RV64-NEXT:    fsd fa5, 40(sp)
; RV64-NEXT:    fsd fa4, 32(sp)
; RV64-NEXT:    fsd fa3, 24(sp)
; RV64-NEXT:    fsd fa2, 16(sp)
; RV64-NEXT:    fsd fa1, 8(sp)
; RV64-NEXT:    fsd fa0, 0(sp)
; RV64-NEXT:    mv a0, sp
; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    addi sp, s0, -128
; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 128
; RV64-NEXT:    ret
  %v0 = insertelement <8 x double> poison, double %e0, i64 0
  %v1 = insertelement <8 x double> %v0, double %e1, i64 1
  %v2 = insertelement <8 x double> %v1, double %e2, i64 2
  %v3 = insertelement <8 x double> %v2, double %e3, i64 3
  %v4 = insertelement <8 x double> %v3, double %e4, i64 4
  %v5 = insertelement <8 x double> %v4, double %e5, i64 5
  %v6 = insertelement <8 x double> %v5, double %e6, i64 6
  %v7 = insertelement <8 x double> %v6, double %e7, i64 7
  ret <8 x double> %v7
}

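; For 16 doubles, RV32 has to reassemble the operands passed in GPR pairs
; through a stack slot before building the vector; RV64 can store the GPR
; operands directly. Both reload the result with a single vle64.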
define <16 x double> @buildvec_v16f64(double %e0, double %e1, double %e2, double %e3, double %e4, double %e5, double %e6, double %e7, double %e8, double %e9, double %e10, double %e11, double %e12, double %e13, double %e14, double %e15) {
; RV32-LABEL: buildvec_v16f64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -384
; RV32-NEXT:    .cfi_def_cfa_offset 384
; RV32-NEXT:    sw ra, 380(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 376(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    .cfi_offset s0, -8
; RV32-NEXT:    addi s0, sp, 384
; RV32-NEXT:    .cfi_def_cfa s0, 0
; RV32-NEXT:    andi sp, sp, -128
; RV32-NEXT:    sw a0, 120(sp)
; RV32-NEXT:    sw a1, 124(sp)
; RV32-NEXT:    fld ft0, 120(sp)
; RV32-NEXT:    sw a2, 120(sp)
; RV32-NEXT:    sw a3, 124(sp)
; RV32-NEXT:    fld ft1, 120(sp)
; RV32-NEXT:    sw a4, 120(sp)
; RV32-NEXT:    sw a5, 124(sp)
; RV32-NEXT:    fld ft2, 120(sp)
; RV32-NEXT:    sw a6, 120(sp)
; RV32-NEXT:    sw a7, 124(sp)
; RV32-NEXT:    fld ft3, 120(sp)
; RV32-NEXT:    fld ft4, 24(s0)
; RV32-NEXT:    fld ft5, 16(s0)
; RV32-NEXT:    fld ft6, 8(s0)
; RV32-NEXT:    fld ft7, 0(s0)
; RV32-NEXT:    fsd ft4, 248(sp)
; RV32-NEXT:    fsd ft5, 240(sp)
; RV32-NEXT:    fsd ft6, 232(sp)
; RV32-NEXT:    fsd ft7, 224(sp)
; RV32-NEXT:    fsd fa7, 184(sp)
; RV32-NEXT:    fsd fa6, 176(sp)
; RV32-NEXT:    fsd fa5, 168(sp)
; RV32-NEXT:    fsd fa4, 160(sp)
; RV32-NEXT:    fsd fa3, 152(sp)
; RV32-NEXT:    fsd fa2, 144(sp)
; RV32-NEXT:    fsd fa1, 136(sp)
; RV32-NEXT:    fsd fa0, 128(sp)
; RV32-NEXT:    fsd ft3, 216(sp)
; RV32-NEXT:    fsd ft2, 208(sp)
; RV32-NEXT:    fsd ft1, 200(sp)
; RV32-NEXT:    fsd ft0, 192(sp)
; RV32-NEXT:    addi a0, sp, 128
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    addi sp, s0, -384
; RV32-NEXT:    lw ra, 380(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 376(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 384
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_v16f64:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -256
; RV64-NEXT:    .cfi_def_cfa_offset 256
; RV64-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    .cfi_offset s0, -16
; RV64-NEXT:    addi s0, sp, 256
; RV64-NEXT:    .cfi_def_cfa s0, 0
; RV64-NEXT:    andi sp, sp, -128
; RV64-NEXT:    sd a7, 120(sp)
; RV64-NEXT:    sd a6, 112(sp)
; RV64-NEXT:    sd a5, 104(sp)
; RV64-NEXT:    sd a4, 96(sp)
; RV64-NEXT:    sd a3, 88(sp)
; RV64-NEXT:    sd a2, 80(sp)
; RV64-NEXT:    sd a1, 72(sp)
; RV64-NEXT:    sd a0, 64(sp)
; RV64-NEXT:    fsd fa7, 56(sp)
; RV64-NEXT:    fsd fa6, 48(sp)
; RV64-NEXT:    fsd fa5, 40(sp)
; RV64-NEXT:    fsd fa4, 32(sp)
; RV64-NEXT:    fsd fa3, 24(sp)
; RV64-NEXT:    fsd fa2, 16(sp)
; RV64-NEXT:    fsd fa1, 8(sp)
; RV64-NEXT:    fsd fa0, 0(sp)
; RV64-NEXT:    mv a0, sp
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    addi sp, s0, -256
; RV64-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 256
; RV64-NEXT:    ret
  %v0 = insertelement <16 x double> poison, double %e0, i64 0
  %v1 = insertelement <16 x double> %v0, double %e1, i64 1
  %v2 = insertelement <16 x double> %v1, double %e2, i64 2
  %v3 = insertelement <16 x double> %v2, double %e3, i64 3
  %v4 = insertelement <16 x double> %v3, double %e4, i64 4
  %v5 = insertelement <16 x double> %v4, double %e5, i64 5
  %v6 = insertelement <16 x double> %v5, double %e6, i64 6
  %v7 = insertelement <16 x double> %v6, double %e7, i64 7
  %v8 = insertelement <16 x double> %v7, double %e8, i64 8
  %v9 = insertelement <16 x double> %v8, double %e9, i64 9
  %v10 = insertelement <16 x double> %v9, double %e10, i64 10
  %v11 = insertelement <16 x double> %v10, double %e11, i64 11
  %v12 = insertelement <16 x double> %v11, double %e12, i64 12
  %v13 = insertelement <16 x double> %v12, double %e13, i64 13
  %v14 = insertelement <16 x double> %v13, double %e14, i64 14
  %v15 = insertelement <16 x double> %v14, double %e15, i64 15
  ret <16 x double> %v15
}

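; A v32f64 result occupies two m8 register groups, so the stack temporary is
; reloaded with two vle64 instructions.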
define <32 x double> @buildvec_v32f64(double %e0, double %e1, double %e2, double %e3, double %e4, double %e5, double %e6, double %e7, double %e8, double %e9, double %e10, double %e11, double %e12, double %e13, double %e14, double %e15, double %e16, double %e17, double %e18, double %e19, double %e20, double %e21, double %e22, double %e23, double %e24, double %e25, double %e26, double %e27, double %e28, double %e29, double %e30, double %e31) {
; RV32-LABEL: buildvec_v32f64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -512
; RV32-NEXT:    .cfi_def_cfa_offset 512
; RV32-NEXT:    sw ra, 508(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 504(sp) # 4-byte Folded Spill
; RV32-NEXT:    fsd fs0, 496(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs1, 488(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs2, 480(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs3, 472(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs4, 464(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs5, 456(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs6, 448(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs7, 440(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs8, 432(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs9, 424(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs10, 416(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs11, 408(sp) # 8-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    .cfi_offset s0, -8
; RV32-NEXT:    .cfi_offset fs0, -16
; RV32-NEXT:    .cfi_offset fs1, -24
; RV32-NEXT:    .cfi_offset fs2, -32
; RV32-NEXT:    .cfi_offset fs3, -40
; RV32-NEXT:    .cfi_offset fs4, -48
; RV32-NEXT:    .cfi_offset fs5, -56
; RV32-NEXT:    .cfi_offset fs6, -64
; RV32-NEXT:    .cfi_offset fs7, -72
; RV32-NEXT:    .cfi_offset fs8, -80
; RV32-NEXT:    .cfi_offset fs9, -88
; RV32-NEXT:    .cfi_offset fs10, -96
; RV32-NEXT:    .cfi_offset fs11, -104
; RV32-NEXT:    addi s0, sp, 512
; RV32-NEXT:    .cfi_def_cfa s0, 0
; RV32-NEXT:    andi sp, sp, -128
; RV32-NEXT:    sw a0, 120(sp)
; RV32-NEXT:    sw a1, 124(sp)
; RV32-NEXT:    fld ft0, 120(sp)
; RV32-NEXT:    sw a2, 120(sp)
; RV32-NEXT:    sw a3, 124(sp)
; RV32-NEXT:    fld ft1, 120(sp)
; RV32-NEXT:    sw a4, 120(sp)
; RV32-NEXT:    sw a5, 124(sp)
; RV32-NEXT:    fld ft2, 120(sp)
; RV32-NEXT:    sw a6, 120(sp)
; RV32-NEXT:    sw a7, 124(sp)
; RV32-NEXT:    fld ft3, 120(sp)
; RV32-NEXT:    fld ft4, 0(s0)
; RV32-NEXT:    fld ft5, 8(s0)
; RV32-NEXT:    fld ft6, 16(s0)
; RV32-NEXT:    fld ft7, 24(s0)
; RV32-NEXT:    fld ft8, 32(s0)
; RV32-NEXT:    fld ft9, 40(s0)
; RV32-NEXT:    fld ft10, 48(s0)
; RV32-NEXT:    fld ft11, 56(s0)
; RV32-NEXT:    fld fs0, 64(s0)
; RV32-NEXT:    fld fs1, 72(s0)
; RV32-NEXT:    fld fs2, 80(s0)
; RV32-NEXT:    fld fs3, 88(s0)
; RV32-NEXT:    fld fs4, 96(s0)
; RV32-NEXT:    fld fs5, 104(s0)
; RV32-NEXT:    fld fs6, 112(s0)
; RV32-NEXT:    fld fs7, 120(s0)
; RV32-NEXT:    fld fs8, 152(s0)
; RV32-NEXT:    fld fs9, 144(s0)
; RV32-NEXT:    fld fs10, 136(s0)
; RV32-NEXT:    fld fs11, 128(s0)
; RV32-NEXT:    fsd fs8, 248(sp)
; RV32-NEXT:    fsd fs9, 240(sp)
; RV32-NEXT:    fsd fs10, 232(sp)
; RV32-NEXT:    fsd fs11, 224(sp)
; RV32-NEXT:    fsd fs7, 216(sp)
; RV32-NEXT:    fsd fs6, 208(sp)
; RV32-NEXT:    fsd fs5, 200(sp)
; RV32-NEXT:    fsd fs4, 192(sp)
; RV32-NEXT:    fsd fs3, 184(sp)
; RV32-NEXT:    fsd fs2, 176(sp)
; RV32-NEXT:    fsd fs1, 168(sp)
; RV32-NEXT:    fsd fs0, 160(sp)
; RV32-NEXT:    fsd ft11, 152(sp)
; RV32-NEXT:    fsd ft10, 144(sp)
; RV32-NEXT:    fsd ft9, 136(sp)
; RV32-NEXT:    fsd ft8, 128(sp)
; RV32-NEXT:    fsd ft7, 376(sp)
; RV32-NEXT:    fsd ft6, 368(sp)
; RV32-NEXT:    fsd ft5, 360(sp)
; RV32-NEXT:    fsd ft4, 352(sp)
; RV32-NEXT:    fsd fa7, 312(sp)
; RV32-NEXT:    fsd fa6, 304(sp)
; RV32-NEXT:    fsd fa5, 296(sp)
; RV32-NEXT:    fsd fa4, 288(sp)
; RV32-NEXT:    fsd fa3, 280(sp)
; RV32-NEXT:    fsd fa2, 272(sp)
; RV32-NEXT:    fsd fa1, 264(sp)
; RV32-NEXT:    fsd fa0, 256(sp)
; RV32-NEXT:    fsd ft3, 344(sp)
; RV32-NEXT:    fsd ft2, 336(sp)
; RV32-NEXT:    fsd ft1, 328(sp)
; RV32-NEXT:    fsd ft0, 320(sp)
; RV32-NEXT:    addi a0, sp, 128
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vle64.v v16, (a0)
; RV32-NEXT:    addi a0, sp, 256
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    addi sp, s0, -512
; RV32-NEXT:    lw ra, 508(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 504(sp) # 4-byte Folded Reload
; RV32-NEXT:    fld fs0, 496(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs1, 488(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs2, 480(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs3, 472(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs4, 464(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs5, 456(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs6, 448(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs7, 440(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs8, 432(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs9, 424(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs10, 416(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs11, 408(sp) # 8-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 512
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_v32f64:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -384
; RV64-NEXT:    .cfi_def_cfa_offset 384
; RV64-NEXT:    sd ra, 376(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 368(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs0, 360(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs1, 352(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs2, 344(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs3, 336(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    .cfi_offset s0, -16
; RV64-NEXT:    .cfi_offset fs0, -24
; RV64-NEXT:    .cfi_offset fs1, -32
; RV64-NEXT:    .cfi_offset fs2, -40
; RV64-NEXT:    .cfi_offset fs3, -48
; RV64-NEXT:    addi s0, sp, 384
; RV64-NEXT:    .cfi_def_cfa s0, 0
; RV64-NEXT:    andi sp, sp, -128
; RV64-NEXT:    fld ft0, 0(s0)
; RV64-NEXT:    fld ft1, 8(s0)
; RV64-NEXT:    fld ft2, 16(s0)
; RV64-NEXT:    fld ft3, 24(s0)
; RV64-NEXT:    fld ft4, 32(s0)
; RV64-NEXT:    fld ft5, 40(s0)
; RV64-NEXT:    fld ft6, 48(s0)
; RV64-NEXT:    fld ft7, 56(s0)
; RV64-NEXT:    fld ft8, 64(s0)
; RV64-NEXT:    fld ft9, 72(s0)
; RV64-NEXT:    fld ft10, 80(s0)
; RV64-NEXT:    fld ft11, 88(s0)
; RV64-NEXT:    fld fs0, 96(s0)
; RV64-NEXT:    fld fs1, 104(s0)
; RV64-NEXT:    fld fs2, 112(s0)
; RV64-NEXT:    fld fs3, 120(s0)
; RV64-NEXT:    sd a7, 248(sp)
; RV64-NEXT:    sd a6, 240(sp)
; RV64-NEXT:    sd a5, 232(sp)
; RV64-NEXT:    sd a4, 224(sp)
; RV64-NEXT:    sd a3, 216(sp)
; RV64-NEXT:    sd a2, 208(sp)
; RV64-NEXT:    sd a1, 200(sp)
; RV64-NEXT:    sd a0, 192(sp)
; RV64-NEXT:    fsd fa7, 184(sp)
; RV64-NEXT:    fsd fa6, 176(sp)
; RV64-NEXT:    fsd fa5, 168(sp)
; RV64-NEXT:    fsd fa4, 160(sp)
; RV64-NEXT:    fsd fa3, 152(sp)
; RV64-NEXT:    fsd fa2, 144(sp)
; RV64-NEXT:    fsd fa1, 136(sp)
; RV64-NEXT:    fsd fa0, 128(sp)
; RV64-NEXT:    fsd fs3, 120(sp)
; RV64-NEXT:    fsd fs2, 112(sp)
; RV64-NEXT:    fsd fs1, 104(sp)
; RV64-NEXT:    fsd fs0, 96(sp)
; RV64-NEXT:    fsd ft11, 88(sp)
; RV64-NEXT:    fsd ft10, 80(sp)
; RV64-NEXT:    fsd ft9, 72(sp)
; RV64-NEXT:    fsd ft8, 64(sp)
; RV64-NEXT:    fsd ft7, 56(sp)
; RV64-NEXT:    fsd ft6, 48(sp)
; RV64-NEXT:    fsd ft5, 40(sp)
; RV64-NEXT:    fsd ft4, 32(sp)
; RV64-NEXT:    fsd ft3, 24(sp)
; RV64-NEXT:    fsd ft2, 16(sp)
; RV64-NEXT:    fsd ft1, 8(sp)
; RV64-NEXT:    fsd ft0, 0(sp)
; RV64-NEXT:    addi a0, sp, 128
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    mv a0, sp
; RV64-NEXT:    vle64.v v16, (a0)
; RV64-NEXT:    addi sp, s0, -384
; RV64-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 368(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs0, 360(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs1, 352(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs2, 344(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs3, 336(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 384
; RV64-NEXT:    ret
  %v0 = insertelement <32 x double> poison, double %e0, i64 0
  %v1 = insertelement <32 x double> %v0, double %e1, i64 1
  %v2 = insertelement <32 x double> %v1, double %e2, i64 2
  %v3 = insertelement <32 x double> %v2, double %e3, i64 3
  %v4 = insertelement <32 x double> %v3, double %e4, i64 4
  %v5 = insertelement <32 x double> %v4, double %e5, i64 5
  %v6 = insertelement <32 x double> %v5, double %e6, i64 6
  %v7 = insertelement <32 x double> %v6, double %e7, i64 7
  %v8 = insertelement <32 x double> %v7, double %e8, i64 8
  %v9 = insertelement <32 x double> %v8, double %e9, i64 9
  %v10 = insertelement <32 x double> %v9, double %e10, i64 10
  %v11 = insertelement <32 x double> %v10, double %e11, i64 11
  %v12 = insertelement <32 x double> %v11, double %e12, i64 12
  %v13 = insertelement <32 x double> %v12, double %e13, i64 13
  %v14 = insertelement <32 x double> %v13, double %e14, i64 14
  %v15 = insertelement <32 x double> %v14, double %e15, i64 15
  %v16 = insertelement <32 x double> %v15, double %e16, i64 16
  %v17 = insertelement <32 x double> %v16, double %e17, i64 17
  %v18 = insertelement <32 x double> %v17, double %e18, i64 18
  %v19 = insertelement <32 x double> %v18, double %e19, i64 19
  %v20 = insertelement <32 x double> %v19, double %e20, i64 20
  %v21 = insertelement <32 x double> %v20, double %e21, i64 21
  %v22 = insertelement <32 x double> %v21, double %e22, i64 22
  %v23 = insertelement <32 x double> %v22, double %e23, i64 23
  %v24 = insertelement <32 x double> %v23, double %e24, i64 24
  %v25 = insertelement <32 x double> %v24, double %e25, i64 25
  %v26 = insertelement <32 x double> %v25, double %e26, i64 26
  %v27 = insertelement <32 x double> %v26, double %e27, i64 27
  %v28 = insertelement <32 x double> %v27, double %e28, i64 28
  %v29 = insertelement <32 x double> %v28, double %e29, i64 29
  %v30 = insertelement <32 x double> %v29, double %e30, i64 30
  %v31 = insertelement <32 x double> %v30, double %e31, i64 31
  ret <32 x double> %v31
}

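; With an exact VLEN of 128 (vscale_range(2,2)) each m1 register of the result
; holds exactly two doubles, so the vector is assembled register by register
; with vfmv.v.f/vmv.v.x and vfslide1down.vf rather than stored to a stack
; temporary and reloaded with vle64.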
define <32 x double> @buildvec_v32f64_exact_vlen(double %e0, double %e1, double %e2, double %e3, double %e4, double %e5, double %e6, double %e7, double %e8, double %e9, double %e10, double %e11, double %e12, double %e13, double %e14, double %e15, double %e16, double %e17, double %e18, double %e19, double %e20, double %e21, double %e22, double %e23, double %e24, double %e25, double %e26, double %e27, double %e28, double %e29, double %e30, double %e31) vscale_range(2,2) {
; RV32-LABEL: buildvec_v32f64_exact_vlen:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -112
; RV32-NEXT:    .cfi_def_cfa_offset 112
; RV32-NEXT:    fsd fs0, 104(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs1, 96(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs2, 88(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs3, 80(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs4, 72(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs5, 64(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs6, 56(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs7, 48(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs8, 40(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs9, 32(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs10, 24(sp) # 8-byte Folded Spill
; RV32-NEXT:    fsd fs11, 16(sp) # 8-byte Folded Spill
; RV32-NEXT:    .cfi_offset fs0, -8
; RV32-NEXT:    .cfi_offset fs1, -16
; RV32-NEXT:    .cfi_offset fs2, -24
; RV32-NEXT:    .cfi_offset fs3, -32
; RV32-NEXT:    .cfi_offset fs4, -40
; RV32-NEXT:    .cfi_offset fs5, -48
; RV32-NEXT:    .cfi_offset fs6, -56
; RV32-NEXT:    .cfi_offset fs7, -64
; RV32-NEXT:    .cfi_offset fs8, -72
; RV32-NEXT:    .cfi_offset fs9, -80
; RV32-NEXT:    .cfi_offset fs10, -88
; RV32-NEXT:    .cfi_offset fs11, -96
; RV32-NEXT:    sw a6, 8(sp)
; RV32-NEXT:    sw a7, 12(sp)
; RV32-NEXT:    fld ft6, 8(sp)
; RV32-NEXT:    sw a4, 8(sp)
; RV32-NEXT:    sw a5, 12(sp)
; RV32-NEXT:    fld ft7, 8(sp)
; RV32-NEXT:    sw a2, 8(sp)
; RV32-NEXT:    sw a3, 12(sp)
; RV32-NEXT:    fld ft8, 8(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    fld ft9, 8(sp)
; RV32-NEXT:    fld ft0, 264(sp)
; RV32-NEXT:    fld ft1, 256(sp)
; RV32-NEXT:    fld ft2, 248(sp)
; RV32-NEXT:    fld ft3, 240(sp)
; RV32-NEXT:    fld ft4, 232(sp)
; RV32-NEXT:    fld ft5, 224(sp)
; RV32-NEXT:    fld ft10, 216(sp)
; RV32-NEXT:    fld ft11, 208(sp)
; RV32-NEXT:    fld fs0, 200(sp)
; RV32-NEXT:    fld fs1, 192(sp)
; RV32-NEXT:    fld fs2, 184(sp)
; RV32-NEXT:    fld fs3, 176(sp)
; RV32-NEXT:    fld fs4, 152(sp)
; RV32-NEXT:    fld fs5, 144(sp)
; RV32-NEXT:    fld fs6, 168(sp)
; RV32-NEXT:    fld fs7, 160(sp)
; RV32-NEXT:    fld fs8, 136(sp)
; RV32-NEXT:    fld fs9, 128(sp)
; RV32-NEXT:    fld fs10, 120(sp)
; RV32-NEXT:    fld fs11, 112(sp)
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT:    vfmv.v.f v8, ft9
; RV32-NEXT:    vfslide1down.vf v12, v8, ft8
; RV32-NEXT:    vfmv.v.f v8, fa2
; RV32-NEXT:    vfslide1down.vf v9, v8, fa3
; RV32-NEXT:    vfmv.v.f v8, fa0
; RV32-NEXT:    vfslide1down.vf v8, v8, fa1
; RV32-NEXT:    vfmv.v.f v10, fa4
; RV32-NEXT:    vfslide1down.vf v10, v10, fa5
; RV32-NEXT:    vfmv.v.f v11, fa6
; RV32-NEXT:    vfslide1down.vf v11, v11, fa7
; RV32-NEXT:    vfmv.v.f v13, ft7
; RV32-NEXT:    vfslide1down.vf v13, v13, ft6
; RV32-NEXT:    vfmv.v.f v14, fs11
; RV32-NEXT:    vfslide1down.vf v14, v14, fs10
; RV32-NEXT:    vfmv.v.f v15, fs9
; RV32-NEXT:    vfslide1down.vf v15, v15, fs8
; RV32-NEXT:    vfmv.v.f v16, fs7
; RV32-NEXT:    vfslide1down.vf v17, v16, fs6
; RV32-NEXT:    vfmv.v.f v16, fs5
; RV32-NEXT:    vfslide1down.vf v16, v16, fs4
; RV32-NEXT:    vfmv.v.f v18, fs3
; RV32-NEXT:    vfslide1down.vf v18, v18, fs2
; RV32-NEXT:    vfmv.v.f v19, fs1
; RV32-NEXT:    vfslide1down.vf v19, v19, fs0
; RV32-NEXT:    vfmv.v.f v20, ft11
; RV32-NEXT:    vfslide1down.vf v20, v20, ft10
; RV32-NEXT:    vfmv.v.f v21, ft5
; RV32-NEXT:    vfslide1down.vf v21, v21, ft4
; RV32-NEXT:    vfmv.v.f v22, ft3
; RV32-NEXT:    vfslide1down.vf v22, v22, ft2
; RV32-NEXT:    vfmv.v.f v23, ft1
; RV32-NEXT:    vfslide1down.vf v23, v23, ft0
; RV32-NEXT:    fld fs0, 104(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs1, 96(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs2, 88(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs3, 80(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs4, 72(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs5, 64(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs6, 56(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs7, 48(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs8, 40(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs9, 32(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs10, 24(sp) # 8-byte Folded Reload
; RV32-NEXT:    fld fs11, 16(sp) # 8-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 112
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_v32f64_exact_vlen:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -64
; RV64-NEXT:    .cfi_def_cfa_offset 64
; RV64-NEXT:    fsd fs0, 56(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs1, 48(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs2, 40(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs3, 32(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs4, 24(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs5, 16(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs6, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    fsd fs7, 0(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset fs0, -8
; RV64-NEXT:    .cfi_offset fs1, -16
; RV64-NEXT:    .cfi_offset fs2, -24
; RV64-NEXT:    .cfi_offset fs3, -32
; RV64-NEXT:    .cfi_offset fs4, -40
; RV64-NEXT:    .cfi_offset fs5, -48
; RV64-NEXT:    .cfi_offset fs6, -56
; RV64-NEXT:    .cfi_offset fs7, -64
; RV64-NEXT:    fmv.d.x ft4, a7
; RV64-NEXT:    fmv.d.x ft5, a5
; RV64-NEXT:    fmv.d.x ft6, a3
; RV64-NEXT:    fmv.d.x ft7, a1
; RV64-NEXT:    fld ft0, 184(sp)
; RV64-NEXT:    fld ft1, 176(sp)
; RV64-NEXT:    fld ft2, 168(sp)
; RV64-NEXT:    fld ft3, 160(sp)
; RV64-NEXT:    fld ft8, 152(sp)
; RV64-NEXT:    fld ft9, 144(sp)
; RV64-NEXT:    fld ft10, 136(sp)
; RV64-NEXT:    fld ft11, 128(sp)
; RV64-NEXT:    fld fs0, 120(sp)
; RV64-NEXT:    fld fs1, 112(sp)
; RV64-NEXT:    fld fs2, 104(sp)
; RV64-NEXT:    fld fs3, 96(sp)
; RV64-NEXT:    fld fs4, 72(sp)
; RV64-NEXT:    fld fs5, 64(sp)
; RV64-NEXT:    fld fs6, 88(sp)
; RV64-NEXT:    fld fs7, 80(sp)
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT:    vfmv.v.f v8, fa2
; RV64-NEXT:    vfslide1down.vf v9, v8, fa3
; RV64-NEXT:    vfmv.v.f v8, fa0
; RV64-NEXT:    vfslide1down.vf v8, v8, fa1
; RV64-NEXT:    vfmv.v.f v10, fa4
; RV64-NEXT:    vfslide1down.vf v10, v10, fa5
; RV64-NEXT:    vfmv.v.f v11, fa6
; RV64-NEXT:    vfslide1down.vf v11, v11, fa7
; RV64-NEXT:    vmv.v.x v12, a0
; RV64-NEXT:    vfslide1down.vf v12, v12, ft7
; RV64-NEXT:    vmv.v.x v13, a2
; RV64-NEXT:    vfslide1down.vf v13, v13, ft6
; RV64-NEXT:    vmv.v.x v14, a4
; RV64-NEXT:    vfslide1down.vf v14, v14, ft5
; RV64-NEXT:    vmv.v.x v15, a6
; RV64-NEXT:    vfslide1down.vf v15, v15, ft4
; RV64-NEXT:    vfmv.v.f v16, fs7
; RV64-NEXT:    vfslide1down.vf v17, v16, fs6
; RV64-NEXT:    vfmv.v.f v16, fs5
; RV64-NEXT:    vfslide1down.vf v16, v16, fs4
; RV64-NEXT:    vfmv.v.f v18, fs3
; RV64-NEXT:    vfslide1down.vf v18, v18, fs2
; RV64-NEXT:    vfmv.v.f v19, fs1
; RV64-NEXT:    vfslide1down.vf v19, v19, fs0
; RV64-NEXT:    vfmv.v.f v20, ft11
; RV64-NEXT:    vfslide1down.vf v20, v20, ft10
; RV64-NEXT:    vfmv.v.f v21, ft9
; RV64-NEXT:    vfslide1down.vf v21, v21, ft8
; RV64-NEXT:    vfmv.v.f v22, ft3
; RV64-NEXT:    vfslide1down.vf v22, v22, ft2
; RV64-NEXT:    vfmv.v.f v23, ft1
; RV64-NEXT:    vfslide1down.vf v23, v23, ft0
; RV64-NEXT:    fld fs0, 56(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs1, 48(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs2, 40(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs3, 32(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs4, 24(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs5, 16(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs6, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    fld fs7, 0(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 64
; RV64-NEXT:    ret
  %v0 = insertelement <32 x double> poison, double %e0, i64 0
  %v1 = insertelement <32 x double> %v0, double %e1, i64 1
  %v2 = insertelement <32 x double> %v1, double %e2, i64 2
  %v3 = insertelement <32 x double> %v2, double %e3, i64 3
  %v4 = insertelement <32 x double> %v3, double %e4, i64 4
  %v5 = insertelement <32 x double> %v4, double %e5, i64 5
  %v6 = insertelement <32 x double> %v5, double %e6, i64 6
  %v7 = insertelement <32 x double> %v6, double %e7, i64 7
  %v8 = insertelement <32 x double> %v7, double %e8, i64 8
  %v9 = insertelement <32 x double> %v8, double %e9, i64 9
  %v10 = insertelement <32 x double> %v9, double %e10, i64 10
  %v11 = insertelement <32 x double> %v10, double %e11, i64 11
  %v12 = insertelement <32 x double> %v11, double %e12, i64 12
  %v13 = insertelement <32 x double> %v12, double %e13, i64 13
  %v14 = insertelement <32 x double> %v13, double %e14, i64 14
  %v15 = insertelement <32 x double> %v14, double %e15, i64 15
  %v16 = insertelement <32 x double> %v15, double %e16, i64 16
  %v17 = insertelement <32 x double> %v16, double %e17, i64 17
  %v18 = insertelement <32 x double> %v17, double %e18, i64 18
  %v19 = insertelement <32 x double> %v18, double %e19, i64 19
  %v20 = insertelement <32 x double> %v19, double %e20, i64 20
  %v21 = insertelement <32 x double> %v20, double %e21, i64 21
  %v22 = insertelement <32 x double> %v21, double %e22, i64 22
  %v23 = insertelement <32 x double> %v22, double %e23, i64 23
  %v24 = insertelement <32 x double> %v23, double %e24, i64 24
  %v25 = insertelement <32 x double> %v24, double %e25, i64 25
  %v26 = insertelement <32 x double> %v25, double %e26, i64 26
  %v27 = insertelement <32 x double> %v26, double %e27, i64 27
  %v28 = insertelement <32 x double> %v27, double %e28, i64 28
  %v29 = insertelement <32 x double> %v28, double %e29, i64 29
  %v30 = insertelement <32 x double> %v29, double %e30, i64 30
  %v31 = insertelement <32 x double> %v30, double %e31, i64 31
  ret <32 x double> %v31
}

; FIXME: These constants have enough sign bits that we could use vmv.v.x/i and
; vsext, but we don't support this for FP yet.
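; (Note, as an aside on the constant above: 0x36A0000000000000 is 2^-149, the
; smallest single-precision denormal, so the element bit patterns are
; 0x00000001 and 0x00000000. Those small integers sign-extend from a narrow
; element type, which is what would make a vmv.v.x/i plus vsext sequence
; possible here.)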
define <2 x float> @signbits() {
; CHECK-LABEL: signbits:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lui a0, %hi(.LCPI25_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI25_0)
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  ret <2 x float> <float 0x36A0000000000000, float 0.000000e+00>
}

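; The vid-style tests below build small FP vectors whose elements form an
; integer-valued arithmetic sequence. With zvfh these lower to vid.v
; (optionally scaled or offset) followed by vfcvt.f.x.v; with zvfhmin the two
; f16 bit patterns are instead packed into a single 32-bit immediate and
; inserted with vmv.s.x (or, for the step-2 case, produced by shifting the vid
; sequence into place). Non-integer start values such as 0.5 still go through
; a constant-pool load when vector FP conversion is available.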
define <2 x half> @vid_v2f16() {
; RV32ZVFH-LABEL: vid_v2f16:
; RV32ZVFH:       # %bb.0:
; RV32ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV32ZVFH-NEXT:    vid.v v8
; RV32ZVFH-NEXT:    vfcvt.f.x.v v8, v8
; RV32ZVFH-NEXT:    ret
;
; RV64ZVFH-LABEL: vid_v2f16:
; RV64ZVFH:       # %bb.0:
; RV64ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV64ZVFH-NEXT:    vid.v v8
; RV64ZVFH-NEXT:    vfcvt.f.x.v v8, v8
; RV64ZVFH-NEXT:    ret
;
; RV32ZVFHMIN-LABEL: vid_v2f16:
; RV32ZVFHMIN:       # %bb.0:
; RV32ZVFHMIN-NEXT:    lui a0, 245760
; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV32ZVFHMIN-NEXT:    vmv.s.x v8, a0
; RV32ZVFHMIN-NEXT:    ret
;
; RV64ZVFHMIN-LABEL: vid_v2f16:
; RV64ZVFHMIN:       # %bb.0:
; RV64ZVFHMIN-NEXT:    lui a0, 245760
; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVFHMIN-NEXT:    vmv.s.x v8, a0
; RV64ZVFHMIN-NEXT:    ret
  ret <2 x half> <half 0.0, half 1.0>
}

define <2 x half> @vid_addend1_v2f16() {
; RV32ZVFH-LABEL: vid_addend1_v2f16:
; RV32ZVFH:       # %bb.0:
; RV32ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV32ZVFH-NEXT:    vid.v v8
; RV32ZVFH-NEXT:    vadd.vi v8, v8, 1
; RV32ZVFH-NEXT:    vfcvt.f.x.v v8, v8
; RV32ZVFH-NEXT:    ret
;
; RV64ZVFH-LABEL: vid_addend1_v2f16:
; RV64ZVFH:       # %bb.0:
; RV64ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV64ZVFH-NEXT:    vid.v v8
; RV64ZVFH-NEXT:    vadd.vi v8, v8, 1
; RV64ZVFH-NEXT:    vfcvt.f.x.v v8, v8
; RV64ZVFH-NEXT:    ret
;
; RV32ZVFHMIN-LABEL: vid_addend1_v2f16:
; RV32ZVFHMIN:       # %bb.0:
; RV32ZVFHMIN-NEXT:    lui a0, 262148
; RV32ZVFHMIN-NEXT:    addi a0, a0, -1024
; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV32ZVFHMIN-NEXT:    vmv.s.x v8, a0
; RV32ZVFHMIN-NEXT:    ret
;
; RV64ZVFHMIN-LABEL: vid_addend1_v2f16:
; RV64ZVFHMIN:       # %bb.0:
; RV64ZVFHMIN-NEXT:    lui a0, 262148
; RV64ZVFHMIN-NEXT:    addi a0, a0, -1024
; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVFHMIN-NEXT:    vmv.s.x v8, a0
; RV64ZVFHMIN-NEXT:    ret
  ret <2 x half> <half 1.0, half 2.0>
}

define <2 x half> @vid_denominator2_v2f16() {
; RV32ZVFH-LABEL: vid_denominator2_v2f16:
; RV32ZVFH:       # %bb.0:
; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI28_0)
; RV32ZVFH-NEXT:    addi a0, a0, %lo(.LCPI28_0)
; RV32ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV32ZVFH-NEXT:    vle16.v v8, (a0)
; RV32ZVFH-NEXT:    ret
;
; RV64ZVFH-LABEL: vid_denominator2_v2f16:
; RV64ZVFH:       # %bb.0:
; RV64ZVFH-NEXT:    lui a0, %hi(.LCPI28_0)
; RV64ZVFH-NEXT:    addi a0, a0, %lo(.LCPI28_0)
; RV64ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV64ZVFH-NEXT:    vle16.v v8, (a0)
; RV64ZVFH-NEXT:    ret
;
; RV32ZVFHMIN-LABEL: vid_denominator2_v2f16:
; RV32ZVFHMIN:       # %bb.0:
; RV32ZVFHMIN-NEXT:    lui a0, 245764
; RV32ZVFHMIN-NEXT:    addi a0, a0, -2048
; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV32ZVFHMIN-NEXT:    vmv.s.x v8, a0
; RV32ZVFHMIN-NEXT:    ret
;
; RV64ZVFHMIN-LABEL: vid_denominator2_v2f16:
; RV64ZVFHMIN:       # %bb.0:
; RV64ZVFHMIN-NEXT:    lui a0, 245764
; RV64ZVFHMIN-NEXT:    addi a0, a0, -2048
; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVFHMIN-NEXT:    vmv.s.x v8, a0
; RV64ZVFHMIN-NEXT:    ret
  ret <2 x half> <half 0.5, half 1.0>
}

define <2 x half> @vid_step2_v2f16() {
; RV32ZVFH-LABEL: vid_step2_v2f16:
; RV32ZVFH:       # %bb.0:
; RV32ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV32ZVFH-NEXT:    vid.v v8
; RV32ZVFH-NEXT:    vadd.vv v8, v8, v8
; RV32ZVFH-NEXT:    vfcvt.f.x.v v8, v8
; RV32ZVFH-NEXT:    ret
;
; RV64ZVFH-LABEL: vid_step2_v2f16:
; RV64ZVFH:       # %bb.0:
; RV64ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV64ZVFH-NEXT:    vid.v v8
; RV64ZVFH-NEXT:    vadd.vv v8, v8, v8
; RV64ZVFH-NEXT:    vfcvt.f.x.v v8, v8
; RV64ZVFH-NEXT:    ret
;
; RV32ZVFHMIN-LABEL: vid_step2_v2f16:
; RV32ZVFHMIN:       # %bb.0:
; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV32ZVFHMIN-NEXT:    vid.v v8
; RV32ZVFHMIN-NEXT:    vsll.vi v8, v8, 14
; RV32ZVFHMIN-NEXT:    ret
;
; RV64ZVFHMIN-LABEL: vid_step2_v2f16:
; RV64ZVFHMIN:       # %bb.0:
; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV64ZVFHMIN-NEXT:    vid.v v8
; RV64ZVFHMIN-NEXT:    vsll.vi v8, v8, 14
; RV64ZVFHMIN-NEXT:    ret
  ret <2 x half> <half 0.0, half 2.0>
}

define <2 x float> @vid_v2f32() {
; CHECK-LABEL: vid_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  ret <2 x float> <float 0.0, float 1.0>
}

define <2 x float> @vid_addend1_v2f32() {
; CHECK-LABEL: vid_addend1_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vi v8, v8, 1
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  ret <2 x float> <float 1.0, float 2.0>
}

define <2 x float> @vid_denominator2_v2f32() {
; CHECK-LABEL: vid_denominator2_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, %hi(.LCPI32_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI32_0)
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
  ret <2 x float> <float 0.5, float 1.0>
}

define <2 x float> @vid_step2_v2f32() {
; CHECK-LABEL: vid_step2_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  ret <2 x float> <float 0.0, float 2.0>
}

define <2 x double> @vid_v2f64() {
; CHECK-LABEL: vid_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  ret <2 x double> <double 0.0, double 1.0>
}

define <2 x double> @vid_addend1_v2f64() {
; CHECK-LABEL: vid_addend1_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vi v8, v8, 1
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  ret <2 x double> <double 1.0, double 2.0>
}

define <2 x double> @vid_denominator2_v2f64() {
; CHECK-LABEL: vid_denominator2_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, %hi(.LCPI36_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI36_0)
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
  ret <2 x double> <double 0.5, double 1.0>
}

define <2 x double> @vid_step2_v2f64() {
; CHECK-LABEL: vid_step2_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  ret <2 x double> <double 0.0, double 2.0>
}

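; The vscale_range tests below guarantee a minimum VLEN of 256 (vscale >= 4)
; or 512 (vscale >= 8), so these build_vectors are assembled entirely in
; registers with vfslide1down chains. When the whole vector fits in a single
; register, the build is split into two halves that are merged with a masked
; vslidedown; otherwise a single slide1down chain at a larger LMUL is used.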
define <8 x float> @buildvec_v8f32_zvl256(float %e0, float %e1, float %e2, float %e3, float %e4, float %e5, float %e6, float %e7) vscale_range(4, 128) {
; CHECK-LABEL: buildvec_v8f32_zvl256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m1, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa1
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa2
; CHECK-NEXT:    vfslide1down.vf v9, v8, fa3
; CHECK-NEXT:    vfmv.v.f v8, fa4
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa5
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa6
; CHECK-NEXT:    vmv.v.i v0, 15
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa7
; CHECK-NEXT:    vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT:    ret
  %v0 = insertelement <8 x float> poison, float %e0, i64 0
  %v1 = insertelement <8 x float> %v0, float %e1, i64 1
  %v2 = insertelement <8 x float> %v1, float %e2, i64 2
  %v3 = insertelement <8 x float> %v2, float %e3, i64 3
  %v4 = insertelement <8 x float> %v3, float %e4, i64 4
  %v5 = insertelement <8 x float> %v4, float %e5, i64 5
  %v6 = insertelement <8 x float> %v5, float %e6, i64 6
  %v7 = insertelement <8 x float> %v6, float %e7, i64 7
  ret <8 x float> %v7
}

define <8 x double> @buildvec_v8f64_zvl256(double %e0, double %e1, double %e2, double %e3, double %e4, double %e5, double %e6, double %e7) vscale_range(4, 128) {
; CHECK-LABEL: buildvec_v8f64_zvl256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m2, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa1
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa2
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa3
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa4
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa5
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa6
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa7
; CHECK-NEXT:    ret
  %v0 = insertelement <8 x double> poison, double %e0, i64 0
  %v1 = insertelement <8 x double> %v0, double %e1, i64 1
  %v2 = insertelement <8 x double> %v1, double %e2, i64 2
  %v3 = insertelement <8 x double> %v2, double %e3, i64 3
  %v4 = insertelement <8 x double> %v3, double %e4, i64 4
  %v5 = insertelement <8 x double> %v4, double %e5, i64 5
  %v6 = insertelement <8 x double> %v5, double %e6, i64 6
  %v7 = insertelement <8 x double> %v6, double %e7, i64 7
  ret <8 x double> %v7
}

define <8 x double> @buildvec_v8f64_zvl512(double %e0, double %e1, double %e2, double %e3, double %e4, double %e5, double %e6, double %e7) vscale_range(8, 128) {
; CHECK-LABEL: buildvec_v8f64_zvl512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m1, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa1
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa2
; CHECK-NEXT:    vfslide1down.vf v9, v8, fa3
; CHECK-NEXT:    vfmv.v.f v8, fa4
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa5
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa6
; CHECK-NEXT:    vmv.v.i v0, 15
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa7
; CHECK-NEXT:    vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT:    ret
  %v0 = insertelement <8 x double> poison, double %e0, i64 0
  %v1 = insertelement <8 x double> %v0, double %e1, i64 1
  %v2 = insertelement <8 x double> %v1, double %e2, i64 2
  %v3 = insertelement <8 x double> %v2, double %e3, i64 3
  %v4 = insertelement <8 x double> %v3, double %e4, i64 4
  %v5 = insertelement <8 x double> %v4, double %e5, i64 5
  %v6 = insertelement <8 x double> %v5, double %e6, i64 6
  %v7 = insertelement <8 x double> %v6, double %e7, i64 7
  ret <8 x double> %v7
}