; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=ilp32d \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=lp64d \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=ilp32d \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
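; This file tests llvm.vp.minimum on fixed-length vectors. vp.minimum
; propagates NaN (unlike IEEE minNum), so each case lowers to a pair of
; self-equality compares (vmfeq.vv x, x is false only in NaN lanes), two
; vmerge.vvm swaps that force any NaN operand into both vfmin inputs, and a
; final vfmin.vv. The +zvfhmin runs have no f16 arithmetic, so the f16 cases
; additionally widen to f32 (vfwcvt.f.f.v) and narrow back (vfncvt.f.f.w).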
declare <2 x half> @llvm.vp.minimum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v2f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0
; ZVFH-NEXT: vmv1r.v v0, v10
; ZVFH-NEXT: vmfeq.vv v0, v9, v9, v0.t
; ZVFH-NEXT: vmerge.vvm v8, v9, v8, v0
; ZVFH-NEXT: vmv1r.v v0, v10
; ZVFH-NEXT: vfmin.vv v8, v8, v11, v0.t
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vfmin_vv_v2f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v11, v11, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v9, v11, v8, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8, v0.t
; ZVFHMIN-NEXT: vmerge.vvm v8, v8, v11, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vfmin.vv v9, v8, v9, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%v = call <2 x half> @llvm.vp.minimum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
ret <2 x half> %v
}
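; The unmasked variants pass an all-true mask (splat (i1 true)), so the
; self-equality compares can write v0 directly and no incoming mask has to
; be saved and restored around the merge/min sequence.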
define <2 x half> @vfmin_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v2f16_unmasked:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8
; ZVFH-NEXT: vmfeq.vv v10, v9, v9
; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0
; ZVFH-NEXT: vmv1r.v v0, v10
; ZVFH-NEXT: vmerge.vvm v8, v9, v8, v0
; ZVFH-NEXT: vfmin.vv v8, v8, v11
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vfmin_vv_v2f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v10, v10
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v11, v11
; ZVFHMIN-NEXT: vmerge.vvm v9, v10, v11, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vmerge.vvm v8, v11, v10, v0
; ZVFHMIN-NEXT: vfmin.vv v9, v8, v9
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%v = call <2 x half> @llvm.vp.minimum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x half> %v
}
declare <4 x half> @llvm.vp.minimum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v4f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0
; ZVFH-NEXT: vmv1r.v v0, v10
; ZVFH-NEXT: vmfeq.vv v0, v9, v9, v0.t
; ZVFH-NEXT: vmerge.vvm v8, v9, v8, v0
; ZVFH-NEXT: vmv1r.v v0, v10
; ZVFH-NEXT: vfmin.vv v8, v8, v11, v0.t
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vfmin_vv_v4f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v11, v11, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v9, v11, v8, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8, v0.t
; ZVFHMIN-NEXT: vmerge.vvm v8, v8, v11, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vfmin.vv v9, v8, v9, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%v = call <4 x half> @llvm.vp.minimum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
ret <4 x half> %v
}
define <4 x half> @vfmin_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v4f16_unmasked:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8
; ZVFH-NEXT: vmfeq.vv v10, v9, v9
; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0
; ZVFH-NEXT: vmv1r.v v0, v10
; ZVFH-NEXT: vmerge.vvm v8, v9, v8, v0
; ZVFH-NEXT: vfmin.vv v8, v8, v11
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vfmin_vv_v4f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v10, v10
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v11, v11
; ZVFHMIN-NEXT: vmerge.vvm v9, v10, v11, v0
; ZVFHMIN-NEXT: vmv.v.v v0, v8
; ZVFHMIN-NEXT: vmerge.vvm v8, v11, v10, v0
; ZVFHMIN-NEXT: vfmin.vv v9, v8, v9
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%v = call <4 x half> @llvm.vp.minimum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x half> %v
}
declare <8 x half> @llvm.vp.minimum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0
; ZVFH-NEXT: vmv1r.v v0, v10
; ZVFH-NEXT: vmfeq.vv v0, v9, v9, v0.t
; ZVFH-NEXT: vmerge.vvm v8, v9, v8, v0
; ZVFH-NEXT: vmv1r.v v0, v10
; ZVFH-NEXT: vfmin.vv v8, v8, v11, v0.t
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vfmin_vv_v8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v16, v12, v14, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v8, v14, v14, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vmerge.vvm v8, v14, v12, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vfmin.vv v10, v8, v16, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
%v = call <8 x half> @llvm.vp.minimum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
ret <8 x half> %v
}
define <8 x half> @vfmin_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v8f16_unmasked:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8
; ZVFH-NEXT: vmfeq.vv v10, v9, v9
; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0
; ZVFH-NEXT: vmv.v.v v0, v10
; ZVFH-NEXT: vmerge.vvm v8, v9, v8, v0
; ZVFH-NEXT: vfmin.vv v8, v8, v11
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vfmin_vv_v8f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v10, v10
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12
; ZVFHMIN-NEXT: vmerge.vvm v14, v10, v12, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vmerge.vvm v8, v12, v10, v0
; ZVFHMIN-NEXT: vfmin.vv v10, v8, v14
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
%v = call <8 x half> @llvm.vp.minimum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x half> %v
}
declare <16 x half> @llvm.vp.minimum.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32)
define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v16f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t
; ZVFH-NEXT: vmv1r.v v0, v13
; ZVFH-NEXT: vmerge.vvm v14, v8, v10, v0
; ZVFH-NEXT: vmv1r.v v0, v12
; ZVFH-NEXT: vmfeq.vv v13, v10, v10, v0.t
; ZVFH-NEXT: vmv1r.v v0, v13
; ZVFH-NEXT: vmerge.vvm v8, v10, v8, v0
; ZVFH-NEXT: vmv1r.v v0, v12
; ZVFH-NEXT: vfmin.vv v8, v8, v14, v0.t
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vfmin_vv_v16f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v20, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vmfeq.vv v8, v20, v20, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vmerge.vvm v8, v20, v16, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vfmin.vv v12, v8, v24, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
%v = call <16 x half> @llvm.vp.minimum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
ret <16 x half> %v
}
define <16 x half> @vfmin_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v16f16_unmasked:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8
; ZVFH-NEXT: vmfeq.vv v12, v10, v10
; ZVFH-NEXT: vmerge.vvm v14, v8, v10, v0
; ZVFH-NEXT: vmv1r.v v0, v12
; ZVFH-NEXT: vmerge.vvm v8, v10, v8, v0
; ZVFH-NEXT: vfmin.vv v8, v8, v14
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vfmin_vv_v16f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v12, v12
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16
; ZVFHMIN-NEXT: vmerge.vvm v20, v12, v16, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vmerge.vvm v8, v16, v12, v0
; ZVFHMIN-NEXT: vfmin.vv v12, v8, v20
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
%v = call <16 x half> @llvm.vp.minimum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x half> %v
}
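; For f32 and f64 element types the ZVFH and ZVFHMIN configurations emit
; identical code, so the remaining cases check the common CHECK prefix.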
declare <2 x float> @llvm.vp.minimum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32)
define <2 x float> @vfmin_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmfeq.vv v0, v9, v9, v0.t
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfmin.vv v8, v8, v11, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.minimum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
ret <2 x float> %v
}
define <2 x float> @vfmin_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
; CHECK-NEXT: vmfeq.vv v10, v9, v9
; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v11
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.minimum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x float> %v
}
declare <4 x float> @llvm.vp.minimum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmfeq.vv v0, v9, v9, v0.t
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfmin.vv v8, v8, v11, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.minimum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
ret <4 x float> %v
}
define <4 x float> @vfmin_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
; CHECK-NEXT: vmfeq.vv v10, v9, v9
; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v11
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.minimum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x float> %v
}
declare <8 x float> @llvm.vp.minimum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)
define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: vmerge.vvm v14, v8, v10, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfeq.vv v13, v10, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfmin.vv v8, v8, v14, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.minimum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
ret <8 x float> %v
}
define <8 x float> @vfmin_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
; CHECK-NEXT: vmfeq.vv v12, v10, v10
; CHECK-NEXT: vmerge.vvm v14, v8, v10, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v14
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.minimum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x float> %v
}
declare <16 x float> @llvm.vp.minimum.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32)
define <16 x float> @vfmin_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v17
; CHECK-NEXT: vmerge.vvm v20, v8, v12, v0
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmfeq.vv v17, v12, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v17
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfmin.vv v8, v8, v20, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.minimum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
ret <16 x float> %v
}
define <16 x float> @vfmin_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
; CHECK-NEXT: vmfeq.vv v16, v12, v12
; CHECK-NEXT: vmerge.vvm v20, v8, v12, v0
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v20
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.minimum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x float> %v
}
declare <2 x double> @llvm.vp.minimum.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32)
define <2 x double> @vfmin_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmfeq.vv v0, v9, v9, v0.t
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfmin.vv v8, v8, v11, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.minimum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
ret <2 x double> %v
}
define <2 x double> @vfmin_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
; CHECK-NEXT: vmfeq.vv v10, v9, v9
; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v11
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.minimum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}
declare <4 x double> @llvm.vp.minimum.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32)
define <4 x double> @vfmin_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: vmerge.vvm v14, v8, v10, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfeq.vv v13, v10, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfmin.vv v8, v8, v14, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.minimum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
ret <4 x double> %v
}
define <4 x double> @vfmin_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
; CHECK-NEXT: vmfeq.vv v12, v10, v10
; CHECK-NEXT: vmerge.vvm v14, v8, v10, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v14
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.minimum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}
declare <8 x double> @llvm.vp.minimum.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32)
define <8 x double> @vfmin_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v17
; CHECK-NEXT: vmerge.vvm v20, v8, v12, v0
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmfeq.vv v17, v12, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v17
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfmin.vv v8, v8, v20, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.minimum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
ret <8 x double> %v
}
define <8 x double> @vfmin_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
; CHECK-NEXT: vmfeq.vv v16, v12, v12
; CHECK-NEXT: vmerge.vvm v20, v8, v12, v0
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v20
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.minimum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}
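; At <16 x double> the two sources already occupy the m8 groups v8-v15 and
; v16-v23, so the masked lowering below spills one m8 intermediate to the
; stack (vs8r.v/vl8r.v of an 8 * vlenb frame) instead of clobbering a live
; register group.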
declare <16 x double> @llvm.vp.minimum.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32)
define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmfeq.vv v25, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.minimum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
ret <16 x double> %v
}
define <16 x double> @vfmin_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v24
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.minimum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}
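; <32 x double> does not fit in a single m8 register group, so the operation
; is split in two: the first half runs with VL clamped to 16 (the bltu), and
; the second half with max(evl - 16, 0), computed via the sltu/addi/and
; sequence. The upper operand halves arrive on the stack and are reloaded
; with vle64.v.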
declare <32 x double> @llvm.vp.minimum.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32)
define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v32f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB24_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB24_2:
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v26, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vmfeq.vv v26, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v24, v16, v8, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmin.vv v16, v16, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.minimum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
ret <32 x double> %v
}
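; Even unmasked, the <32 x double> case needs a 24 * vlenb spill frame: the
; upper halves of both operands and one intermediate result stay on the
; stack while the first half is computed.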
define <32 x double> @vfmin_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v32f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB25_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB25_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vmv8r.v v16, v24
; CHECK-NEXT: vmerge.vvm v24, v8, v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmin.vv v8, v8, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v16, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmfeq.vv v7, v8, v8
; CHECK-NEXT: vmerge.vvm v24, v16, v8, v0
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0
; CHECK-NEXT: vfmin.vv v16, v16, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.minimum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}