; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc < %s -mtriple=riscv64 -mattr=+v -target-abi=lp64d | FileCheck %s
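
; Each half element of the <4 x half> load is widened to float through the
; __extendhfsf2 libcall (only +v is enabled, no zfhmin/zvfh), and the scalar
; results are reassembled into a <4 x float> vector with vslideup.
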
define <4 x float> @foo(ptr %0) nounwind {
; CHECK-LABEL: foo:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -48
; CHECK-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; CHECK-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; CHECK-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; CHECK-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; CHECK-NEXT:    lhu s0, 0(a0)
; CHECK-NEXT:    lhu s1, 2(a0)
; CHECK-NEXT:    lhu s2, 4(a0)
; CHECK-NEXT:    lhu a0, 6(a0)
; CHECK-NEXT:    fmv.w.x fa0, a0
; CHECK-NEXT:    call __extendhfsf2
; CHECK-NEXT:    fsw fa0, 4(sp)
; CHECK-NEXT:    fmv.w.x fa0, s2
; CHECK-NEXT:    call __extendhfsf2
; CHECK-NEXT:    fsw fa0, 12(sp)
; CHECK-NEXT:    fmv.w.x fa0, s1
; CHECK-NEXT:    call __extendhfsf2
; CHECK-NEXT:    fsw fa0, 8(sp)
; CHECK-NEXT:    fmv.w.x fa0, s0
; CHECK-NEXT:    call __extendhfsf2
; CHECK-NEXT:    fsw fa0, 0(sp)
; CHECK-NEXT:    addi a0, sp, 4
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    addi a0, sp, 12
; CHECK-NEXT:    vle32.v v10, (a0)
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vle32.v v11, (a0)
; CHECK-NEXT:    mv a0, sp
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vslideup.vi v10, v9, 1
; CHECK-NEXT:    vslideup.vi v8, v11, 1
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 2
; CHECK-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; CHECK-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; CHECK-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; CHECK-NEXT:    addi sp, sp, 48
; CHECK-NEXT:    ret
%2 = load <4 x half>, ptr %0, align 2
%3 = fpext <4 x half> %2 to <4 x float>
ret <4 x float> %3
}