; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+f -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32IF %s
; RUN: llc -mtriple=riscv32 -mattr=+zfh -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32IZFH %s
; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+f -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64IF %s
; RUN: llc -mtriple=riscv64 -mattr=+zfh -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64IZFH %s
; Test IR translation of a lone half argument/return value.
; Soft-float ABIs (RV32I/RV64I) pass it in a GPR, so the irtranslator emits
; G_TRUNC to s16 on entry and G_ANYEXT back to XLEN for the return.
; With only F (no Zfh), the half travels in $f10_f as an s32 with the same
; trunc/anyext dance; with Zfh it is carried natively as s16 in $f10_h with
; plain COPYs and no extension artifacts.
define half @callee_half_in_regs(half %x) nounwind {
; RV32I-LABEL: name: callee_half_in_regs
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32I-NEXT: $x10 = COPY [[ANYEXT]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
;
; RV32IF-LABEL: name: callee_half_in_regs
; RV32IF: bb.1 (%ir-block.0):
; RV32IF-NEXT: liveins: $f10_f
; RV32IF-NEXT: {{ $}}
; RV32IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
; RV32IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV32IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: PseudoRET implicit $f10_f
;
; RV32IZFH-LABEL: name: callee_half_in_regs
; RV32IZFH: bb.1 (%ir-block.0):
; RV32IZFH-NEXT: liveins: $f10_h
; RV32IZFH-NEXT: {{ $}}
; RV32IZFH-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: $f10_h = COPY [[COPY]](s16)
; RV32IZFH-NEXT: PseudoRET implicit $f10_h
;
; RV64I-LABEL: name: callee_half_in_regs
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
; RV64IF-LABEL: name: callee_half_in_regs
; RV64IF: bb.1 (%ir-block.0):
; RV64IF-NEXT: liveins: $f10_f
; RV64IF-NEXT: {{ $}}
; RV64IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
; RV64IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV64IF-NEXT: PseudoRET implicit $f10_f
;
; RV64IZFH-LABEL: name: callee_half_in_regs
; RV64IZFH: bb.1 (%ir-block.0):
; RV64IZFH-NEXT: liveins: $f10_h
; RV64IZFH-NEXT: {{ $}}
; RV64IZFH-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $f10_h
; RV64IZFH-NEXT: $f10_h = COPY [[COPY]](s16)
; RV64IZFH-NEXT: PseudoRET implicit $f10_h
ret half %x
}
; Test the caller side of passing/receiving a half in registers: checks that
; ADJCALLSTACKDOWN/UP bracket the PseudoCALL with no stack bytes (0, 0), that
; the outgoing argument and incoming result use the same register and
; trunc/anyext pattern as the callee-side test, and that the right CSR set
; (csr_ilp32_lp64 vs csr_ilp32f_lp64f) is attached per ABI.
; NOTE(review): the IR calls @caller_half_in_regs (itself, recursively) rather
; than @callee_half_in_regs — the generated checks match this as written;
; confirm the self-call is intended.
define half @caller_half_in_regs(half %x) nounwind {
; RV32I-LABEL: name: caller_half_in_regs
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32I-NEXT: $x10 = COPY [[ANYEXT]](s32)
; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @caller_half_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit-def $x10
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV32I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
; RV32I-NEXT: $x10 = COPY [[ANYEXT1]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
;
; RV32IF-LABEL: name: caller_half_in_regs
; RV32IF: bb.1 (%ir-block.0):
; RV32IF-NEXT: liveins: $f10_f
; RV32IF-NEXT: {{ $}}
; RV32IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
; RV32IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV32IF-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: PseudoCALL target-flags(riscv-call) @caller_half_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_f, implicit-def $f10_f
; RV32IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32IF-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
; RV32IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV32IF-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
; RV32IF-NEXT: $f10_f = COPY [[ANYEXT1]](s32)
; RV32IF-NEXT: PseudoRET implicit $f10_f
;
; RV32IZFH-LABEL: name: caller_half_in_regs
; RV32IZFH: bb.1 (%ir-block.0):
; RV32IZFH-NEXT: liveins: $f10_h
; RV32IZFH-NEXT: {{ $}}
; RV32IZFH-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32IZFH-NEXT: $f10_h = COPY [[COPY]](s16)
; RV32IZFH-NEXT: PseudoCALL target-flags(riscv-call) @caller_half_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_h, implicit-def $f10_h
; RV32IZFH-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: $f10_h = COPY [[COPY1]](s16)
; RV32IZFH-NEXT: PseudoRET implicit $f10_h
;
; RV64I-LABEL: name: caller_half_in_regs
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @caller_half_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
; RV64I-NEXT: $x10 = COPY [[ANYEXT1]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
; RV64IF-LABEL: name: caller_half_in_regs
; RV64IF: bb.1 (%ir-block.0):
; RV64IF-NEXT: liveins: $f10_f
; RV64IF-NEXT: {{ $}}
; RV64IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
; RV64IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV64IF-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV64IF-NEXT: PseudoCALL target-flags(riscv-call) @caller_half_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_f, implicit-def $f10_f
; RV64IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64IF-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
; RV64IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV64IF-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT1]](s32)
; RV64IF-NEXT: PseudoRET implicit $f10_f
;
; RV64IZFH-LABEL: name: caller_half_in_regs
; RV64IZFH: bb.1 (%ir-block.0):
; RV64IZFH-NEXT: liveins: $f10_h
; RV64IZFH-NEXT: {{ $}}
; RV64IZFH-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $f10_h
; RV64IZFH-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64IZFH-NEXT: $f10_h = COPY [[COPY]](s16)
; RV64IZFH-NEXT: PseudoCALL target-flags(riscv-call) @caller_half_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_h, implicit-def $f10_h
; RV64IZFH-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $f10_h
; RV64IZFH-NEXT: $f10_h = COPY [[COPY1]](s16)
; RV64IZFH-NEXT: PseudoRET implicit $f10_h
%y = call half @caller_half_in_regs(half %x)
ret half %y
}
; Test a half argument mixed with an integer argument on the callee side.
; Soft-float ABIs place the half in the next GPR after the i32 ($x11);
; hard-float ABIs keep the i32 in $x10 and the half in the first FP argument
; register ($f10_f as s32 for F, $f10_h as s16 for Zfh), so the two argument
; classes are allocated independently.
define half @callee_half_mixed_with_int(i32 %x0, half %x) nounwind {
; RV32I-LABEL: name: callee_half_mixed_with_int
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: liveins: $x10, $x11
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32I-NEXT: $x10 = COPY [[ANYEXT]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
;
; RV32IF-LABEL: name: callee_half_mixed_with_int
; RV32IF: bb.1 (%ir-block.0):
; RV32IF-NEXT: liveins: $x10, $f10_f
; RV32IF-NEXT: {{ $}}
; RV32IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32IF-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
; RV32IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV32IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: PseudoRET implicit $f10_f
;
; RV32IZFH-LABEL: name: callee_half_mixed_with_int
; RV32IZFH: bb.1 (%ir-block.0):
; RV32IZFH-NEXT: liveins: $x10, $f10_h
; RV32IZFH-NEXT: {{ $}}
; RV32IZFH-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: $f10_h = COPY [[COPY1]](s16)
; RV32IZFH-NEXT: PseudoRET implicit $f10_h
;
; RV64I-LABEL: name: callee_half_mixed_with_int
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: liveins: $x10, $x11
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
; RV64IF-LABEL: name: callee_half_mixed_with_int
; RV64IF: bb.1 (%ir-block.0):
; RV64IF-NEXT: liveins: $x10, $f10_f
; RV64IF-NEXT: {{ $}}
; RV64IF-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64IF-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; RV64IF-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
; RV64IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV64IF-NEXT: PseudoRET implicit $f10_f
;
; RV64IZFH-LABEL: name: callee_half_mixed_with_int
; RV64IZFH: bb.1 (%ir-block.0):
; RV64IZFH-NEXT: liveins: $x10, $f10_h
; RV64IZFH-NEXT: {{ $}}
; RV64IZFH-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64IZFH-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; RV64IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $f10_h
; RV64IZFH-NEXT: $f10_h = COPY [[COPY1]](s16)
; RV64IZFH-NEXT: PseudoRET implicit $f10_h
ret half %x
}
; Caller-side counterpart of the mixed half/i32 test. This function takes its
; arguments in the opposite order (half %x, i32 %x0) and forwards them to
; @callee_half_mixed_with_int(i32 %x0, half %x), so the checks verify the
; outgoing copies swap the values into the registers the callee expects:
; $x10/$x11 reordered for soft-float, or i32 -> $x10 and half -> $f10_f/$f10_h
; for the hard-float ABIs.
define half @caller_half_mixed_with_int(half %x, i32 %x0) nounwind {
; RV32I-LABEL: name: caller_half_mixed_with_int
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: liveins: $x10, $x11
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32I-NEXT: $x10 = COPY [[COPY1]](s32)
; RV32I-NEXT: $x11 = COPY [[ANYEXT]](s32)
; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_mixed_with_int, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; RV32I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
; RV32I-NEXT: $x10 = COPY [[ANYEXT1]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
;
; RV32IF-LABEL: name: caller_half_mixed_with_int
; RV32IF: bb.1 (%ir-block.0):
; RV32IF-NEXT: liveins: $x10, $f10_f
; RV32IF-NEXT: {{ $}}
; RV32IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
; RV32IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV32IF-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; RV32IF-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32IF-NEXT: $x10 = COPY [[COPY1]](s32)
; RV32IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_mixed_with_int, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $f10_f, implicit-def $f10_f
; RV32IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $f10_f
; RV32IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; RV32IF-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
; RV32IF-NEXT: $f10_f = COPY [[ANYEXT1]](s32)
; RV32IF-NEXT: PseudoRET implicit $f10_f
;
; RV32IZFH-LABEL: name: caller_half_mixed_with_int
; RV32IZFH: bb.1 (%ir-block.0):
; RV32IZFH-NEXT: liveins: $x10, $f10_h
; RV32IZFH-NEXT: {{ $}}
; RV32IZFH-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; RV32IZFH-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32IZFH-NEXT: $x10 = COPY [[COPY1]](s32)
; RV32IZFH-NEXT: $f10_h = COPY [[COPY]](s16)
; RV32IZFH-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_mixed_with_int, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $f10_h, implicit-def $f10_h
; RV32IZFH-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32IZFH-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: $f10_h = COPY [[COPY2]](s16)
; RV32IZFH-NEXT: PseudoRET implicit $f10_h
;
; RV64I-LABEL: name: caller_half_mixed_with_int
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: liveins: $x10, $x11
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s32)
; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64I-NEXT: $x11 = COPY [[ANYEXT1]](s64)
; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_mixed_with_int, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s64)
; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC2]](s16)
; RV64I-NEXT: $x10 = COPY [[ANYEXT2]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
; RV64IF-LABEL: name: caller_half_mixed_with_int
; RV64IF: bb.1 (%ir-block.0):
; RV64IF-NEXT: liveins: $x10, $f10_f
; RV64IF-NEXT: {{ $}}
; RV64IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
; RV64IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV64IF-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
; RV64IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; RV64IF-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s32)
; RV64IF-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV64IF-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT1]](s32)
; RV64IF-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_mixed_with_int, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $f10_f, implicit-def $f10_f
; RV64IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $f10_f
; RV64IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; RV64IF-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT2]](s32)
; RV64IF-NEXT: PseudoRET implicit $f10_f
;
; RV64IZFH-LABEL: name: caller_half_mixed_with_int
; RV64IZFH: bb.1 (%ir-block.0):
; RV64IZFH-NEXT: liveins: $x10, $f10_h
; RV64IZFH-NEXT: {{ $}}
; RV64IZFH-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $f10_h
; RV64IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
; RV64IZFH-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; RV64IZFH-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64IZFH-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
; RV64IZFH-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64IZFH-NEXT: $f10_h = COPY [[COPY]](s16)
; RV64IZFH-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_mixed_with_int, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $f10_h, implicit-def $f10_h
; RV64IZFH-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64IZFH-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY $f10_h
; RV64IZFH-NEXT: $f10_h = COPY [[COPY2]](s16)
; RV64IZFH-NEXT: PseudoRET implicit $f10_h
%y = call half @callee_half_mixed_with_int(i32 %x0, half %x)
ret half %y
}
; Test a half argument after eight i32 arguments, which exhausts the eight GPR
; argument registers (x10-x17). Soft-float ABIs must then take the half from
; the stack: the checks expect a G_FRAME_INDEX of %fixed-stack.0 plus a
; G_LOAD (s32 on RV32, s64 on RV64). Hard-float ABIs still have FP argument
; registers free, so the half arrives in $f10_f / $f10_h with no stack access.
define half @callee_half_return_stack1(i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, i32 %v6, i32 %v7, i32 %v8, half %x) nounwind {
; RV32I-LABEL: name: callee_half_return_stack1
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32I-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32I-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32I-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32I-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32I-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; RV32I-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %fixed-stack.0, align 16)
; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32I-NEXT: $x10 = COPY [[ANYEXT]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
;
; RV32IF-LABEL: name: callee_half_return_stack1
; RV32IF: bb.1 (%ir-block.0):
; RV32IF-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $f10_f
; RV32IF-NEXT: {{ $}}
; RV32IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32IF-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32IF-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32IF-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32IF-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32IF-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32IF-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32IF-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $f10_f
; RV32IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
; RV32IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: PseudoRET implicit $f10_f
;
; RV32IZFH-LABEL: name: callee_half_return_stack1
; RV32IZFH: bb.1 (%ir-block.0):
; RV32IZFH-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $f10_h
; RV32IZFH-NEXT: {{ $}}
; RV32IZFH-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32IZFH-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32IZFH-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32IZFH-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32IZFH-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32IZFH-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32IZFH-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32IZFH-NEXT: [[COPY8:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: $f10_h = COPY [[COPY8]](s16)
; RV32IZFH-NEXT: PseudoRET implicit $f10_h
;
; RV64I-LABEL: name: callee_half_return_stack1
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64I-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64I-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY3]](s64)
; RV64I-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64I-NEXT: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY4]](s64)
; RV64I-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64I-NEXT: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY5]](s64)
; RV64I-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64I-NEXT: [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[COPY6]](s64)
; RV64I-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64I-NEXT: [[TRUNC7:%[0-9]+]]:_(s32) = G_TRUNC [[COPY7]](s64)
; RV64I-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; RV64I-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %fixed-stack.0, align 16)
; RV64I-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s64)
; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC8]](s16)
; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
; RV64IF-LABEL: name: callee_half_return_stack1
; RV64IF: bb.1 (%ir-block.0):
; RV64IF-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $f10_f
; RV64IF-NEXT: {{ $}}
; RV64IF-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64IF-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; RV64IF-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; RV64IF-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; RV64IF-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64IF-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY3]](s64)
; RV64IF-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64IF-NEXT: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY4]](s64)
; RV64IF-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64IF-NEXT: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY5]](s64)
; RV64IF-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64IF-NEXT: [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[COPY6]](s64)
; RV64IF-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64IF-NEXT: [[TRUNC7:%[0-9]+]]:_(s32) = G_TRUNC [[COPY7]](s64)
; RV64IF-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $f10_f
; RV64IF-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC8]](s16)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV64IF-NEXT: PseudoRET implicit $f10_f
;
; RV64IZFH-LABEL: name: callee_half_return_stack1
; RV64IZFH: bb.1 (%ir-block.0):
; RV64IZFH-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $f10_h
; RV64IZFH-NEXT: {{ $}}
; RV64IZFH-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64IZFH-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; RV64IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64IZFH-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; RV64IZFH-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64IZFH-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; RV64IZFH-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64IZFH-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY3]](s64)
; RV64IZFH-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64IZFH-NEXT: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY4]](s64)
; RV64IZFH-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64IZFH-NEXT: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY5]](s64)
; RV64IZFH-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64IZFH-NEXT: [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[COPY6]](s64)
; RV64IZFH-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64IZFH-NEXT: [[TRUNC7:%[0-9]+]]:_(s32) = G_TRUNC [[COPY7]](s64)
; RV64IZFH-NEXT: [[COPY8:%[0-9]+]]:_(s16) = COPY $f10_h
; RV64IZFH-NEXT: $f10_h = COPY [[COPY8]](s16)
; RV64IZFH-NEXT: PseudoRET implicit $f10_h
ret half %x
}
define half @caller_half_return_stack1(i32 %v1, half %x) nounwind {
; RV32I-LABEL: name: caller_half_return_stack1
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: liveins: $x10, $x11
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; RV32I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
; RV32I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
; RV32I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV32I-NEXT: ADJCALLSTACKDOWN 4, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C7]](s32)
; RV32I-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack, align 16)
; RV32I-NEXT: $x10 = COPY [[C]](s32)
; RV32I-NEXT: $x11 = COPY [[C1]](s32)
; RV32I-NEXT: $x12 = COPY [[C2]](s32)
; RV32I-NEXT: $x13 = COPY [[COPY]](s32)
; RV32I-NEXT: $x14 = COPY [[C3]](s32)
; RV32I-NEXT: $x15 = COPY [[C4]](s32)
; RV32I-NEXT: $x16 = COPY [[C5]](s32)
; RV32I-NEXT: $x17 = COPY [[C6]](s32)
; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack1, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; RV32I-NEXT: ADJCALLSTACKUP 4, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; RV32I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
; RV32I-NEXT: $x10 = COPY [[ANYEXT1]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
;
; RV32IF-LABEL: name: caller_half_return_stack1
; RV32IF: bb.1 (%ir-block.0):
; RV32IF-NEXT: liveins: $x10, $f10_f
; RV32IF-NEXT: {{ $}}
; RV32IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32IF-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
; RV32IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV32IF-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32IF-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32IF-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; RV32IF-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
; RV32IF-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
; RV32IF-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
; RV32IF-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV32IF-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32IF-NEXT: $x10 = COPY [[C]](s32)
; RV32IF-NEXT: $x11 = COPY [[C1]](s32)
; RV32IF-NEXT: $x12 = COPY [[C2]](s32)
; RV32IF-NEXT: $x13 = COPY [[COPY]](s32)
; RV32IF-NEXT: $x14 = COPY [[C3]](s32)
; RV32IF-NEXT: $x15 = COPY [[C4]](s32)
; RV32IF-NEXT: $x16 = COPY [[C5]](s32)
; RV32IF-NEXT: $x17 = COPY [[C6]](s32)
; RV32IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack1, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $f10_f, implicit-def $f10_f
; RV32IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $f10_f
; RV32IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; RV32IF-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
; RV32IF-NEXT: $f10_f = COPY [[ANYEXT1]](s32)
; RV32IF-NEXT: PseudoRET implicit $f10_f
;
; RV32IZFH-LABEL: name: caller_half_return_stack1
; RV32IZFH: bb.1 (%ir-block.0):
; RV32IZFH-NEXT: liveins: $x10, $f10_h
; RV32IZFH-NEXT: {{ $}}
; RV32IZFH-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32IZFH-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32IZFH-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; RV32IZFH-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
; RV32IZFH-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
; RV32IZFH-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
; RV32IZFH-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV32IZFH-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32IZFH-NEXT: $x10 = COPY [[C]](s32)
; RV32IZFH-NEXT: $x11 = COPY [[C1]](s32)
; RV32IZFH-NEXT: $x12 = COPY [[C2]](s32)
; RV32IZFH-NEXT: $x13 = COPY [[COPY]](s32)
; RV32IZFH-NEXT: $x14 = COPY [[C3]](s32)
; RV32IZFH-NEXT: $x15 = COPY [[C4]](s32)
; RV32IZFH-NEXT: $x16 = COPY [[C5]](s32)
; RV32IZFH-NEXT: $x17 = COPY [[C6]](s32)
; RV32IZFH-NEXT: $f10_h = COPY [[COPY1]](s16)
; RV32IZFH-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack1, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $f10_h, implicit-def $f10_h
; RV32IZFH-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32IZFH-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: $f10_h = COPY [[COPY2]](s16)
; RV32IZFH-NEXT: PseudoRET implicit $f10_h
;
; RV64I-LABEL: name: caller_half_return_stack1
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: liveins: $x10, $x11
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV64I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV64I-NEXT: ADJCALLSTACKDOWN 8, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
; RV64I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
; RV64I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
; RV64I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
; RV64I-NEXT: [[ANYEXT8:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C7]](s64)
; RV64I-NEXT: G_STORE [[ANYEXT8]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64I-NEXT: $x11 = COPY [[ANYEXT1]](s64)
; RV64I-NEXT: $x12 = COPY [[ANYEXT2]](s64)
; RV64I-NEXT: $x13 = COPY [[ANYEXT3]](s64)
; RV64I-NEXT: $x14 = COPY [[ANYEXT4]](s64)
; RV64I-NEXT: $x15 = COPY [[ANYEXT5]](s64)
; RV64I-NEXT: $x16 = COPY [[ANYEXT6]](s64)
; RV64I-NEXT: $x17 = COPY [[ANYEXT7]](s64)
; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack1, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s64)
; RV64I-NEXT: [[ANYEXT9:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC2]](s16)
; RV64I-NEXT: $x10 = COPY [[ANYEXT9]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
; RV64IF-LABEL: name: caller_half_return_stack1
; RV64IF: bb.1 (%ir-block.0):
; RV64IF-NEXT: liveins: $x10, $f10_f
; RV64IF-NEXT: {{ $}}
; RV64IF-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64IF-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; RV64IF-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
; RV64IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV64IF-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64IF-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV64IF-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; RV64IF-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
; RV64IF-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
; RV64IF-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
; RV64IF-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV64IF-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; RV64IF-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; RV64IF-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; RV64IF-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
; RV64IF-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
; RV64IF-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
; RV64IF-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
; RV64IF-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
; RV64IF-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
; RV64IF-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64IF-NEXT: $x11 = COPY [[ANYEXT1]](s64)
; RV64IF-NEXT: $x12 = COPY [[ANYEXT2]](s64)
; RV64IF-NEXT: $x13 = COPY [[ANYEXT3]](s64)
; RV64IF-NEXT: $x14 = COPY [[ANYEXT4]](s64)
; RV64IF-NEXT: $x15 = COPY [[ANYEXT5]](s64)
; RV64IF-NEXT: $x16 = COPY [[ANYEXT6]](s64)
; RV64IF-NEXT: $x17 = COPY [[ANYEXT7]](s64)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT8]](s32)
; RV64IF-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack1, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $f10_f, implicit-def $f10_f
; RV64IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $f10_f
; RV64IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; RV64IF-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT9]](s32)
; RV64IF-NEXT: PseudoRET implicit $f10_f
;
; RV64IZFH-LABEL: name: caller_half_return_stack1
; RV64IZFH: bb.1 (%ir-block.0):
; RV64IZFH-NEXT: liveins: $x10, $f10_h
; RV64IZFH-NEXT: {{ $}}
; RV64IZFH-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64IZFH-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; RV64IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $f10_h
; RV64IZFH-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64IZFH-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV64IZFH-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; RV64IZFH-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
; RV64IZFH-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
; RV64IZFH-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
; RV64IZFH-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV64IZFH-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64IZFH-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; RV64IZFH-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; RV64IZFH-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; RV64IZFH-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
; RV64IZFH-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
; RV64IZFH-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
; RV64IZFH-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
; RV64IZFH-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
; RV64IZFH-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64IZFH-NEXT: $x11 = COPY [[ANYEXT1]](s64)
; RV64IZFH-NEXT: $x12 = COPY [[ANYEXT2]](s64)
; RV64IZFH-NEXT: $x13 = COPY [[ANYEXT3]](s64)
; RV64IZFH-NEXT: $x14 = COPY [[ANYEXT4]](s64)
; RV64IZFH-NEXT: $x15 = COPY [[ANYEXT5]](s64)
; RV64IZFH-NEXT: $x16 = COPY [[ANYEXT6]](s64)
; RV64IZFH-NEXT: $x17 = COPY [[ANYEXT7]](s64)
; RV64IZFH-NEXT: $f10_h = COPY [[COPY1]](s16)
; RV64IZFH-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack1, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $f10_h, implicit-def $f10_h
; RV64IZFH-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64IZFH-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY $f10_h
; RV64IZFH-NEXT: $f10_h = COPY [[COPY2]](s16)
; RV64IZFH-NEXT: PseudoRET implicit $f10_h
%y = call half @callee_half_return_stack1(i32 0, i32 1, i32 2, i32 %v1, i32 5, i32 6, i32 7, i32 8, half %x)
ret half %y
}
;; callee_half_return_stack2: takes nine half arguments and returns the ninth
;; (%x). Exercises IRTranslator argument lowering when the number of half
;; arguments exceeds the available argument registers:
;;  - RV32I/RV64I: the first eight halves arrive in GPRs $x10-$x17 (G_TRUNC to
;;    s16); the ninth is loaded from the incoming stack slot via
;;    G_FRAME_INDEX %fixed-stack.0.
;;  - RV32IF/RV64IF: the first eight halves arrive in FPRs $f10_f-$f17_f
;;    (s32, G_TRUNC to s16); with the FPRs exhausted, the ninth falls back to
;;    the GPR $x10.
;;  - RV32IZFH/RV64IZFH: the first eight arrive directly as s16 in
;;    $f10_h-$f17_h; the ninth falls back to $x10 and is G_TRUNC'd to s16.
define half @callee_half_return_stack2(half %v1, half %v2, half %v3, half %v4, half %v5, half %v6, half %v7, half %v8, half %x) nounwind {
; RV32I-LABEL: name: callee_half_return_stack2
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; RV32I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32I-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; RV32I-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32I-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; RV32I-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32I-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; RV32I-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32I-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s32)
; RV32I-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32I-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY7]](s32)
; RV32I-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; RV32I-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %fixed-stack.0, align 16)
; RV32I-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC8]](s16)
; RV32I-NEXT: $x10 = COPY [[ANYEXT]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
;
; RV32IF-LABEL: name: callee_half_return_stack2
; RV32IF: bb.1 (%ir-block.0):
; RV32IF-NEXT: liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f, $f14_f, $f15_f, $f16_f, $f17_f
; RV32IF-NEXT: {{ $}}
; RV32IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
; RV32IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV32IF-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f11_f
; RV32IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV32IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $f12_f
; RV32IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; RV32IF-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $f13_f
; RV32IF-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; RV32IF-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $f14_f
; RV32IF-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; RV32IF-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $f15_f
; RV32IF-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; RV32IF-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $f16_f
; RV32IF-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s32)
; RV32IF-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $f17_f
; RV32IF-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY7]](s32)
; RV32IF-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $x10
; RV32IF-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
; RV32IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC8]](s16)
; RV32IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: PseudoRET implicit $f10_f
;
; RV32IZFH-LABEL: name: callee_half_return_stack2
; RV32IZFH: bb.1 (%ir-block.0):
; RV32IZFH-NEXT: liveins: $x10, $f10_h, $f11_h, $f12_h, $f13_h, $f14_h, $f15_h, $f16_h, $f17_h
; RV32IZFH-NEXT: {{ $}}
; RV32IZFH-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $f11_h
; RV32IZFH-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY $f12_h
; RV32IZFH-NEXT: [[COPY3:%[0-9]+]]:_(s16) = COPY $f13_h
; RV32IZFH-NEXT: [[COPY4:%[0-9]+]]:_(s16) = COPY $f14_h
; RV32IZFH-NEXT: [[COPY5:%[0-9]+]]:_(s16) = COPY $f15_h
; RV32IZFH-NEXT: [[COPY6:%[0-9]+]]:_(s16) = COPY $f16_h
; RV32IZFH-NEXT: [[COPY7:%[0-9]+]]:_(s16) = COPY $f17_h
; RV32IZFH-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $x10
; RV32IZFH-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
; RV32IZFH-NEXT: $f10_h = COPY [[TRUNC]](s16)
; RV32IZFH-NEXT: PseudoRET implicit $f10_h
;
; RV64I-LABEL: name: callee_half_return_stack2
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s64)
; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64I-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s64)
; RV64I-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64I-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s64)
; RV64I-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64I-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s64)
; RV64I-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64I-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s64)
; RV64I-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64I-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY7]](s64)
; RV64I-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; RV64I-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %fixed-stack.0, align 16)
; RV64I-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s64)
; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC8]](s16)
; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
; RV64IF-LABEL: name: callee_half_return_stack2
; RV64IF: bb.1 (%ir-block.0):
; RV64IF-NEXT: liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f, $f14_f, $f15_f, $f16_f, $f17_f
; RV64IF-NEXT: {{ $}}
; RV64IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
; RV64IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV64IF-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f11_f
; RV64IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV64IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $f12_f
; RV64IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; RV64IF-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $f13_f
; RV64IF-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; RV64IF-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $f14_f
; RV64IF-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; RV64IF-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $f15_f
; RV64IF-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; RV64IF-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $f16_f
; RV64IF-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s32)
; RV64IF-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $f17_f
; RV64IF-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY7]](s32)
; RV64IF-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY $x10
; RV64IF-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s64)
; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC8]](s16)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV64IF-NEXT: PseudoRET implicit $f10_f
;
; RV64IZFH-LABEL: name: callee_half_return_stack2
; RV64IZFH: bb.1 (%ir-block.0):
; RV64IZFH-NEXT: liveins: $x10, $f10_h, $f11_h, $f12_h, $f13_h, $f14_h, $f15_h, $f16_h, $f17_h
; RV64IZFH-NEXT: {{ $}}
; RV64IZFH-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $f10_h
; RV64IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $f11_h
; RV64IZFH-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY $f12_h
; RV64IZFH-NEXT: [[COPY3:%[0-9]+]]:_(s16) = COPY $f13_h
; RV64IZFH-NEXT: [[COPY4:%[0-9]+]]:_(s16) = COPY $f14_h
; RV64IZFH-NEXT: [[COPY5:%[0-9]+]]:_(s16) = COPY $f15_h
; RV64IZFH-NEXT: [[COPY6:%[0-9]+]]:_(s16) = COPY $f16_h
; RV64IZFH-NEXT: [[COPY7:%[0-9]+]]:_(s16) = COPY $f17_h
; RV64IZFH-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY $x10
; RV64IZFH-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s64)
; RV64IZFH-NEXT: $f10_h = COPY [[TRUNC]](s16)
; RV64IZFH-NEXT: PseudoRET implicit $f10_h
ret half %x
}
;; caller_half_return_stack2: forwards its two half arguments plus the half
;; constants 1.0 (0xH3C00) and 3.0 (0xH4200) as nine half arguments to
;; callee_half_return_stack2 and returns the call result. Exercises the
;; caller side of the same nine-half-argument lowering:
;;  - RV32I: eight args anyext'd into $x10-$x17; the ninth is G_STOREd to the
;;    outgoing stack slot (ADJCALLSTACKDOWN 4). RV64I is the same with s64
;;    values and an 8-byte stack adjustment.
;;  - RV32IF/RV64IF: eight args anyext'd to s32 in $f10_f-$f17_f; with the
;;    FPRs exhausted, the ninth is passed in $x10 and no outgoing stack space
;;    is needed (ADJCALLSTACKDOWN 0).
;;  - RV32IZFH/RV64IZFH: eight s16 args copied directly into $f10_h-$f17_h;
;;    the ninth is anyext'd into $x10.
define half @caller_half_return_stack2(half %x, half %y) nounwind {
; RV32I-LABEL: name: caller_half_return_stack2
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: liveins: $x10, $x11
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV32I-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
; RV32I-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH4200
; RV32I-NEXT: ADJCALLSTACKDOWN 4, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
; RV32I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
; RV32I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
; RV32I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
; RV32I-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
; RV32I-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
; RV32I-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
; RV32I-NEXT: [[COPY7:%[0-9]+]]:_(p0) = COPY $x2
; RV32I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY7]], [[C2]](s32)
; RV32I-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack, align 16)
; RV32I-NEXT: $x10 = COPY [[ANYEXT]](s32)
; RV32I-NEXT: $x11 = COPY [[ANYEXT1]](s32)
; RV32I-NEXT: $x12 = COPY [[COPY2]](s32)
; RV32I-NEXT: $x13 = COPY [[ANYEXT2]](s32)
; RV32I-NEXT: $x14 = COPY [[COPY3]](s32)
; RV32I-NEXT: $x15 = COPY [[ANYEXT3]](s32)
; RV32I-NEXT: $x16 = COPY [[COPY4]](s32)
; RV32I-NEXT: $x17 = COPY [[COPY5]](s32)
; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; RV32I-NEXT: ADJCALLSTACKUP 4, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
; RV32I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
; RV32I-NEXT: $x10 = COPY [[ANYEXT4]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
;
; RV32IF-LABEL: name: caller_half_return_stack2
; RV32IF: bb.1 (%ir-block.0):
; RV32IF-NEXT: liveins: $f10_f, $f11_f
; RV32IF-NEXT: {{ $}}
; RV32IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
; RV32IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV32IF-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f11_f
; RV32IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV32IF-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
; RV32IF-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH4200
; RV32IF-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32IF-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
; RV32IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
; RV32IF-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
; RV32IF-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
; RV32IF-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
; RV32IF-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: $f11_f = COPY [[ANYEXT1]](s32)
; RV32IF-NEXT: $f12_f = COPY [[COPY2]](s32)
; RV32IF-NEXT: $f13_f = COPY [[ANYEXT2]](s32)
; RV32IF-NEXT: $f14_f = COPY [[COPY3]](s32)
; RV32IF-NEXT: $f15_f = COPY [[ANYEXT3]](s32)
; RV32IF-NEXT: $f16_f = COPY [[COPY4]](s32)
; RV32IF-NEXT: $f17_f = COPY [[COPY5]](s32)
; RV32IF-NEXT: $x10 = COPY [[COPY6]](s32)
; RV32IF-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_f, implicit $f11_f, implicit $f12_f, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit $x10, implicit-def $f10_f
; RV32IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32IF-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $f10_f
; RV32IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY7]](s32)
; RV32IF-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
; RV32IF-NEXT: $f10_f = COPY [[ANYEXT4]](s32)
; RV32IF-NEXT: PseudoRET implicit $f10_f
;
; RV32IZFH-LABEL: name: caller_half_return_stack2
; RV32IZFH: bb.1 (%ir-block.0):
; RV32IZFH-NEXT: liveins: $f10_h, $f11_h
; RV32IZFH-NEXT: {{ $}}
; RV32IZFH-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $f11_h
; RV32IZFH-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
; RV32IZFH-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH4200
; RV32IZFH-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32IZFH-NEXT: $f10_h = COPY [[COPY]](s16)
; RV32IZFH-NEXT: $f11_h = COPY [[C]](s16)
; RV32IZFH-NEXT: $f12_h = COPY [[COPY]](s16)
; RV32IZFH-NEXT: $f13_h = COPY [[C1]](s16)
; RV32IZFH-NEXT: $f14_h = COPY [[COPY]](s16)
; RV32IZFH-NEXT: $f15_h = COPY [[COPY1]](s16)
; RV32IZFH-NEXT: $f16_h = COPY [[COPY1]](s16)
; RV32IZFH-NEXT: $f17_h = COPY [[COPY1]](s16)
; RV32IZFH-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s16)
; RV32IZFH-NEXT: $x10 = COPY [[ANYEXT]](s32)
; RV32IZFH-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_h, implicit $f11_h, implicit $f12_h, implicit $f13_h, implicit $f14_h, implicit $f15_h, implicit $f16_h, implicit $f17_h, implicit $x10, implicit-def $f10_h
; RV32IZFH-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32IZFH-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: $f10_h = COPY [[COPY2]](s16)
; RV32IZFH-NEXT: PseudoRET implicit $f10_h
;
; RV64I-LABEL: name: caller_half_return_stack2
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: liveins: $x10, $x11
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
; RV64I-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
; RV64I-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH4200
; RV64I-NEXT: ADJCALLSTACKDOWN 8, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s16)
; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ANYEXT]](s64)
; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s16)
; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[ANYEXT]](s64)
; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
; RV64I-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ANYEXT3]](s64)
; RV64I-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY [[ANYEXT3]](s64)
; RV64I-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY [[ANYEXT]](s64)
; RV64I-NEXT: [[COPY7:%[0-9]+]]:_(p0) = COPY $x2
; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY7]], [[C2]](s64)
; RV64I-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64I-NEXT: $x11 = COPY [[ANYEXT1]](s64)
; RV64I-NEXT: $x12 = COPY [[COPY2]](s64)
; RV64I-NEXT: $x13 = COPY [[ANYEXT2]](s64)
; RV64I-NEXT: $x14 = COPY [[COPY3]](s64)
; RV64I-NEXT: $x15 = COPY [[ANYEXT3]](s64)
; RV64I-NEXT: $x16 = COPY [[COPY4]](s64)
; RV64I-NEXT: $x17 = COPY [[COPY5]](s64)
; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s64)
; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC2]](s16)
; RV64I-NEXT: $x10 = COPY [[ANYEXT4]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
; RV64IF-LABEL: name: caller_half_return_stack2
; RV64IF: bb.1 (%ir-block.0):
; RV64IF-NEXT: liveins: $f10_f, $f11_f
; RV64IF-NEXT: {{ $}}
; RV64IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
; RV64IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; RV64IF-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f11_f
; RV64IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV64IF-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
; RV64IF-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH4200
; RV64IF-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV64IF-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
; RV64IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
; RV64IF-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
; RV64IF-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
; RV64IF-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
; RV64IF-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
; RV64IF-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV64IF-NEXT: $f11_f = COPY [[ANYEXT1]](s32)
; RV64IF-NEXT: $f12_f = COPY [[COPY2]](s32)
; RV64IF-NEXT: $f13_f = COPY [[ANYEXT2]](s32)
; RV64IF-NEXT: $f14_f = COPY [[COPY3]](s32)
; RV64IF-NEXT: $f15_f = COPY [[ANYEXT3]](s32)
; RV64IF-NEXT: $f16_f = COPY [[COPY4]](s32)
; RV64IF-NEXT: $f17_f = COPY [[COPY5]](s32)
; RV64IF-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
; RV64IF-NEXT: $x10 = COPY [[ANYEXT4]](s64)
; RV64IF-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_f, implicit $f11_f, implicit $f12_f, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit $x10, implicit-def $f10_f
; RV64IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64IF-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $f10_f
; RV64IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s32)
; RV64IF-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT5]](s32)
; RV64IF-NEXT: PseudoRET implicit $f10_f
;
; RV64IZFH-LABEL: name: caller_half_return_stack2
; RV64IZFH: bb.1 (%ir-block.0):
; RV64IZFH-NEXT: liveins: $f10_h, $f11_h
; RV64IZFH-NEXT: {{ $}}
; RV64IZFH-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $f10_h
; RV64IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $f11_h
; RV64IZFH-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
; RV64IZFH-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH4200
; RV64IZFH-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64IZFH-NEXT: $f10_h = COPY [[COPY]](s16)
; RV64IZFH-NEXT: $f11_h = COPY [[C]](s16)
; RV64IZFH-NEXT: $f12_h = COPY [[COPY]](s16)
; RV64IZFH-NEXT: $f13_h = COPY [[C1]](s16)
; RV64IZFH-NEXT: $f14_h = COPY [[COPY]](s16)
; RV64IZFH-NEXT: $f15_h = COPY [[COPY1]](s16)
; RV64IZFH-NEXT: $f16_h = COPY [[COPY1]](s16)
; RV64IZFH-NEXT: $f17_h = COPY [[COPY1]](s16)
; RV64IZFH-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s16)
; RV64IZFH-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64IZFH-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_h, implicit $f11_h, implicit $f12_h, implicit $f13_h, implicit $f14_h, implicit $f15_h, implicit $f16_h, implicit $f17_h, implicit $x10, implicit-def $f10_h
; RV64IZFH-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64IZFH-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY $f10_h
; RV64IZFH-NEXT: $f10_h = COPY [[COPY2]](s16)
; RV64IZFH-NEXT: PseudoRET implicit $f10_h
%z = call half @callee_half_return_stack2(half %x, half 1.0, half %x, half 3.0, half %x, half %y, half %y, half %y, half %x)
ret half %z
}