llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64d.ll

; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi lp64d \
; RUN:    -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s

; This file contains tests that will have different output for the lp64/lp64f
; and lp64d ABIs.

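; Check that a double argument is passed in an FPR ($f10_d) under lp64d, while
; the i64 argument is passed in a GPR ($x10).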
define i64 @callee_double_in_regs(i64 %a, double %b) nounwind {
  ; RV64I-LABEL: name: callee_double_in_regs
  ; RV64I: bb.1 (%ir-block.0):
  ; RV64I-NEXT:   liveins: $x10, $f10_d
  ; RV64I-NEXT: {{  $}}
  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $f10_d
  ; RV64I-NEXT:   [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[COPY1]](s64)
  ; RV64I-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[FPTOSI]]
  ; RV64I-NEXT:   $x10 = COPY [[ADD]](s64)
  ; RV64I-NEXT:   PseudoRET implicit $x10
  %b_fptosi = fptosi double %b to i64
  %1 = add i64 %a, %b_fptosi
  ret i64 %1
}

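; Caller side of the above: the i64 constant is placed in $x10 and the double
; constant in $f10_d before the call.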
define i64 @caller_double_in_regs() nounwind {
  ; RV64I-LABEL: name: caller_double_in_regs
  ; RV64I: bb.1 (%ir-block.0):
  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
  ; RV64I-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
  ; RV64I-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
  ; RV64I-NEXT:   $x10 = COPY [[C]](s64)
  ; RV64I-NEXT:   $f10_d = COPY [[C1]](s64)
  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_double_in_regs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $f10_d, implicit-def $x10
  ; RV64I-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
  ; RV64I-NEXT:   $x10 = COPY [[COPY]](s64)
  ; RV64I-NEXT:   PseudoRET implicit $x10
  %1 = call i64 @callee_double_in_regs(i64 1, double 2.0)
  ret i64 %1
}

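; The four i128 arguments consume all eight argument GPRs, so the i64 %e is
; passed on the stack while the double %f is still passed in $f10_d.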
; Must keep define on a single line due to an update_mir_test_checks.py limitation
define i64 @callee_double_in_fpr_exhausted_gprs(i128 %a, i128 %b, i128 %c, i128 %d, i64 %e, double %f) nounwind {
  ; RV64I-LABEL: name: callee_double_in_fpr_exhausted_gprs
  ; RV64I: bb.1 (%ir-block.0):
  ; RV64I-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $f10_d
  ; RV64I-NEXT: {{  $}}
  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
  ; RV64I-NEXT:   [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[COPY1]](s64)
  ; RV64I-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
  ; RV64I-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
  ; RV64I-NEXT:   [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY2]](s64), [[COPY3]](s64)
  ; RV64I-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
  ; RV64I-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
  ; RV64I-NEXT:   [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY4]](s64), [[COPY5]](s64)
  ; RV64I-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
  ; RV64I-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
  ; RV64I-NEXT:   [[MV3:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY6]](s64), [[COPY7]](s64)
  ; RV64I-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
  ; RV64I-NEXT:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %fixed-stack.0, align 16)
  ; RV64I-NEXT:   [[COPY8:%[0-9]+]]:_(s64) = COPY $f10_d
  ; RV64I-NEXT:   [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[COPY8]](s64)
  ; RV64I-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LOAD]], [[FPTOSI]]
  ; RV64I-NEXT:   $x10 = COPY [[ADD]](s64)
  ; RV64I-NEXT:   PseudoRET implicit $x10
  %f_fptosi = fptosi double %f to i64
  %1 = add i64 %e, %f_fptosi
  ret i64 %1
}

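; Caller side of the above: i64 5 is stored to the outgoing argument area on
; the stack and 6.0 is passed in $f10_d.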
define i64 @caller_double_in_fpr_exhausted_gprs() nounwind {
  ; RV64I-LABEL: name: caller_double_in_fpr_exhausted_gprs
  ; RV64I: bb.1 (%ir-block.0):
  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 1
  ; RV64I-NEXT:   [[C1:%[0-9]+]]:_(s128) = G_CONSTANT i128 2
  ; RV64I-NEXT:   [[C2:%[0-9]+]]:_(s128) = G_CONSTANT i128 3
  ; RV64I-NEXT:   [[C3:%[0-9]+]]:_(s128) = G_CONSTANT i128 4
  ; RV64I-NEXT:   [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
  ; RV64I-NEXT:   [[C5:%[0-9]+]]:_(s64) = G_FCONSTANT double 6.000000e+00
  ; RV64I-NEXT:   ADJCALLSTACKDOWN 8, 0, implicit-def $x2, implicit $x2
  ; RV64I-NEXT:   [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C]](s128)
  ; RV64I-NEXT:   [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C1]](s128)
  ; RV64I-NEXT:   [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C2]](s128)
  ; RV64I-NEXT:   [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C3]](s128)
  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x2
  ; RV64I-NEXT:   [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; RV64I-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
  ; RV64I-NEXT:   G_STORE [[C4]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
  ; RV64I-NEXT:   $x10 = COPY [[UV]](s64)
  ; RV64I-NEXT:   $x11 = COPY [[UV1]](s64)
  ; RV64I-NEXT:   $x12 = COPY [[UV2]](s64)
  ; RV64I-NEXT:   $x13 = COPY [[UV3]](s64)
  ; RV64I-NEXT:   $x14 = COPY [[UV4]](s64)
  ; RV64I-NEXT:   $x15 = COPY [[UV5]](s64)
  ; RV64I-NEXT:   $x16 = COPY [[UV6]](s64)
  ; RV64I-NEXT:   $x17 = COPY [[UV7]](s64)
  ; RV64I-NEXT:   $f10_d = COPY [[C5]](s64)
  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_double_in_fpr_exhausted_gprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $f10_d, implicit-def $x10
  ; RV64I-NEXT:   ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
  ; RV64I-NEXT:   $x10 = COPY [[COPY1]](s64)
  ; RV64I-NEXT:   PseudoRET implicit $x10
  %1 = call i64 @callee_double_in_fpr_exhausted_gprs(
      i128 1, i128 2, i128 3, i128 4, i64 5, double 6.0)
  ret i64 %1
}

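; With all eight argument FPRs taken by %a-%h, the ninth double (%i) is passed
; in a GPR ($x10).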
; Must keep define on a single line due to an update_mir_test_checks.py limitation
define i32 @callee_double_in_gpr_exhausted_fprs(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i) nounwind {
  ; RV64I-LABEL: name: callee_double_in_gpr_exhausted_fprs
  ; RV64I: bb.1 (%ir-block.0):
  ; RV64I-NEXT:   liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d, $f14_d, $f15_d, $f16_d, $f17_d
  ; RV64I-NEXT: {{  $}}
  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $f11_d
  ; RV64I-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $f12_d
  ; RV64I-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $f13_d
  ; RV64I-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $f14_d
  ; RV64I-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $f15_d
  ; RV64I-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $f16_d
  ; RV64I-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $f17_d
  ; RV64I-NEXT:   [[COPY8:%[0-9]+]]:_(s64) = COPY $x10
  ; RV64I-NEXT:   [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY7]](s64)
  ; RV64I-NEXT:   [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY8]](s64)
  ; RV64I-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[FPTOSI]], [[FPTOSI1]]
  ; RV64I-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD]](s32)
  ; RV64I-NEXT:   $x10 = COPY [[ANYEXT]](s64)
  ; RV64I-NEXT:   PseudoRET implicit $x10
  %h_fptosi = fptosi double %h to i32
  %i_fptosi = fptosi double %i to i32
  %1 = add i32 %h_fptosi, %i_fptosi
  ret i32 %1
}

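; Caller side of the above: doubles 1.0-8.0 go in $f10_d-$f17_d and 9.0 is
; passed in $x10.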
define i32 @caller_double_in_gpr_exhausted_fprs() nounwind {
  ; RV64I-LABEL: name: caller_double_in_gpr_exhausted_fprs
  ; RV64I: bb.1 (%ir-block.0):
  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
  ; RV64I-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
  ; RV64I-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 3.000000e+00
  ; RV64I-NEXT:   [[C3:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.000000e+00
  ; RV64I-NEXT:   [[C4:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e+00
  ; RV64I-NEXT:   [[C5:%[0-9]+]]:_(s64) = G_FCONSTANT double 6.000000e+00
  ; RV64I-NEXT:   [[C6:%[0-9]+]]:_(s64) = G_FCONSTANT double 7.000000e+00
  ; RV64I-NEXT:   [[C7:%[0-9]+]]:_(s64) = G_FCONSTANT double 8.000000e+00
  ; RV64I-NEXT:   [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 9.000000e+00
  ; RV64I-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
  ; RV64I-NEXT:   $f10_d = COPY [[C]](s64)
  ; RV64I-NEXT:   $f11_d = COPY [[C1]](s64)
  ; RV64I-NEXT:   $f12_d = COPY [[C2]](s64)
  ; RV64I-NEXT:   $f13_d = COPY [[C3]](s64)
  ; RV64I-NEXT:   $f14_d = COPY [[C4]](s64)
  ; RV64I-NEXT:   $f15_d = COPY [[C5]](s64)
  ; RV64I-NEXT:   $f16_d = COPY [[C6]](s64)
  ; RV64I-NEXT:   $f17_d = COPY [[C7]](s64)
  ; RV64I-NEXT:   $x10 = COPY [[C8]](s64)
  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_double_in_gpr_exhausted_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit $f11_d, implicit $f12_d, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit $x10, implicit-def $x10
  ; RV64I-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
  ; RV64I-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
  ; RV64I-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
  ; RV64I-NEXT:   $x10 = COPY [[ANYEXT]](s64)
  ; RV64I-NEXT:   PseudoRET implicit $x10
  %1 = call i32 @callee_double_in_gpr_exhausted_fprs(
      double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0,
      double 7.0, double 8.0, double 9.0)
  ret i32 %1
}

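; With both the argument GPRs and FPRs exhausted, the final double (%m) is
; passed on the stack.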
; Must keep define on a single line due to an update_mir_test_checks.py limitation
define i64 @callee_double_on_stack_exhausted_gprs_fprs(i128 %a, double %b, i128 %c, double %d, i128 %e, double %f, i128 %g, double %h, double %i, double %j, double %k, double %l, double %m) nounwind {
  ; RV64I-LABEL: name: callee_double_on_stack_exhausted_gprs_fprs
  ; RV64I: bb.1 (%ir-block.0):
  ; RV64I-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $f10_d, $f11_d, $f12_d, $f13_d, $f14_d, $f15_d, $f16_d, $f17_d
  ; RV64I-NEXT: {{  $}}
  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
  ; RV64I-NEXT:   [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[COPY1]](s64)
  ; RV64I-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $f10_d
  ; RV64I-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x12
  ; RV64I-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x13
  ; RV64I-NEXT:   [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY3]](s64), [[COPY4]](s64)
  ; RV64I-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $f11_d
  ; RV64I-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x14
  ; RV64I-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x15
  ; RV64I-NEXT:   [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY6]](s64), [[COPY7]](s64)
  ; RV64I-NEXT:   [[COPY8:%[0-9]+]]:_(s64) = COPY $f12_d
  ; RV64I-NEXT:   [[COPY9:%[0-9]+]]:_(s64) = COPY $x16
  ; RV64I-NEXT:   [[COPY10:%[0-9]+]]:_(s64) = COPY $x17
  ; RV64I-NEXT:   [[MV3:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY9]](s64), [[COPY10]](s64)
  ; RV64I-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY $f13_d
  ; RV64I-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY $f14_d
  ; RV64I-NEXT:   [[COPY13:%[0-9]+]]:_(s64) = COPY $f15_d
  ; RV64I-NEXT:   [[COPY14:%[0-9]+]]:_(s64) = COPY $f16_d
  ; RV64I-NEXT:   [[COPY15:%[0-9]+]]:_(s64) = COPY $f17_d
  ; RV64I-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
  ; RV64I-NEXT:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %fixed-stack.0, align 16)
  ; RV64I-NEXT:   [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[MV3]](s128)
  ; RV64I-NEXT:   [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[LOAD]](s64)
  ; RV64I-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[TRUNC]], [[FPTOSI]]
  ; RV64I-NEXT:   $x10 = COPY [[ADD]](s64)
  ; RV64I-NEXT:   PseudoRET implicit $x10
  %g_trunc = trunc i128 %g to i64
  %m_fptosi = fptosi double %m to i64
  %1 = add i64 %g_trunc, %m_fptosi
  ret i64 %1
}

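; Caller side of the above: 13.0 is stored to the outgoing argument area on
; the stack; all other arguments are passed in registers.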
define i64 @caller_double_on_stack_exhausted_gprs_fprs() nounwind {
  ; RV64I-LABEL: name: caller_double_on_stack_exhausted_gprs_fprs
  ; RV64I: bb.1 (%ir-block.0):
  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 1
  ; RV64I-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
  ; RV64I-NEXT:   [[C2:%[0-9]+]]:_(s128) = G_CONSTANT i128 3
  ; RV64I-NEXT:   [[C3:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.000000e+00
  ; RV64I-NEXT:   [[C4:%[0-9]+]]:_(s128) = G_CONSTANT i128 5
  ; RV64I-NEXT:   [[C5:%[0-9]+]]:_(s64) = G_FCONSTANT double 6.000000e+00
  ; RV64I-NEXT:   [[C6:%[0-9]+]]:_(s128) = G_CONSTANT i128 7
  ; RV64I-NEXT:   [[C7:%[0-9]+]]:_(s64) = G_FCONSTANT double 8.000000e+00
  ; RV64I-NEXT:   [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 9.000000e+00
  ; RV64I-NEXT:   [[C9:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+01
  ; RV64I-NEXT:   [[C10:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.100000e+01
  ; RV64I-NEXT:   [[C11:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.200000e+01
  ; RV64I-NEXT:   [[C12:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.300000e+01
  ; RV64I-NEXT:   ADJCALLSTACKDOWN 8, 0, implicit-def $x2, implicit $x2
  ; RV64I-NEXT:   [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C]](s128)
  ; RV64I-NEXT:   [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C2]](s128)
  ; RV64I-NEXT:   [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C4]](s128)
  ; RV64I-NEXT:   [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C6]](s128)
  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x2
  ; RV64I-NEXT:   [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; RV64I-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C13]](s64)
  ; RV64I-NEXT:   G_STORE [[C12]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
  ; RV64I-NEXT:   $x10 = COPY [[UV]](s64)
  ; RV64I-NEXT:   $x11 = COPY [[UV1]](s64)
  ; RV64I-NEXT:   $f10_d = COPY [[C1]](s64)
  ; RV64I-NEXT:   $x12 = COPY [[UV2]](s64)
  ; RV64I-NEXT:   $x13 = COPY [[UV3]](s64)
  ; RV64I-NEXT:   $f11_d = COPY [[C3]](s64)
  ; RV64I-NEXT:   $x14 = COPY [[UV4]](s64)
  ; RV64I-NEXT:   $x15 = COPY [[UV5]](s64)
  ; RV64I-NEXT:   $f12_d = COPY [[C5]](s64)
  ; RV64I-NEXT:   $x16 = COPY [[UV6]](s64)
  ; RV64I-NEXT:   $x17 = COPY [[UV7]](s64)
  ; RV64I-NEXT:   $f13_d = COPY [[C7]](s64)
  ; RV64I-NEXT:   $f14_d = COPY [[C8]](s64)
  ; RV64I-NEXT:   $f15_d = COPY [[C9]](s64)
  ; RV64I-NEXT:   $f16_d = COPY [[C10]](s64)
  ; RV64I-NEXT:   $f17_d = COPY [[C11]](s64)
  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_double_on_stack_exhausted_gprs_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $f10_d, implicit $x12, implicit $x13, implicit $f11_d, implicit $x14, implicit $x15, implicit $f12_d, implicit $x16, implicit $x17, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit-def $x10
  ; RV64I-NEXT:   ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
  ; RV64I-NEXT:   $x10 = COPY [[COPY1]](s64)
  ; RV64I-NEXT:   PseudoRET implicit $x10
  %1 = call i64 @callee_double_on_stack_exhausted_gprs_fprs(
      i128 1, double 2.0, i128 3, double 4.0, i128 5, double 6.0, i128 7, double 8.0,
      double 9.0, double 10.0, double 11.0, double 12.0, double 13.0)
  ret i64 %1
}

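; Check that a double return value is returned in $f10_d.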
define double @callee_double_ret() nounwind {
  ; RV64I-LABEL: name: callee_double_ret
  ; RV64I: bb.1 (%ir-block.0):
  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
  ; RV64I-NEXT:   $f10_d = COPY [[C]](s64)
  ; RV64I-NEXT:   PseudoRET implicit $f10_d
  ret double 1.0
}

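; Caller side of the above: the returned double is read from $f10_d.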
define i64 @caller_double_ret() nounwind {
  ; RV64I-LABEL: name: caller_double_ret
  ; RV64I: bb.1 (%ir-block.0):
  ; RV64I-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_double_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $f10_d
  ; RV64I-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
  ; RV64I-NEXT:   $x10 = COPY [[COPY]](s64)
  ; RV64I-NEXT:   PseudoRET implicit $x10
  %1 = call double @callee_double_ret()
  %2 = bitcast double %1 to i64
  ret i64 %2
}