; llvm/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32F %s
; RUN: llc -mtriple=riscv32 -mattr=+zfinx -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32ZFINX %s
; RUN: llc -mtriple=riscv32 -mattr=+f,+d -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32FD %s
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64F %s
; RUN: llc -mtriple=riscv64 -mattr=+zfinx -target-abi=lp64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64ZFINX %s
; RUN: llc -mtriple=riscv64 -mattr=+f,+d -target-abi=lp64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64FD %s

; These functions perform extra work to ensure that `%a3` starts in a
; floating-point register, if the machine has them, and the result of
; the bitwise operation is then needed in a floating-point register.
; This should mean the optimisations will fire even if you're using the
; soft-float ABI on a machine with hardware floating-point support.

; AND with 0x7fffffff clears only the float's sign bit, so the bit-preserving
; DAG combine should select fabs.s instead of materialising the mask and doing
; the AND in an integer register.
define float @bitcast_and(float %a1, float %a2) nounwind {
; RV32F-LABEL: bitcast_and:
; RV32F:       # %bb.0:
; RV32F-NEXT:    fmv.w.x fa5, a1
; RV32F-NEXT:    fmv.w.x fa4, a0
; RV32F-NEXT:    fadd.s fa5, fa4, fa5
; RV32F-NEXT:    fabs.s fa5, fa5
; RV32F-NEXT:    fadd.s fa5, fa4, fa5
; RV32F-NEXT:    fmv.x.w a0, fa5
; RV32F-NEXT:    ret
;
; RV32ZFINX-LABEL: bitcast_and:
; RV32ZFINX:       # %bb.0:
; RV32ZFINX-NEXT:    fadd.s a1, a0, a1
; RV32ZFINX-NEXT:    fabs.s a1, a1
; RV32ZFINX-NEXT:    fadd.s a0, a0, a1
; RV32ZFINX-NEXT:    ret
;
; RV32FD-LABEL: bitcast_and:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    fmv.w.x fa5, a1
; RV32FD-NEXT:    fmv.w.x fa4, a0
; RV32FD-NEXT:    fadd.s fa5, fa4, fa5
; RV32FD-NEXT:    fabs.s fa5, fa5
; RV32FD-NEXT:    fadd.s fa5, fa4, fa5
; RV32FD-NEXT:    fmv.x.w a0, fa5
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_and:
; RV64F:       # %bb.0:
; RV64F-NEXT:    fmv.w.x fa5, a1
; RV64F-NEXT:    fmv.w.x fa4, a0
; RV64F-NEXT:    fadd.s fa5, fa4, fa5
; RV64F-NEXT:    fabs.s fa5, fa5
; RV64F-NEXT:    fadd.s fa5, fa4, fa5
; RV64F-NEXT:    fmv.x.w a0, fa5
; RV64F-NEXT:    ret
;
; RV64ZFINX-LABEL: bitcast_and:
; RV64ZFINX:       # %bb.0:
; RV64ZFINX-NEXT:    fadd.s a1, a0, a1
; RV64ZFINX-NEXT:    fabs.s a1, a1
; RV64ZFINX-NEXT:    fadd.s a0, a0, a1
; RV64ZFINX-NEXT:    ret
;
; RV64FD-LABEL: bitcast_and:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.w.x fa5, a1
; RV64FD-NEXT:    fmv.w.x fa4, a0
; RV64FD-NEXT:    fadd.s fa5, fa4, fa5
; RV64FD-NEXT:    fabs.s fa5, fa5
; RV64FD-NEXT:    fadd.s fa5, fa4, fa5
; RV64FD-NEXT:    fmv.x.w a0, fa5
; RV64FD-NEXT:    ret
  %a3 = fadd float %a1, %a2          ; forces %a3 into an FP register (if available)
  %bc1 = bitcast float %a3 to i32
  %and = and i32 %bc1, 2147483647    ; 0x7fffffff: clear the sign bit (fabs pattern)
  %bc2 = bitcast i32 %and to float
  %a4 = fadd float %a1, %bc2         ; result needed back in an FP register
  ret float %a4
}

; Double-precision variant of bitcast_and. With +d the AND of
; 0x7fffffffffffffff should become fabs.d; targets without hardware double
; fall back to __adddf3 libcalls and apply the mask with a slli/srli pair
; (shift the sign bit out and back in), which avoids materialising the
; 63-bit-wide constant.
define double @bitcast_double_and(double %a1, double %a2) nounwind {
; RV32F-LABEL: bitcast_double_and:
; RV32F:       # %bb.0:
; RV32F-NEXT:    addi sp, sp, -16
; RV32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32F-NEXT:    mv s0, a1
; RV32F-NEXT:    mv s1, a0
; RV32F-NEXT:    call __adddf3
; RV32F-NEXT:    mv a2, a0
; RV32F-NEXT:    slli a1, a1, 1
; RV32F-NEXT:    srli a3, a1, 1
; RV32F-NEXT:    mv a0, s1
; RV32F-NEXT:    mv a1, s0
; RV32F-NEXT:    call __adddf3
; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32F-NEXT:    addi sp, sp, 16
; RV32F-NEXT:    ret
;
; RV32ZFINX-LABEL: bitcast_double_and:
; RV32ZFINX:       # %bb.0:
; RV32ZFINX-NEXT:    addi sp, sp, -16
; RV32ZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    mv s0, a1
; RV32ZFINX-NEXT:    mv s1, a0
; RV32ZFINX-NEXT:    call __adddf3
; RV32ZFINX-NEXT:    mv a2, a0
; RV32ZFINX-NEXT:    slli a1, a1, 1
; RV32ZFINX-NEXT:    srli a3, a1, 1
; RV32ZFINX-NEXT:    mv a0, s1
; RV32ZFINX-NEXT:    mv a1, s0
; RV32ZFINX-NEXT:    call __adddf3
; RV32ZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    addi sp, sp, 16
; RV32ZFINX-NEXT:    ret
;
; RV32FD-LABEL: bitcast_double_and:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    addi sp, sp, -16
; RV32FD-NEXT:    sw a2, 8(sp)
; RV32FD-NEXT:    sw a3, 12(sp)
; RV32FD-NEXT:    fld fa5, 8(sp)
; RV32FD-NEXT:    sw a0, 8(sp)
; RV32FD-NEXT:    sw a1, 12(sp)
; RV32FD-NEXT:    fld fa4, 8(sp)
; RV32FD-NEXT:    fadd.d fa5, fa4, fa5
; RV32FD-NEXT:    fabs.d fa5, fa5
; RV32FD-NEXT:    fadd.d fa5, fa4, fa5
; RV32FD-NEXT:    fsd fa5, 8(sp)
; RV32FD-NEXT:    lw a0, 8(sp)
; RV32FD-NEXT:    lw a1, 12(sp)
; RV32FD-NEXT:    addi sp, sp, 16
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_double_and:
; RV64F:       # %bb.0:
; RV64F-NEXT:    addi sp, sp, -16
; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64F-NEXT:    mv s0, a0
; RV64F-NEXT:    call __adddf3
; RV64F-NEXT:    slli a0, a0, 1
; RV64F-NEXT:    srli a1, a0, 1
; RV64F-NEXT:    mv a0, s0
; RV64F-NEXT:    call __adddf3
; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64F-NEXT:    addi sp, sp, 16
; RV64F-NEXT:    ret
;
; RV64ZFINX-LABEL: bitcast_double_and:
; RV64ZFINX:       # %bb.0:
; RV64ZFINX-NEXT:    addi sp, sp, -16
; RV64ZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT:    mv s0, a0
; RV64ZFINX-NEXT:    call __adddf3
; RV64ZFINX-NEXT:    slli a0, a0, 1
; RV64ZFINX-NEXT:    srli a1, a0, 1
; RV64ZFINX-NEXT:    mv a0, s0
; RV64ZFINX-NEXT:    call __adddf3
; RV64ZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT:    addi sp, sp, 16
; RV64ZFINX-NEXT:    ret
;
; RV64FD-LABEL: bitcast_double_and:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.d.x fa5, a1
; RV64FD-NEXT:    fmv.d.x fa4, a0
; RV64FD-NEXT:    fadd.d fa5, fa4, fa5
; RV64FD-NEXT:    fabs.d fa5, fa5
; RV64FD-NEXT:    fadd.d fa5, fa4, fa5
; RV64FD-NEXT:    fmv.x.d a0, fa5
; RV64FD-NEXT:    ret
  %a3 = fadd double %a1, %a2                  ; forces %a3 into an FP register (if available)
  %bc1 = bitcast double %a3 to i64
  %and = and i64 %bc1, 9223372036854775807    ; 0x7fffffffffffffff: clear the sign bit (fabs pattern)
  %bc2 = bitcast i64 %and to double
  %a4 = fadd double %a1, %bc2                 ; result needed back in an FP register
  ret double %a4
}


; XOR with 0x80000000 flips only the float's sign bit, so the bit-preserving
; DAG combine should select fneg.s instead of an integer-register XOR.
define float @bitcast_xor(float %a1, float %a2) nounwind {
; RV32F-LABEL: bitcast_xor:
; RV32F:       # %bb.0:
; RV32F-NEXT:    fmv.w.x fa5, a1
; RV32F-NEXT:    fmv.w.x fa4, a0
; RV32F-NEXT:    fmul.s fa5, fa4, fa5
; RV32F-NEXT:    fneg.s fa5, fa5
; RV32F-NEXT:    fmul.s fa5, fa4, fa5
; RV32F-NEXT:    fmv.x.w a0, fa5
; RV32F-NEXT:    ret
;
; RV32ZFINX-LABEL: bitcast_xor:
; RV32ZFINX:       # %bb.0:
; RV32ZFINX-NEXT:    fmul.s a1, a0, a1
; RV32ZFINX-NEXT:    fneg.s a1, a1
; RV32ZFINX-NEXT:    fmul.s a0, a0, a1
; RV32ZFINX-NEXT:    ret
;
; RV32FD-LABEL: bitcast_xor:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    fmv.w.x fa5, a1
; RV32FD-NEXT:    fmv.w.x fa4, a0
; RV32FD-NEXT:    fmul.s fa5, fa4, fa5
; RV32FD-NEXT:    fneg.s fa5, fa5
; RV32FD-NEXT:    fmul.s fa5, fa4, fa5
; RV32FD-NEXT:    fmv.x.w a0, fa5
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_xor:
; RV64F:       # %bb.0:
; RV64F-NEXT:    fmv.w.x fa5, a1
; RV64F-NEXT:    fmv.w.x fa4, a0
; RV64F-NEXT:    fmul.s fa5, fa4, fa5
; RV64F-NEXT:    fneg.s fa5, fa5
; RV64F-NEXT:    fmul.s fa5, fa4, fa5
; RV64F-NEXT:    fmv.x.w a0, fa5
; RV64F-NEXT:    ret
;
; RV64ZFINX-LABEL: bitcast_xor:
; RV64ZFINX:       # %bb.0:
; RV64ZFINX-NEXT:    fmul.s a1, a0, a1
; RV64ZFINX-NEXT:    fneg.s a1, a1
; RV64ZFINX-NEXT:    fmul.s a0, a0, a1
; RV64ZFINX-NEXT:    ret
;
; RV64FD-LABEL: bitcast_xor:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.w.x fa5, a1
; RV64FD-NEXT:    fmv.w.x fa4, a0
; RV64FD-NEXT:    fmul.s fa5, fa4, fa5
; RV64FD-NEXT:    fneg.s fa5, fa5
; RV64FD-NEXT:    fmul.s fa5, fa4, fa5
; RV64FD-NEXT:    fmv.x.w a0, fa5
; RV64FD-NEXT:    ret
  %a3 = fmul float %a1, %a2          ; forces %a3 into an FP register (if available)
  %bc1 = bitcast float %a3 to i32
  ; NOTE(review): %and is a misleading name here — the operation is a xor.
  %and = xor i32 %bc1, 2147483648    ; 0x80000000: flip the sign bit (fneg pattern)
  %bc2 = bitcast i32 %and to float
  %a4 = fmul float %a1, %bc2         ; result needed back in an FP register
  ret float %a4
}

; Double-precision variant of bitcast_xor. With +d the XOR of the sign bit
; should become fneg.d; softfloat-lowered targets call __muldf3 and flip the
; sign bit in an integer register (lui 524288 == 0x80000000 on RV32; a
; li/slli-built 1<<63 constant on RV64).
define double @bitcast_double_xor(double %a1, double %a2) nounwind {
; RV32F-LABEL: bitcast_double_xor:
; RV32F:       # %bb.0:
; RV32F-NEXT:    addi sp, sp, -16
; RV32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32F-NEXT:    mv s0, a1
; RV32F-NEXT:    mv s1, a0
; RV32F-NEXT:    call __muldf3
; RV32F-NEXT:    mv a2, a0
; RV32F-NEXT:    lui a3, 524288
; RV32F-NEXT:    xor a3, a1, a3
; RV32F-NEXT:    mv a0, s1
; RV32F-NEXT:    mv a1, s0
; RV32F-NEXT:    call __muldf3
; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32F-NEXT:    addi sp, sp, 16
; RV32F-NEXT:    ret
;
; RV32ZFINX-LABEL: bitcast_double_xor:
; RV32ZFINX:       # %bb.0:
; RV32ZFINX-NEXT:    addi sp, sp, -16
; RV32ZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    mv s0, a1
; RV32ZFINX-NEXT:    mv s1, a0
; RV32ZFINX-NEXT:    call __muldf3
; RV32ZFINX-NEXT:    mv a2, a0
; RV32ZFINX-NEXT:    lui a3, 524288
; RV32ZFINX-NEXT:    xor a3, a1, a3
; RV32ZFINX-NEXT:    mv a0, s1
; RV32ZFINX-NEXT:    mv a1, s0
; RV32ZFINX-NEXT:    call __muldf3
; RV32ZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    addi sp, sp, 16
; RV32ZFINX-NEXT:    ret
;
; RV32FD-LABEL: bitcast_double_xor:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    addi sp, sp, -16
; RV32FD-NEXT:    sw a2, 8(sp)
; RV32FD-NEXT:    sw a3, 12(sp)
; RV32FD-NEXT:    fld fa5, 8(sp)
; RV32FD-NEXT:    sw a0, 8(sp)
; RV32FD-NEXT:    sw a1, 12(sp)
; RV32FD-NEXT:    fld fa4, 8(sp)
; RV32FD-NEXT:    fmul.d fa5, fa4, fa5
; RV32FD-NEXT:    fneg.d fa5, fa5
; RV32FD-NEXT:    fmul.d fa5, fa4, fa5
; RV32FD-NEXT:    fsd fa5, 8(sp)
; RV32FD-NEXT:    lw a0, 8(sp)
; RV32FD-NEXT:    lw a1, 12(sp)
; RV32FD-NEXT:    addi sp, sp, 16
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_double_xor:
; RV64F:       # %bb.0:
; RV64F-NEXT:    addi sp, sp, -16
; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64F-NEXT:    mv s0, a0
; RV64F-NEXT:    call __muldf3
; RV64F-NEXT:    li a1, -1
; RV64F-NEXT:    slli a1, a1, 63
; RV64F-NEXT:    xor a1, a0, a1
; RV64F-NEXT:    mv a0, s0
; RV64F-NEXT:    call __muldf3
; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64F-NEXT:    addi sp, sp, 16
; RV64F-NEXT:    ret
;
; RV64ZFINX-LABEL: bitcast_double_xor:
; RV64ZFINX:       # %bb.0:
; RV64ZFINX-NEXT:    addi sp, sp, -16
; RV64ZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT:    mv s0, a0
; RV64ZFINX-NEXT:    call __muldf3
; RV64ZFINX-NEXT:    li a1, -1
; RV64ZFINX-NEXT:    slli a1, a1, 63
; RV64ZFINX-NEXT:    xor a1, a0, a1
; RV64ZFINX-NEXT:    mv a0, s0
; RV64ZFINX-NEXT:    call __muldf3
; RV64ZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT:    addi sp, sp, 16
; RV64ZFINX-NEXT:    ret
;
; RV64FD-LABEL: bitcast_double_xor:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.d.x fa5, a1
; RV64FD-NEXT:    fmv.d.x fa4, a0
; RV64FD-NEXT:    fmul.d fa5, fa4, fa5
; RV64FD-NEXT:    fneg.d fa5, fa5
; RV64FD-NEXT:    fmul.d fa5, fa4, fa5
; RV64FD-NEXT:    fmv.x.d a0, fa5
; RV64FD-NEXT:    ret
  %a3 = fmul double %a1, %a2                  ; forces %a3 into an FP register (if available)
  %bc1 = bitcast double %a3 to i64
  ; NOTE(review): %and is a misleading name here — the operation is a xor.
  %and = xor i64 %bc1, 9223372036854775808    ; 1 << 63: flip the sign bit (fneg pattern)
  %bc2 = bitcast i64 %and to double
  %a4 = fmul double %a1, %bc2                 ; result needed back in an FP register
  ret double %a4
}

; OR with 0x80000000 forces the float's sign bit to 1 (i.e. -fabs(x)), so the
; bit-preserving DAG combine should lower it to fabs.s followed by fneg.s
; instead of an integer-register OR.
define float @bitcast_or(float %a1, float %a2) nounwind {
; RV32F-LABEL: bitcast_or:
; RV32F:       # %bb.0:
; RV32F-NEXT:    fmv.w.x fa5, a1
; RV32F-NEXT:    fmv.w.x fa4, a0
; RV32F-NEXT:    fmul.s fa5, fa4, fa5
; RV32F-NEXT:    fabs.s fa5, fa5
; RV32F-NEXT:    fneg.s fa5, fa5
; RV32F-NEXT:    fmul.s fa5, fa4, fa5
; RV32F-NEXT:    fmv.x.w a0, fa5
; RV32F-NEXT:    ret
;
; RV32ZFINX-LABEL: bitcast_or:
; RV32ZFINX:       # %bb.0:
; RV32ZFINX-NEXT:    fmul.s a1, a0, a1
; RV32ZFINX-NEXT:    fabs.s a1, a1
; RV32ZFINX-NEXT:    fneg.s a1, a1
; RV32ZFINX-NEXT:    fmul.s a0, a0, a1
; RV32ZFINX-NEXT:    ret
;
; RV32FD-LABEL: bitcast_or:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    fmv.w.x fa5, a1
; RV32FD-NEXT:    fmv.w.x fa4, a0
; RV32FD-NEXT:    fmul.s fa5, fa4, fa5
; RV32FD-NEXT:    fabs.s fa5, fa5
; RV32FD-NEXT:    fneg.s fa5, fa5
; RV32FD-NEXT:    fmul.s fa5, fa4, fa5
; RV32FD-NEXT:    fmv.x.w a0, fa5
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_or:
; RV64F:       # %bb.0:
; RV64F-NEXT:    fmv.w.x fa5, a1
; RV64F-NEXT:    fmv.w.x fa4, a0
; RV64F-NEXT:    fmul.s fa5, fa4, fa5
; RV64F-NEXT:    fabs.s fa5, fa5
; RV64F-NEXT:    fneg.s fa5, fa5
; RV64F-NEXT:    fmul.s fa5, fa4, fa5
; RV64F-NEXT:    fmv.x.w a0, fa5
; RV64F-NEXT:    ret
;
; RV64ZFINX-LABEL: bitcast_or:
; RV64ZFINX:       # %bb.0:
; RV64ZFINX-NEXT:    fmul.s a1, a0, a1
; RV64ZFINX-NEXT:    fabs.s a1, a1
; RV64ZFINX-NEXT:    fneg.s a1, a1
; RV64ZFINX-NEXT:    fmul.s a0, a0, a1
; RV64ZFINX-NEXT:    ret
;
; RV64FD-LABEL: bitcast_or:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.w.x fa5, a1
; RV64FD-NEXT:    fmv.w.x fa4, a0
; RV64FD-NEXT:    fmul.s fa5, fa4, fa5
; RV64FD-NEXT:    fabs.s fa5, fa5
; RV64FD-NEXT:    fneg.s fa5, fa5
; RV64FD-NEXT:    fmul.s fa5, fa4, fa5
; RV64FD-NEXT:    fmv.x.w a0, fa5
; RV64FD-NEXT:    ret
  %a3 = fmul float %a1, %a2         ; forces %a3 into an FP register (if available)
  %bc1 = bitcast float %a3 to i32
  ; NOTE(review): %and is a misleading name here — the operation is an or.
  %and = or i32 %bc1, 2147483648    ; 0x80000000: set the sign bit (-fabs pattern)
  %bc2 = bitcast i32 %and to float
  %a4 = fmul float %a1, %bc2        ; result needed back in an FP register
  ret float %a4
}

; Double-precision variant of bitcast_or. With +d the OR of the sign bit
; should become fabs.d + fneg.d; softfloat-lowered targets call __muldf3 and
; set the sign bit in an integer register (lui 524288 == 0x80000000 on RV32;
; a li/slli-built 1<<63 constant on RV64).
define double @bitcast_double_or(double %a1, double %a2) nounwind {
; RV32F-LABEL: bitcast_double_or:
; RV32F:       # %bb.0:
; RV32F-NEXT:    addi sp, sp, -16
; RV32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32F-NEXT:    mv s0, a1
; RV32F-NEXT:    mv s1, a0
; RV32F-NEXT:    call __muldf3
; RV32F-NEXT:    mv a2, a0
; RV32F-NEXT:    lui a3, 524288
; RV32F-NEXT:    or a3, a1, a3
; RV32F-NEXT:    mv a0, s1
; RV32F-NEXT:    mv a1, s0
; RV32F-NEXT:    call __muldf3
; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32F-NEXT:    addi sp, sp, 16
; RV32F-NEXT:    ret
;
; RV32ZFINX-LABEL: bitcast_double_or:
; RV32ZFINX:       # %bb.0:
; RV32ZFINX-NEXT:    addi sp, sp, -16
; RV32ZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    mv s0, a1
; RV32ZFINX-NEXT:    mv s1, a0
; RV32ZFINX-NEXT:    call __muldf3
; RV32ZFINX-NEXT:    mv a2, a0
; RV32ZFINX-NEXT:    lui a3, 524288
; RV32ZFINX-NEXT:    or a3, a1, a3
; RV32ZFINX-NEXT:    mv a0, s1
; RV32ZFINX-NEXT:    mv a1, s0
; RV32ZFINX-NEXT:    call __muldf3
; RV32ZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    addi sp, sp, 16
; RV32ZFINX-NEXT:    ret
;
; RV32FD-LABEL: bitcast_double_or:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    addi sp, sp, -16
; RV32FD-NEXT:    sw a2, 8(sp)
; RV32FD-NEXT:    sw a3, 12(sp)
; RV32FD-NEXT:    fld fa5, 8(sp)
; RV32FD-NEXT:    sw a0, 8(sp)
; RV32FD-NEXT:    sw a1, 12(sp)
; RV32FD-NEXT:    fld fa4, 8(sp)
; RV32FD-NEXT:    fmul.d fa5, fa4, fa5
; RV32FD-NEXT:    fabs.d fa5, fa5
; RV32FD-NEXT:    fneg.d fa5, fa5
; RV32FD-NEXT:    fmul.d fa5, fa4, fa5
; RV32FD-NEXT:    fsd fa5, 8(sp)
; RV32FD-NEXT:    lw a0, 8(sp)
; RV32FD-NEXT:    lw a1, 12(sp)
; RV32FD-NEXT:    addi sp, sp, 16
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_double_or:
; RV64F:       # %bb.0:
; RV64F-NEXT:    addi sp, sp, -16
; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64F-NEXT:    mv s0, a0
; RV64F-NEXT:    call __muldf3
; RV64F-NEXT:    li a1, -1
; RV64F-NEXT:    slli a1, a1, 63
; RV64F-NEXT:    or a1, a0, a1
; RV64F-NEXT:    mv a0, s0
; RV64F-NEXT:    call __muldf3
; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64F-NEXT:    addi sp, sp, 16
; RV64F-NEXT:    ret
;
; RV64ZFINX-LABEL: bitcast_double_or:
; RV64ZFINX:       # %bb.0:
; RV64ZFINX-NEXT:    addi sp, sp, -16
; RV64ZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT:    mv s0, a0
; RV64ZFINX-NEXT:    call __muldf3
; RV64ZFINX-NEXT:    li a1, -1
; RV64ZFINX-NEXT:    slli a1, a1, 63
; RV64ZFINX-NEXT:    or a1, a0, a1
; RV64ZFINX-NEXT:    mv a0, s0
; RV64ZFINX-NEXT:    call __muldf3
; RV64ZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT:    addi sp, sp, 16
; RV64ZFINX-NEXT:    ret
;
; RV64FD-LABEL: bitcast_double_or:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.d.x fa5, a1
; RV64FD-NEXT:    fmv.d.x fa4, a0
; RV64FD-NEXT:    fmul.d fa5, fa4, fa5
; RV64FD-NEXT:    fabs.d fa5, fa5
; RV64FD-NEXT:    fneg.d fa5, fa5
; RV64FD-NEXT:    fmul.d fa5, fa4, fa5
; RV64FD-NEXT:    fmv.x.d a0, fa5
; RV64FD-NEXT:    ret
  %a3 = fmul double %a1, %a2                 ; forces %a3 into an FP register (if available)
  %bc1 = bitcast double %a3 to i64
  ; NOTE(review): %and is a misleading name here — the operation is an or.
  %and = or i64 %bc1, 9223372036854775808    ; 1 << 63: set the sign bit (-fabs pattern)
  %bc2 = bitcast i64 %and to double
  %a4 = fmul double %a1, %bc2                ; result needed back in an FP register
  ret double %a4
}