llvm/test/CodeGen/RISCV/float-intrinsics.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+f \
; RUN:   -verify-machineinstrs -target-abi=ilp32f \
; RUN:   | FileCheck -check-prefix=RV32IF %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zfinx \
; RUN:   -verify-machineinstrs -target-abi=ilp32 \
; RUN:   | FileCheck -check-prefix=RV32IZFINX %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d \
; RUN:   -verify-machineinstrs -target-abi=ilp32f \
; RUN:   | FileCheck -check-prefix=RV32IF %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+f \
; RUN:   -verify-machineinstrs -target-abi=lp64f \
; RUN:   | FileCheck -check-prefix=RV64IF %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zfinx \
; RUN:   -verify-machineinstrs -target-abi=lp64 \
; RUN:   | FileCheck -check-prefix=RV64IZFINX %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \
; RUN:   -verify-machineinstrs -target-abi=lp64d \
; RUN:   | FileCheck -check-prefix=RV64IF %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \
; RUN:   -verify-machineinstrs | FileCheck -check-prefix=RV32I %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 \
; RUN:   -verify-machineinstrs | FileCheck -check-prefix=RV64I %s

declare float @llvm.sqrt.f32(float)

define float @sqrt_f32(float %a) nounwind {
; RV32IF-LABEL: sqrt_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fsqrt.s fa0, fa0
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: sqrt_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fsqrt.s a0, a0
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: sqrt_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fsqrt.s fa0, fa0
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: sqrt_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fsqrt.s a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: sqrt_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call sqrtf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sqrt_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call sqrtf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.sqrt.f32(float %a)
  ret float %1
}

declare float @llvm.powi.f32.i32(float, i32)

define float @powi_f32(float %a, i32 %b) nounwind {
; RV32IF-LABEL: powi_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    tail __powisf2
;
; RV32IZFINX-LABEL: powi_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    tail __powisf2
;
; RV64IF-LABEL: powi_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    sext.w a0, a0
; RV64IF-NEXT:    call __powisf2
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: powi_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    sext.w a1, a1
; RV64IZFINX-NEXT:    call __powisf2
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: powi_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __powisf2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: powi_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    call __powisf2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.powi.f32.i32(float %a, i32 %b)
  ret float %1
}

declare float @llvm.sin.f32(float)

define float @sin_f32(float %a) nounwind {
; RV32IF-LABEL: sin_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    tail sinf
;
; RV32IZFINX-LABEL: sin_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    tail sinf
;
; RV64IF-LABEL: sin_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    tail sinf
;
; RV64IZFINX-LABEL: sin_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call sinf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: sin_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call sinf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sin_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call sinf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.sin.f32(float %a)
  ret float %1
}

declare float @llvm.cos.f32(float)

define float @cos_f32(float %a) nounwind {
; RV32IF-LABEL: cos_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    tail cosf
;
; RV32IZFINX-LABEL: cos_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    tail cosf
;
; RV64IF-LABEL: cos_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    tail cosf
;
; RV64IZFINX-LABEL: cos_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call cosf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: cos_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call cosf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: cos_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call cosf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.cos.f32(float %a)
  ret float %1
}

; The sin+cos combination results in an FSINCOS SelectionDAG node.
define float @sincos_f32(float %a) nounwind {
; RV32IF-LABEL: sincos_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    fsw fs1, 4(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    fmv.s fs0, fa0
; RV32IF-NEXT:    call sinf
; RV32IF-NEXT:    fmv.s fs1, fa0
; RV32IF-NEXT:    fmv.s fa0, fs0
; RV32IF-NEXT:    call cosf
; RV32IF-NEXT:    fadd.s fa0, fs1, fa0
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    flw fs1, 4(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: sincos_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    mv s0, a0
; RV32IZFINX-NEXT:    call sinf
; RV32IZFINX-NEXT:    mv s1, a0
; RV32IZFINX-NEXT:    mv a0, s0
; RV32IZFINX-NEXT:    call cosf
; RV32IZFINX-NEXT:    fadd.s a0, s1, a0
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: sincos_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -32
; RV64IZFINX-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    mv s0, a0
; RV64IZFINX-NEXT:    call sinf
; RV64IZFINX-NEXT:    mv s1, a0
; RV64IZFINX-NEXT:    mv a0, s0
; RV64IZFINX-NEXT:    call cosf
; RV64IZFINX-NEXT:    fadd.s a0, s1, a0
; RV64IZFINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 32
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: sincos_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    call sinf
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call cosf
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __addsf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sincos_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    call sinf
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call cosf
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __addsf3
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call float @llvm.sin.f32(float %a)
  %2 = call float @llvm.cos.f32(float %a)
  %3 = fadd float %1, %2
  ret float %3
}

declare float @llvm.pow.f32(float, float)

define float @pow_f32(float %a, float %b) nounwind {
; RV32IF-LABEL: pow_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    tail powf
;
; RV32IZFINX-LABEL: pow_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    tail powf
;
; RV64IF-LABEL: pow_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    tail powf
;
; RV64IZFINX-LABEL: pow_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call powf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: pow_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call powf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: pow_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call powf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.pow.f32(float %a, float %b)
  ret float %1
}

declare float @llvm.exp.f32(float)

define float @exp_f32(float %a) nounwind {
; RV32IF-LABEL: exp_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    tail expf
;
; RV32IZFINX-LABEL: exp_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    tail expf
;
; RV64IF-LABEL: exp_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    tail expf
;
; RV64IZFINX-LABEL: exp_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call expf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: exp_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call expf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: exp_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call expf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.exp.f32(float %a)
  ret float %1
}

declare float @llvm.exp2.f32(float)

define float @exp2_f32(float %a) nounwind {
; RV32IF-LABEL: exp2_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    tail exp2f
;
; RV32IZFINX-LABEL: exp2_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    tail exp2f
;
; RV64IF-LABEL: exp2_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    tail exp2f
;
; RV64IZFINX-LABEL: exp2_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call exp2f
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: exp2_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call exp2f
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: exp2_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call exp2f
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.exp2.f32(float %a)
  ret float %1
}

declare float @llvm.log.f32(float)

define float @log_f32(float %a) nounwind {
; RV32IF-LABEL: log_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    tail logf
;
; RV32IZFINX-LABEL: log_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    tail logf
;
; RV64IF-LABEL: log_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    tail logf
;
; RV64IZFINX-LABEL: log_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call logf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: log_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call logf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: log_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call logf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.log.f32(float %a)
  ret float %1
}

declare float @llvm.log10.f32(float)

define float @log10_f32(float %a) nounwind {
; RV32IF-LABEL: log10_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    tail log10f
;
; RV32IZFINX-LABEL: log10_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    tail log10f
;
; RV64IF-LABEL: log10_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    tail log10f
;
; RV64IZFINX-LABEL: log10_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call log10f
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: log10_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call log10f
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: log10_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call log10f
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.log10.f32(float %a)
  ret float %1
}

declare float @llvm.log2.f32(float)

define float @log2_f32(float %a) nounwind {
; RV32IF-LABEL: log2_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    tail log2f
;
; RV32IZFINX-LABEL: log2_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    tail log2f
;
; RV64IF-LABEL: log2_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    tail log2f
;
; RV64IZFINX-LABEL: log2_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call log2f
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: log2_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call log2f
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: log2_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call log2f
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.log2.f32(float %a)
  ret float %1
}

declare float @llvm.fma.f32(float, float, float)

define float @fma_f32(float %a, float %b, float %c) nounwind {
; RV32IF-LABEL: fma_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fma_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fmadd.s a0, a0, a1, a2
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: fma_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: fma_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fmadd.s a0, a0, a1, a2
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fma_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fmaf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fma_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fmaf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.fma.f32(float %a, float %b, float %c)
  ret float %1
}

declare float @llvm.fmuladd.f32(float, float, float)

define float @fmuladd_f32(float %a, float %b, float %c) nounwind {
; RV32IF-LABEL: fmuladd_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fmuladd_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fmadd.s a0, a0, a1, a2
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: fmuladd_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: fmuladd_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fmadd.s a0, a0, a1, a2
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fmuladd_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a2
; RV32I-NEXT:    call __mulsf3
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __addsf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmuladd_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a2
; RV64I-NEXT:    call __mulsf3
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __addsf3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
  ret float %1
}

declare float @llvm.fabs.f32(float)

define float @fabs_f32(float %a) nounwind {
; RV32IF-LABEL: fabs_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fabs.s fa0, fa0
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fabs_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    slli a0, a0, 1
; RV32IZFINX-NEXT:    srli a0, a0, 1
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: fabs_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fabs.s fa0, fa0
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: fabs_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    slli a0, a0, 33
; RV64IZFINX-NEXT:    srli a0, a0, 33
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fabs_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 1
; RV32I-NEXT:    srli a0, a0, 1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fabs_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 33
; RV64I-NEXT:    srli a0, a0, 33
; RV64I-NEXT:    ret
  %1 = call float @llvm.fabs.f32(float %a)
  ret float %1
}

declare float @llvm.minnum.f32(float, float)

define float @minnum_f32(float %a, float %b) nounwind {
; RV32IF-LABEL: minnum_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmin.s fa0, fa0, fa1
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: minnum_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fmin.s a0, a0, a1
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: minnum_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmin.s fa0, fa0, fa1
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: minnum_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fmin.s a0, a0, a1
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: minnum_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fminf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: minnum_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fminf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.minnum.f32(float %a, float %b)
  ret float %1
}

declare float @llvm.maxnum.f32(float, float)

define float @maxnum_f32(float %a, float %b) nounwind {
; RV32IF-LABEL: maxnum_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmax.s fa0, fa0, fa1
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: maxnum_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fmax.s a0, a0, a1
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: maxnum_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmax.s fa0, fa0, fa1
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: maxnum_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fmax.s a0, a0, a1
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: maxnum_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fmaxf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: maxnum_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fmaxf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.maxnum.f32(float %a, float %b)
  ret float %1
}

; TODO: FMINIMUM and FMAXIMUM aren't handled in
; SelectionDAGLegalize::ExpandNode.

; declare float @llvm.minimum.f32(float, float)

; define float @fminimum_f32(float %a, float %b) nounwind {
;   %1 = call float @llvm.minimum.f32(float %a, float %b)
;   ret float %1
; }

; declare float @llvm.maximum.f32(float, float)

; define float @fmaximum_f32(float %a, float %b) nounwind {
;   %1 = call float @llvm.maximum.f32(float %a, float %b)
;   ret float %1
; }

declare float @llvm.copysign.f32(float, float)

define float @copysign_f32(float %a, float %b) nounwind {
; RV32IF-LABEL: copysign_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fsgnj.s fa0, fa0, fa1
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: copysign_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fsgnj.s a0, a0, a1
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: copysign_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fsgnj.s fa0, fa0, fa1
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: copysign_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fsgnj.s a0, a0, a1
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: copysign_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a2, 524288
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    slli a0, a0, 1
; RV32I-NEXT:    srli a0, a0, 1
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: copysign_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a2, 524288
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    slli a0, a0, 33
; RV64I-NEXT:    srli a0, a0, 33
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
  %1 = call float @llvm.copysign.f32(float %a, float %b)
  ret float %1
}

declare float @llvm.floor.f32(float)

define float @floor_f32(float %a) nounwind {
; RV32IF-LABEL: floor_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB17_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rdn
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB17_2:
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: floor_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB17_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rdn
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rdn
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB17_2:
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: floor_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lui a0, 307200
; RV64IF-NEXT:    fmv.w.x fa5, a0
; RV64IF-NEXT:    fabs.s fa4, fa0
; RV64IF-NEXT:    flt.s a0, fa4, fa5
; RV64IF-NEXT:    beqz a0, .LBB17_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV64IF-NEXT:    fcvt.s.w fa5, a0, rdn
; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT:  .LBB17_2:
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: floor_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    lui a1, 307200
; RV64IZFINX-NEXT:    fabs.s a2, a0
; RV64IZFINX-NEXT:    flt.s a1, a2, a1
; RV64IZFINX-NEXT:    beqz a1, .LBB17_2
; RV64IZFINX-NEXT:  # %bb.1:
; RV64IZFINX-NEXT:    fcvt.w.s a1, a0, rdn
; RV64IZFINX-NEXT:    fcvt.s.w a1, a1, rdn
; RV64IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV64IZFINX-NEXT:  .LBB17_2:
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: floor_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call floorf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: floor_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call floorf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.floor.f32(float %a)
  ret float %1
}

declare float @llvm.ceil.f32(float)

define float @ceil_f32(float %a) nounwind {
; RV32IF-LABEL: ceil_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB18_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rup
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB18_2:
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: ceil_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB18_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rup
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rup
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB18_2:
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: ceil_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lui a0, 307200
; RV64IF-NEXT:    fmv.w.x fa5, a0
; RV64IF-NEXT:    fabs.s fa4, fa0
; RV64IF-NEXT:    flt.s a0, fa4, fa5
; RV64IF-NEXT:    beqz a0, .LBB18_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV64IF-NEXT:    fcvt.s.w fa5, a0, rup
; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT:  .LBB18_2:
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: ceil_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    lui a1, 307200
; RV64IZFINX-NEXT:    fabs.s a2, a0
; RV64IZFINX-NEXT:    flt.s a1, a2, a1
; RV64IZFINX-NEXT:    beqz a1, .LBB18_2
; RV64IZFINX-NEXT:  # %bb.1:
; RV64IZFINX-NEXT:    fcvt.w.s a1, a0, rup
; RV64IZFINX-NEXT:    fcvt.s.w a1, a1, rup
; RV64IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV64IZFINX-NEXT:  .LBB18_2:
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: ceil_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call ceilf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: ceil_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call ceilf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.ceil.f32(float %a)
  ret float %1
}

declare float @llvm.trunc.f32(float)

define float @trunc_f32(float %a) nounwind {
; RV32IF-LABEL: trunc_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB19_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rtz
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB19_2:
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: trunc_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB19_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rtz
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rtz
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB19_2:
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: trunc_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lui a0, 307200
; RV64IF-NEXT:    fmv.w.x fa5, a0
; RV64IF-NEXT:    fabs.s fa4, fa0
; RV64IF-NEXT:    flt.s a0, fa4, fa5
; RV64IF-NEXT:    beqz a0, .LBB19_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT:    fcvt.s.w fa5, a0, rtz
; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT:  .LBB19_2:
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: trunc_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    lui a1, 307200
; RV64IZFINX-NEXT:    fabs.s a2, a0
; RV64IZFINX-NEXT:    flt.s a1, a2, a1
; RV64IZFINX-NEXT:    beqz a1, .LBB19_2
; RV64IZFINX-NEXT:  # %bb.1:
; RV64IZFINX-NEXT:    fcvt.w.s a1, a0, rtz
; RV64IZFINX-NEXT:    fcvt.s.w a1, a1, rtz
; RV64IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV64IZFINX-NEXT:  .LBB19_2:
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: trunc_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call truncf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: trunc_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call truncf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.trunc.f32(float %a)
  ret float %1
}

declare float @llvm.rint.f32(float)

define float @rint_f32(float %a) nounwind {
; RV32IF-LABEL: rint_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB20_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0
; RV32IF-NEXT:    fcvt.s.w fa5, a0
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB20_2:
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: rint_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB20_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB20_2:
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: rint_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lui a0, 307200
; RV64IF-NEXT:    fmv.w.x fa5, a0
; RV64IF-NEXT:    fabs.s fa4, fa0
; RV64IF-NEXT:    flt.s a0, fa4, fa5
; RV64IF-NEXT:    beqz a0, .LBB20_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    fcvt.w.s a0, fa0
; RV64IF-NEXT:    fcvt.s.w fa5, a0
; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT:  .LBB20_2:
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: rint_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    lui a1, 307200
; RV64IZFINX-NEXT:    fabs.s a2, a0
; RV64IZFINX-NEXT:    flt.s a1, a2, a1
; RV64IZFINX-NEXT:    beqz a1, .LBB20_2
; RV64IZFINX-NEXT:  # %bb.1:
; RV64IZFINX-NEXT:    fcvt.w.s a1, a0
; RV64IZFINX-NEXT:    fcvt.s.w a1, a1
; RV64IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV64IZFINX-NEXT:  .LBB20_2:
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: rint_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call rintf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rint_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call rintf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.rint.f32(float %a)
  ret float %1
}

declare float @llvm.nearbyint.f32(float)

define float @nearbyint_f32(float %a) nounwind {
; RV32IF-LABEL: nearbyint_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    tail nearbyintf
;
; RV32IZFINX-LABEL: nearbyint_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    tail nearbyintf
;
; RV64IF-LABEL: nearbyint_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    tail nearbyintf
;
; RV64IZFINX-LABEL: nearbyint_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call nearbyintf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: nearbyint_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call nearbyintf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: nearbyint_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call nearbyintf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.nearbyint.f32(float %a)
  ret float %1
}

declare float @llvm.round.f32(float)

define float @round_f32(float %a) nounwind {
; RV32IF-LABEL: round_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB22_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rmm
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB22_2:
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: round_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB22_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rmm
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rmm
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB22_2:
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: round_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lui a0, 307200
; RV64IF-NEXT:    fmv.w.x fa5, a0
; RV64IF-NEXT:    fabs.s fa4, fa0
; RV64IF-NEXT:    flt.s a0, fa4, fa5
; RV64IF-NEXT:    beqz a0, .LBB22_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV64IF-NEXT:    fcvt.s.w fa5, a0, rmm
; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT:  .LBB22_2:
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: round_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    lui a1, 307200
; RV64IZFINX-NEXT:    fabs.s a2, a0
; RV64IZFINX-NEXT:    flt.s a1, a2, a1
; RV64IZFINX-NEXT:    beqz a1, .LBB22_2
; RV64IZFINX-NEXT:  # %bb.1:
; RV64IZFINX-NEXT:    fcvt.w.s a1, a0, rmm
; RV64IZFINX-NEXT:    fcvt.s.w a1, a1, rmm
; RV64IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV64IZFINX-NEXT:  .LBB22_2:
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: round_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call roundf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: round_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call roundf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.round.f32(float %a)
  ret float %1
}

declare float @llvm.roundeven.f32(float)

define float @roundeven_f32(float %a) nounwind {
; RV32IF-LABEL: roundeven_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB23_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rne
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB23_2:
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: roundeven_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB23_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rne
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rne
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB23_2:
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: roundeven_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lui a0, 307200
; RV64IF-NEXT:    fmv.w.x fa5, a0
; RV64IF-NEXT:    fabs.s fa4, fa0
; RV64IF-NEXT:    flt.s a0, fa4, fa5
; RV64IF-NEXT:    beqz a0, .LBB23_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV64IF-NEXT:    fcvt.s.w fa5, a0, rne
; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT:  .LBB23_2:
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: roundeven_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    lui a1, 307200
; RV64IZFINX-NEXT:    fabs.s a2, a0
; RV64IZFINX-NEXT:    flt.s a1, a2, a1
; RV64IZFINX-NEXT:    beqz a1, .LBB23_2
; RV64IZFINX-NEXT:  # %bb.1:
; RV64IZFINX-NEXT:    fcvt.w.s a1, a0, rne
; RV64IZFINX-NEXT:    fcvt.s.w a1, a1, rne
; RV64IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV64IZFINX-NEXT:  .LBB23_2:
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: roundeven_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call roundevenf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: roundeven_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call roundevenf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.roundeven.f32(float %a)
  ret float %1
}

declare iXLen @llvm.lrint.iXLen.f32(float)

define iXLen @lrint_f32(float %a) nounwind {
; RV32IF-LABEL: lrint_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: lrint_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: lrint_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: lrint_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: lrint_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call lrintf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: lrint_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call lrintf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call iXLen @llvm.lrint.iXLen.f32(float %a)
  ret iXLen %1
}

declare i32 @llvm.lround.i32.f32(float)
declare i64 @llvm.lround.i64.f32(float)

define iXLen @lround_f32(float %a) nounwind {
; RV32IF-LABEL: lround_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: lround_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rmm
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: lround_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: lround_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rmm
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: lround_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call lroundf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: lround_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call lroundf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call iXLen @llvm.lround.iXLen.f32(float %a)
  ret iXLen %1
}

; We support i32 lround on RV64 even though long isn't 32 bits. This is needed
; by flang.
define i32 @lround_i32_f32(float %a) nounwind {
; RV32IF-LABEL: lround_i32_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: lround_i32_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rmm
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: lround_i32_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: lround_i32_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.w.s a0, a0, rmm
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: lround_i32_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call lroundf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: lround_i32_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call lroundf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i32 @llvm.lround.i32.f32(float %a)
  ret i32 %1
}

declare i64 @llvm.llrint.i64.f32(float)

define i64 @llrint_f32(float %a) nounwind {
; RV32IF-LABEL: llrint_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call llrintf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: llrint_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call llrintf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: llrint_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: llrint_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: llrint_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call llrintf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: llrint_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call llrintf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @llvm.llrint.i64.f32(float %a)
  ret i64 %1
}

declare i64 @llvm.llround.i64.f32(float)

define i64 @llround_f32(float %a) nounwind {
; RV32IF-LABEL: llround_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call llroundf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: llround_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call llroundf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: llround_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: llround_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rmm
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: llround_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call llroundf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: llround_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call llroundf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @llvm.llround.i64.f32(float %a)
  ret i64 %1
}

declare i1 @llvm.is.fpclass.f32(float, i32)
define i1 @fpclass(float %x) {
; RV32IF-LABEL: fpclass:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fclass.s a0, fa0
; RV32IF-NEXT:    andi a0, a0, 927
; RV32IF-NEXT:    snez a0, a0
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fpclass:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fclass.s a0, a0
; RV32IZFINX-NEXT:    andi a0, a0, 927
; RV32IZFINX-NEXT:    snez a0, a0
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: fpclass:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fclass.s a0, fa0
; RV64IF-NEXT:    andi a0, a0, 927
; RV64IF-NEXT:    snez a0, a0
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: fpclass:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fclass.s a0, a0
; RV64IZFINX-NEXT:    andi a0, a0, 927
; RV64IZFINX-NEXT:    snez a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fpclass:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 1
; RV32I-NEXT:    srli a1, a1, 1
; RV32I-NEXT:    addi a2, a1, -1
; RV32I-NEXT:    lui a3, 2048
; RV32I-NEXT:    addi a3, a3, -1
; RV32I-NEXT:    sltu a2, a2, a3
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    and a2, a2, a0
; RV32I-NEXT:    seqz a3, a1
; RV32I-NEXT:    lui a4, 522240
; RV32I-NEXT:    xor a5, a1, a4
; RV32I-NEXT:    seqz a5, a5
; RV32I-NEXT:    or a3, a3, a5
; RV32I-NEXT:    or a2, a3, a2
; RV32I-NEXT:    slt a3, a4, a1
; RV32I-NEXT:    or a2, a2, a3
; RV32I-NEXT:    lui a3, 1046528
; RV32I-NEXT:    add a1, a1, a3
; RV32I-NEXT:    srli a1, a1, 24
; RV32I-NEXT:    sltiu a1, a1, 127
; RV32I-NEXT:    and a0, a1, a0
; RV32I-NEXT:    or a0, a2, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fpclass:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a1, a0
; RV64I-NEXT:    slli a0, a0, 33
; RV64I-NEXT:    srli a0, a0, 33
; RV64I-NEXT:    addi a2, a0, -1
; RV64I-NEXT:    lui a3, 2048
; RV64I-NEXT:    addiw a3, a3, -1
; RV64I-NEXT:    sltu a2, a2, a3
; RV64I-NEXT:    slti a1, a1, 0
; RV64I-NEXT:    and a2, a2, a1
; RV64I-NEXT:    seqz a3, a0
; RV64I-NEXT:    lui a4, 522240
; RV64I-NEXT:    xor a5, a0, a4
; RV64I-NEXT:    seqz a5, a5
; RV64I-NEXT:    or a3, a3, a5
; RV64I-NEXT:    or a2, a3, a2
; RV64I-NEXT:    slt a3, a4, a0
; RV64I-NEXT:    or a2, a2, a3
; RV64I-NEXT:    lui a3, 1046528
; RV64I-NEXT:    add a0, a0, a3
; RV64I-NEXT:    srliw a0, a0, 24
; RV64I-NEXT:    sltiu a0, a0, 127
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    or a0, a2, a0
; RV64I-NEXT:    ret
  %cmp = call i1 @llvm.is.fpclass.f32(float %x, i32 639)
  ret i1 %cmp
}

define i1 @isnan_fpclass(float %x) {
; RV32IF-LABEL: isnan_fpclass:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fclass.s a0, fa0
; RV32IF-NEXT:    andi a0, a0, 768
; RV32IF-NEXT:    snez a0, a0
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: isnan_fpclass:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fclass.s a0, a0
; RV32IZFINX-NEXT:    andi a0, a0, 768
; RV32IZFINX-NEXT:    snez a0, a0
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: isnan_fpclass:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fclass.s a0, fa0
; RV64IF-NEXT:    andi a0, a0, 768
; RV64IF-NEXT:    snez a0, a0
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: isnan_fpclass:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fclass.s a0, a0
; RV64IZFINX-NEXT:    andi a0, a0, 768
; RV64IZFINX-NEXT:    snez a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: isnan_fpclass:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 1
; RV32I-NEXT:    srli a0, a0, 1
; RV32I-NEXT:    lui a1, 522240
; RV32I-NEXT:    slt a0, a1, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: isnan_fpclass:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 33
; RV64I-NEXT:    srli a0, a0, 33
; RV64I-NEXT:    lui a1, 522240
; RV64I-NEXT:    slt a0, a1, a0
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 3)  ; nan
  ret i1 %1
}

define i1 @isqnan_fpclass(float %x) {
; RV32IF-LABEL: isqnan_fpclass:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fclass.s a0, fa0
; RV32IF-NEXT:    srli a0, a0, 9
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: isqnan_fpclass:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fclass.s a0, a0
; RV32IZFINX-NEXT:    srli a0, a0, 9
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: isqnan_fpclass:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fclass.s a0, fa0
; RV64IF-NEXT:    srli a0, a0, 9
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: isqnan_fpclass:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fclass.s a0, a0
; RV64IZFINX-NEXT:    srli a0, a0, 9
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: isqnan_fpclass:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 1
; RV32I-NEXT:    srli a0, a0, 1
; RV32I-NEXT:    lui a1, 523264
; RV32I-NEXT:    addi a1, a1, -1
; RV32I-NEXT:    slt a0, a1, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: isqnan_fpclass:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 33
; RV64I-NEXT:    srli a0, a0, 33
; RV64I-NEXT:    lui a1, 523264
; RV64I-NEXT:    addiw a1, a1, -1
; RV64I-NEXT:    slt a0, a1, a0
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 2)  ; qnan
  ret i1 %1
}

define i1 @issnan_fpclass(float %x) {
; RV32IF-LABEL: issnan_fpclass:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fclass.s a0, fa0
; RV32IF-NEXT:    slli a0, a0, 23
; RV32IF-NEXT:    srli a0, a0, 31
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: issnan_fpclass:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fclass.s a0, a0
; RV32IZFINX-NEXT:    slli a0, a0, 23
; RV32IZFINX-NEXT:    srli a0, a0, 31
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: issnan_fpclass:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fclass.s a0, fa0
; RV64IF-NEXT:    slli a0, a0, 55
; RV64IF-NEXT:    srli a0, a0, 63
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: issnan_fpclass:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fclass.s a0, a0
; RV64IZFINX-NEXT:    slli a0, a0, 55
; RV64IZFINX-NEXT:    srli a0, a0, 63
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: issnan_fpclass:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 1
; RV32I-NEXT:    srli a0, a0, 1
; RV32I-NEXT:    lui a1, 523264
; RV32I-NEXT:    slt a1, a0, a1
; RV32I-NEXT:    lui a2, 522240
; RV32I-NEXT:    slt a0, a2, a0
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: issnan_fpclass:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 33
; RV64I-NEXT:    srli a0, a0, 33
; RV64I-NEXT:    lui a1, 523264
; RV64I-NEXT:    slt a1, a0, a1
; RV64I-NEXT:    lui a2, 522240
; RV64I-NEXT:    slt a0, a2, a0
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 1)  ; snan
  ret i1 %1
}

define i1 @isinf_fpclass(float %x) {
; RV32IF-LABEL: isinf_fpclass:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fclass.s a0, fa0
; RV32IF-NEXT:    andi a0, a0, 129
; RV32IF-NEXT:    snez a0, a0
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: isinf_fpclass:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fclass.s a0, a0
; RV32IZFINX-NEXT:    andi a0, a0, 129
; RV32IZFINX-NEXT:    snez a0, a0
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: isinf_fpclass:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fclass.s a0, fa0
; RV64IF-NEXT:    andi a0, a0, 129
; RV64IF-NEXT:    snez a0, a0
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: isinf_fpclass:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fclass.s a0, a0
; RV64IZFINX-NEXT:    andi a0, a0, 129
; RV64IZFINX-NEXT:    snez a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: isinf_fpclass:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 1
; RV32I-NEXT:    srli a0, a0, 1
; RV32I-NEXT:    lui a1, 522240
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: isinf_fpclass:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 33
; RV64I-NEXT:    srli a0, a0, 33
; RV64I-NEXT:    lui a1, 522240
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 516)  ; 0x204 = "inf"
  ret i1 %1
}

define i1 @isposinf_fpclass(float %x) {
; RV32IF-LABEL: isposinf_fpclass:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fclass.s a0, fa0
; RV32IF-NEXT:    slli a0, a0, 24
; RV32IF-NEXT:    srli a0, a0, 31
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: isposinf_fpclass:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fclass.s a0, a0
; RV32IZFINX-NEXT:    slli a0, a0, 24
; RV32IZFINX-NEXT:    srli a0, a0, 31
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: isposinf_fpclass:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fclass.s a0, fa0
; RV64IF-NEXT:    slli a0, a0, 56
; RV64IF-NEXT:    srli a0, a0, 63
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: isposinf_fpclass:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fclass.s a0, a0
; RV64IZFINX-NEXT:    slli a0, a0, 56
; RV64IZFINX-NEXT:    srli a0, a0, 63
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: isposinf_fpclass:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a1, 522240
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: isposinf_fpclass:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    lui a1, 522240
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 512)  ; 0x200 = "+inf"
  ret i1 %1
}

define i1 @isneginf_fpclass(float %x) {
; RV32IF-LABEL: isneginf_fpclass:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fclass.s a0, fa0
; RV32IF-NEXT:    andi a0, a0, 1
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: isneginf_fpclass:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fclass.s a0, a0
; RV32IZFINX-NEXT:    andi a0, a0, 1
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: isneginf_fpclass:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fclass.s a0, fa0
; RV64IF-NEXT:    andi a0, a0, 1
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: isneginf_fpclass:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fclass.s a0, a0
; RV64IZFINX-NEXT:    andi a0, a0, 1
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: isneginf_fpclass:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a1, 1046528
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: isneginf_fpclass:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    lui a1, 1046528
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 4)  ; 0x4 = "-inf"
  ret i1 %1
}

define i1 @isfinite_fpclass(float %x) {
; RV32IF-LABEL: isfinite_fpclass:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fclass.s a0, fa0
; RV32IF-NEXT:    andi a0, a0, 126
; RV32IF-NEXT:    snez a0, a0
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: isfinite_fpclass:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fclass.s a0, a0
; RV32IZFINX-NEXT:    andi a0, a0, 126
; RV32IZFINX-NEXT:    snez a0, a0
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: isfinite_fpclass:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fclass.s a0, fa0
; RV64IF-NEXT:    andi a0, a0, 126
; RV64IF-NEXT:    snez a0, a0
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: isfinite_fpclass:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fclass.s a0, a0
; RV64IZFINX-NEXT:    andi a0, a0, 126
; RV64IZFINX-NEXT:    snez a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: isfinite_fpclass:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 1
; RV32I-NEXT:    srli a0, a0, 1
; RV32I-NEXT:    lui a1, 522240
; RV32I-NEXT:    slt a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: isfinite_fpclass:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 33
; RV64I-NEXT:    srli a0, a0, 33
; RV64I-NEXT:    lui a1, 522240
; RV64I-NEXT:    slt a0, a0, a1
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 504)  ; 0x1f8 = "finite"
  ret i1 %1
}

define i1 @isposfinite_fpclass(float %x) {
; RV32IF-LABEL: isposfinite_fpclass:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fclass.s a0, fa0
; RV32IF-NEXT:    andi a0, a0, 112
; RV32IF-NEXT:    snez a0, a0
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: isposfinite_fpclass:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fclass.s a0, a0
; RV32IZFINX-NEXT:    andi a0, a0, 112
; RV32IZFINX-NEXT:    snez a0, a0
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: isposfinite_fpclass:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fclass.s a0, fa0
; RV64IF-NEXT:    andi a0, a0, 112
; RV64IF-NEXT:    snez a0, a0
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: isposfinite_fpclass:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fclass.s a0, a0
; RV64IZFINX-NEXT:    andi a0, a0, 112
; RV64IZFINX-NEXT:    snez a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: isposfinite_fpclass:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srli a0, a0, 23
; RV32I-NEXT:    sltiu a0, a0, 255
; RV32I-NEXT:    ret
;
; RV64I-LABEL: isposfinite_fpclass:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a0, a0, 23
; RV64I-NEXT:    sltiu a0, a0, 255
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 448)  ; 0x1c0 = "+finite"
  ret i1 %1
}
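
; The soft-float "+finite" lowering above relies on the IEEE single-precision
; layout: after srli(w) by 23 the low bits hold the sign and exponent, and
; sltiu ..., 255 is true only when the sign bit is clear and the exponent is
; below 0xff, i.e. the value is positive and finite.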

define i1 @isnegfinite_fpclass(float %x) {
; RV32IF-LABEL: isnegfinite_fpclass:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fclass.s a0, fa0
; RV32IF-NEXT:    andi a0, a0, 14
; RV32IF-NEXT:    snez a0, a0
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: isnegfinite_fpclass:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fclass.s a0, a0
; RV32IZFINX-NEXT:    andi a0, a0, 14
; RV32IZFINX-NEXT:    snez a0, a0
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: isnegfinite_fpclass:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fclass.s a0, fa0
; RV64IF-NEXT:    andi a0, a0, 14
; RV64IF-NEXT:    snez a0, a0
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: isnegfinite_fpclass:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fclass.s a0, a0
; RV64IZFINX-NEXT:    andi a0, a0, 14
; RV64IZFINX-NEXT:    snez a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: isnegfinite_fpclass:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 1
; RV32I-NEXT:    srli a1, a1, 1
; RV32I-NEXT:    lui a2, 522240
; RV32I-NEXT:    slt a1, a1, a2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    and a0, a1, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: isnegfinite_fpclass:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a1, a0
; RV64I-NEXT:    slli a0, a0, 33
; RV64I-NEXT:    srli a0, a0, 33
; RV64I-NEXT:    lui a2, 522240
; RV64I-NEXT:    slt a0, a0, a2
; RV64I-NEXT:    slti a1, a1, 0
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 56)  ; 0x38 = "-finite"
  ret i1 %1
}

define i1 @isnotfinite_fpclass(float %x) {
; RV32IF-LABEL: isnotfinite_fpclass:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fclass.s a0, fa0
; RV32IF-NEXT:    andi a0, a0, 897
; RV32IF-NEXT:    snez a0, a0
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: isnotfinite_fpclass:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fclass.s a0, a0
; RV32IZFINX-NEXT:    andi a0, a0, 897
; RV32IZFINX-NEXT:    snez a0, a0
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: isnotfinite_fpclass:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fclass.s a0, fa0
; RV64IF-NEXT:    andi a0, a0, 897
; RV64IF-NEXT:    snez a0, a0
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: isnotfinite_fpclass:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fclass.s a0, a0
; RV64IZFINX-NEXT:    andi a0, a0, 897
; RV64IZFINX-NEXT:    snez a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: isnotfinite_fpclass:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 1
; RV32I-NEXT:    srli a0, a0, 1
; RV32I-NEXT:    lui a1, 522240
; RV32I-NEXT:    addi a1, a1, -1
; RV32I-NEXT:    slt a0, a1, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: isnotfinite_fpclass:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 33
; RV64I-NEXT:    srli a0, a0, 33
; RV64I-NEXT:    lui a1, 522240
; RV64I-NEXT:    addiw a1, a1, -1
; RV64I-NEXT:    slt a0, a1, a0
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 519)  ; 0x207 = "inf|nan"
  ret i1 %1
}
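
; The hard-float lowerings below map llvm.maximumnum/llvm.minimumnum directly
; to fmax.s/fmin.s, which (since version 2.2 of the F extension) implement the
; IEEE 754-2019 maximumNumber/minimumNumber semantics of preferring a number
; over a NaN operand; without F/Zfinx, the fmaximum_numf/fminimum_numf
; libcalls are used instead.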

declare float @llvm.maximumnum.f32(float, float)

define float @maximumnum_float(float %x, float %y) {
; RV32IF-LABEL: maximumnum_float:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmax.s fa0, fa0, fa1
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: maximumnum_float:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fmax.s a0, a0, a1
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: maximumnum_float:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmax.s fa0, fa0, fa1
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: maximumnum_float:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fmax.s a0, a0, a1
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: maximumnum_float:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    .cfi_def_cfa_offset 16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    .cfi_offset ra, -4
; RV32I-NEXT:    call fmaximum_numf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: maximumnum_float:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    .cfi_def_cfa_offset 16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    .cfi_offset ra, -8
; RV64I-NEXT:    call fmaximum_numf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %z = call float @llvm.maximumnum.f32(float %x, float %y)
  ret float %z
}

declare float @llvm.minimumnum.f32(float, float)

define float @minimumnum_float(float %x, float %y) {
; RV32IF-LABEL: minimumnum_float:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmin.s fa0, fa0, fa1
; RV32IF-NEXT:    ret
;
; RV32IZFINX-LABEL: minimumnum_float:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fmin.s a0, a0, a1
; RV32IZFINX-NEXT:    ret
;
; RV64IF-LABEL: minimumnum_float:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmin.s fa0, fa0, fa1
; RV64IF-NEXT:    ret
;
; RV64IZFINX-LABEL: minimumnum_float:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fmin.s a0, a0, a1
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: minimumnum_float:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    .cfi_def_cfa_offset 16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    .cfi_offset ra, -4
; RV32I-NEXT:    call fminimum_numf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: minimumnum_float:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    .cfi_def_cfa_offset 16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    .cfi_offset ra, -8
; RV64I-NEXT:    call fminimum_numf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %z = call float @llvm.minimumnum.f32(float %x, float %y)
  ret float %z
}