llvm/test/CodeGen/Mips/mips64-f128.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips4 -mattr=+soft-float -O1 \
; RUN:     -disable-mips-delay-filler -relocation-model=pic < %s | FileCheck \
; RUN:     %s -check-prefixes=ALL,C_CC_FMT,PRER6,NOT-R2R6
; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64 -mattr=+soft-float -O1 \
; RUN:     -disable-mips-delay-filler -relocation-model=pic < %s | FileCheck \
; RUN:     %s -check-prefixes=ALL,C_CC_FMT,PRER6,NOT-R2R6
; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64r2 -mattr=+soft-float \
; RUN:     -O1 -disable-mips-delay-filler -relocation-model=pic < %s | FileCheck \
; RUN:     %s -check-prefixes=ALL,C_CC_FMT,PRER6,R2R6
; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64r6 -mattr=+soft-float \
; RUN:     -O1 -disable-mips-delay-filler -relocation-model=pic < %s | FileCheck \
; RUN:     %s -check-prefixes=ALL,CMP_CC_FMT,R6,R2R6
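;
; This file checks how fp128 (long double) operations are lowered for MIPS64
; with soft-float. Arithmetic and conversions become runtime-library calls
; (__addtf3, __subtf3, __multf3, __divtf3, __floatsitf, __floatditf,
; __floatunsitf, __floatunditf, __fixtfsi, __fixtfdi, __fixunstfsi,
; __fixunstfdi, __extendsftf2, __extenddftf2, __trunctfsf2, __trunctfdf2),
; and the libm long-double routines (ceill, sinl, cosl, expl, exp2l) are
; called directly. Pre-R6 cores call through jalr/jr with a delay-slot nop,
; while mips64r6 uses the compact jalrc/jrc forms.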

@gld0 = external global fp128
@gld1 = external global fp128
@gld2 = external global fp128
@gf1 = external global float
@gd1 = external global double

define fp128 @addLD() {
; C_CC_FMT-LABEL: addLD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(addLD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(addLD)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; C_CC_FMT-NEXT:    ld $6, 0($1)
; C_CC_FMT-NEXT:    ld $7, 8($1)
; C_CC_FMT-NEXT:    ld $25, %call16(__addtf3)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp0, R_MIPS_JALR, __addtf3
; C_CC_FMT-NEXT:  .Ltmp0:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: addLD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(addLD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(addLD)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; CMP_CC_FMT-NEXT:    ld $6, 0($1)
; CMP_CC_FMT-NEXT:    ld $7, 8($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(__addtf3)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp0, R_MIPS_JALR, __addtf3
; CMP_CC_FMT-NEXT:  .Ltmp0:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %1 = load fp128, ptr @gld1, align 16
  %add = fadd fp128 %0, %1
  ret fp128 %add
}

define fp128 @subLD() {
; C_CC_FMT-LABEL: subLD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(subLD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(subLD)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; C_CC_FMT-NEXT:    ld $6, 0($1)
; C_CC_FMT-NEXT:    ld $7, 8($1)
; C_CC_FMT-NEXT:    ld $25, %call16(__subtf3)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp1, R_MIPS_JALR, __subtf3
; C_CC_FMT-NEXT:  .Ltmp1:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: subLD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(subLD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(subLD)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; CMP_CC_FMT-NEXT:    ld $6, 0($1)
; CMP_CC_FMT-NEXT:    ld $7, 8($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(__subtf3)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp1, R_MIPS_JALR, __subtf3
; CMP_CC_FMT-NEXT:  .Ltmp1:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %1 = load fp128, ptr @gld1, align 16
  %sub = fsub fp128 %0, %1
  ret fp128 %sub
}

define fp128 @mulLD() {
; C_CC_FMT-LABEL: mulLD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(mulLD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(mulLD)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; C_CC_FMT-NEXT:    ld $6, 0($1)
; C_CC_FMT-NEXT:    ld $7, 8($1)
; C_CC_FMT-NEXT:    ld $25, %call16(__multf3)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp2, R_MIPS_JALR, __multf3
; C_CC_FMT-NEXT:  .Ltmp2:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: mulLD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(mulLD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(mulLD)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; CMP_CC_FMT-NEXT:    ld $6, 0($1)
; CMP_CC_FMT-NEXT:    ld $7, 8($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(__multf3)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp2, R_MIPS_JALR, __multf3
; CMP_CC_FMT-NEXT:  .Ltmp2:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %1 = load fp128, ptr @gld1, align 16
  %mul = fmul fp128 %0, %1
  ret fp128 %mul
}

define fp128 @divLD() {
; C_CC_FMT-LABEL: divLD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(divLD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(divLD)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; C_CC_FMT-NEXT:    ld $6, 0($1)
; C_CC_FMT-NEXT:    ld $7, 8($1)
; C_CC_FMT-NEXT:    ld $25, %call16(__divtf3)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp3, R_MIPS_JALR, __divtf3
; C_CC_FMT-NEXT:  .Ltmp3:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: divLD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(divLD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(divLD)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; CMP_CC_FMT-NEXT:    ld $6, 0($1)
; CMP_CC_FMT-NEXT:    ld $7, 8($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(__divtf3)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp3, R_MIPS_JALR, __divtf3
; CMP_CC_FMT-NEXT:  .Ltmp3:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %1 = load fp128, ptr @gld1, align 16
  %div = fdiv fp128 %0, %1
  ret fp128 %div
}

define fp128 @conv_LD_char(i8 signext %a) {
; C_CC_FMT-LABEL: conv_LD_char:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_char)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_char)))
; C_CC_FMT-NEXT:    ld $25, %call16(__floatsitf)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp4, R_MIPS_JALR, __floatsitf
; C_CC_FMT-NEXT:  .Ltmp4:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_LD_char:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_char)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_char)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__floatsitf)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp4, R_MIPS_JALR, __floatsitf
; CMP_CC_FMT-NEXT:  .Ltmp4:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = sitofp i8 %a to fp128
  ret fp128 %conv
}

define fp128 @conv_LD_short(i16 signext %a) {
; C_CC_FMT-LABEL: conv_LD_short:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_short)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_short)))
; C_CC_FMT-NEXT:    ld $25, %call16(__floatsitf)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp5, R_MIPS_JALR, __floatsitf
; C_CC_FMT-NEXT:  .Ltmp5:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_LD_short:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_short)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_short)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__floatsitf)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp5, R_MIPS_JALR, __floatsitf
; CMP_CC_FMT-NEXT:  .Ltmp5:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = sitofp i16 %a to fp128
  ret fp128 %conv
}

define fp128 @conv_LD_int(i32 %a) {
; C_CC_FMT-LABEL: conv_LD_int:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_int)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_int)))
; C_CC_FMT-NEXT:    sll $4, $4, 0
; C_CC_FMT-NEXT:    ld $25, %call16(__floatsitf)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp6, R_MIPS_JALR, __floatsitf
; C_CC_FMT-NEXT:  .Ltmp6:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_LD_int:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_int)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_int)))
; CMP_CC_FMT-NEXT:    sll $4, $4, 0
; CMP_CC_FMT-NEXT:    ld $25, %call16(__floatsitf)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp6, R_MIPS_JALR, __floatsitf
; CMP_CC_FMT-NEXT:  .Ltmp6:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = sitofp i32 %a to fp128
  ret fp128 %conv
}

define fp128 @conv_LD_LL(i64 %a) {
; C_CC_FMT-LABEL: conv_LD_LL:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_LL)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_LL)))
; C_CC_FMT-NEXT:    ld $25, %call16(__floatditf)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp7, R_MIPS_JALR, __floatditf
; C_CC_FMT-NEXT:  .Ltmp7:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_LD_LL:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_LL)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_LL)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__floatditf)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp7, R_MIPS_JALR, __floatditf
; CMP_CC_FMT-NEXT:  .Ltmp7:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = sitofp i64 %a to fp128
  ret fp128 %conv
}

define fp128 @conv_LD_UChar(i8 zeroext %a) {
; C_CC_FMT-LABEL: conv_LD_UChar:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_UChar)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_UChar)))
; C_CC_FMT-NEXT:    ld $25, %call16(__floatunsitf)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp8, R_MIPS_JALR, __floatunsitf
; C_CC_FMT-NEXT:  .Ltmp8:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_LD_UChar:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_UChar)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_UChar)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__floatunsitf)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp8, R_MIPS_JALR, __floatunsitf
; CMP_CC_FMT-NEXT:  .Ltmp8:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = uitofp i8 %a to fp128
  ret fp128 %conv
}

define fp128 @conv_LD_UShort(i16 zeroext %a) {
; C_CC_FMT-LABEL: conv_LD_UShort:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_UShort)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_UShort)))
; C_CC_FMT-NEXT:    ld $25, %call16(__floatunsitf)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp9, R_MIPS_JALR, __floatunsitf
; C_CC_FMT-NEXT:  .Ltmp9:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_LD_UShort:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_UShort)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_UShort)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__floatunsitf)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp9, R_MIPS_JALR, __floatunsitf
; CMP_CC_FMT-NEXT:  .Ltmp9:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = uitofp i16 %a to fp128
  ret fp128 %conv
}

define fp128 @conv_LD_UInt(i32 signext %a) {
; C_CC_FMT-LABEL: conv_LD_UInt:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_UInt)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_UInt)))
; C_CC_FMT-NEXT:    ld $25, %call16(__floatunsitf)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp10, R_MIPS_JALR, __floatunsitf
; C_CC_FMT-NEXT:  .Ltmp10:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_LD_UInt:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_UInt)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_UInt)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__floatunsitf)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp10, R_MIPS_JALR, __floatunsitf
; CMP_CC_FMT-NEXT:  .Ltmp10:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = uitofp i32 %a to fp128
  ret fp128 %conv
}

define fp128 @conv_LD_ULL(i64 %a) {
; C_CC_FMT-LABEL: conv_LD_ULL:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_ULL)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_ULL)))
; C_CC_FMT-NEXT:    ld $25, %call16(__floatunditf)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp11, R_MIPS_JALR, __floatunditf
; C_CC_FMT-NEXT:  .Ltmp11:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_LD_ULL:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_ULL)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_ULL)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__floatunditf)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp11, R_MIPS_JALR, __floatunditf
; CMP_CC_FMT-NEXT:  .Ltmp11:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = uitofp i64 %a to fp128
  ret fp128 %conv
}

define signext i8 @conv_char_LD(fp128 %a) {
; C_CC_FMT-LABEL: conv_char_LD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_char_LD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_char_LD)))
; C_CC_FMT-NEXT:    ld $25, %call16(__fixtfsi)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp12, R_MIPS_JALR, __fixtfsi
; C_CC_FMT-NEXT:  .Ltmp12:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_char_LD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_char_LD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_char_LD)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__fixtfsi)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp12, R_MIPS_JALR, __fixtfsi
; CMP_CC_FMT-NEXT:  .Ltmp12:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = fptosi fp128 %a to i8
  ret i8 %conv
}

define signext i16 @conv_short_LD(fp128 %a) {
; C_CC_FMT-LABEL: conv_short_LD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_short_LD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_short_LD)))
; C_CC_FMT-NEXT:    ld $25, %call16(__fixtfsi)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp13, R_MIPS_JALR, __fixtfsi
; C_CC_FMT-NEXT:  .Ltmp13:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_short_LD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_short_LD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_short_LD)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__fixtfsi)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp13, R_MIPS_JALR, __fixtfsi
; CMP_CC_FMT-NEXT:  .Ltmp13:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = fptosi fp128 %a to i16
  ret i16 %conv
}

define i32 @conv_int_LD(fp128 %a) {
; C_CC_FMT-LABEL: conv_int_LD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_int_LD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_int_LD)))
; C_CC_FMT-NEXT:    ld $25, %call16(__fixtfsi)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp14, R_MIPS_JALR, __fixtfsi
; C_CC_FMT-NEXT:  .Ltmp14:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_int_LD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_int_LD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_int_LD)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__fixtfsi)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp14, R_MIPS_JALR, __fixtfsi
; CMP_CC_FMT-NEXT:  .Ltmp14:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = fptosi fp128 %a to i32
  ret i32 %conv
}

define i64 @conv_LL_LD(fp128 %a) {
; C_CC_FMT-LABEL: conv_LL_LD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LL_LD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LL_LD)))
; C_CC_FMT-NEXT:    ld $25, %call16(__fixtfdi)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp15, R_MIPS_JALR, __fixtfdi
; C_CC_FMT-NEXT:  .Ltmp15:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_LL_LD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LL_LD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LL_LD)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__fixtfdi)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp15, R_MIPS_JALR, __fixtfdi
; CMP_CC_FMT-NEXT:  .Ltmp15:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = fptosi fp128 %a to i64
  ret i64 %conv
}

define zeroext i8 @conv_UChar_LD(fp128 %a) {
; C_CC_FMT-LABEL: conv_UChar_LD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_UChar_LD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_UChar_LD)))
; C_CC_FMT-NEXT:    ld $25, %call16(__fixtfsi)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp16, R_MIPS_JALR, __fixtfsi
; C_CC_FMT-NEXT:  .Ltmp16:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_UChar_LD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_UChar_LD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_UChar_LD)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__fixtfsi)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp16, R_MIPS_JALR, __fixtfsi
; CMP_CC_FMT-NEXT:  .Ltmp16:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = fptoui fp128 %a to i8
  ret i8 %conv
}

define zeroext i16 @conv_UShort_LD(fp128 %a) {
; C_CC_FMT-LABEL: conv_UShort_LD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_UShort_LD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_UShort_LD)))
; C_CC_FMT-NEXT:    ld $25, %call16(__fixtfsi)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp17, R_MIPS_JALR, __fixtfsi
; C_CC_FMT-NEXT:  .Ltmp17:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_UShort_LD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_UShort_LD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_UShort_LD)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__fixtfsi)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp17, R_MIPS_JALR, __fixtfsi
; CMP_CC_FMT-NEXT:  .Ltmp17:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = fptoui fp128 %a to i16
  ret i16 %conv
}

define i32 @conv_UInt_LD(fp128 %a) {
; C_CC_FMT-LABEL: conv_UInt_LD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_UInt_LD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_UInt_LD)))
; C_CC_FMT-NEXT:    ld $25, %call16(__fixunstfsi)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp18, R_MIPS_JALR, __fixunstfsi
; C_CC_FMT-NEXT:  .Ltmp18:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_UInt_LD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_UInt_LD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_UInt_LD)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__fixunstfsi)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp18, R_MIPS_JALR, __fixunstfsi
; CMP_CC_FMT-NEXT:  .Ltmp18:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = fptoui fp128 %a to i32
  ret i32 %conv
}

define i64 @conv_ULL_LD(fp128 %a) {
; C_CC_FMT-LABEL: conv_ULL_LD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_ULL_LD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_ULL_LD)))
; C_CC_FMT-NEXT:    ld $25, %call16(__fixunstfdi)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp19, R_MIPS_JALR, __fixunstfdi
; C_CC_FMT-NEXT:  .Ltmp19:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_ULL_LD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_ULL_LD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_ULL_LD)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__fixunstfdi)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp19, R_MIPS_JALR, __fixunstfdi
; CMP_CC_FMT-NEXT:  .Ltmp19:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = fptoui fp128 %a to i64
  ret i64 %conv
}

define fp128 @conv_LD_float(float %a) {
; C_CC_FMT-LABEL: conv_LD_float:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_float)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_float)))
; C_CC_FMT-NEXT:    ld $25, %call16(__extendsftf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp20, R_MIPS_JALR, __extendsftf2
; C_CC_FMT-NEXT:  .Ltmp20:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_LD_float:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_float)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_float)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__extendsftf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp20, R_MIPS_JALR, __extendsftf2
; CMP_CC_FMT-NEXT:  .Ltmp20:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = fpext float %a to fp128
  ret fp128 %conv
}

define fp128 @conv_LD_double(double %a) {
; C_CC_FMT-LABEL: conv_LD_double:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_double)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_double)))
; C_CC_FMT-NEXT:    ld $25, %call16(__extenddftf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp21, R_MIPS_JALR, __extenddftf2
; C_CC_FMT-NEXT:  .Ltmp21:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_LD_double:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_LD_double)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_LD_double)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__extenddftf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp21, R_MIPS_JALR, __extenddftf2
; CMP_CC_FMT-NEXT:  .Ltmp21:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = fpext double %a to fp128
  ret fp128 %conv
}

define float @conv_float_LD(fp128 %a) {
; C_CC_FMT-LABEL: conv_float_LD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_float_LD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_float_LD)))
; C_CC_FMT-NEXT:    ld $25, %call16(__trunctfsf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp22, R_MIPS_JALR, __trunctfsf2
; C_CC_FMT-NEXT:  .Ltmp22:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_float_LD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_float_LD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_float_LD)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__trunctfsf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp22, R_MIPS_JALR, __trunctfsf2
; CMP_CC_FMT-NEXT:  .Ltmp22:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = fptrunc fp128 %a to float
  ret float %conv
}

define double @conv_double_LD(fp128 %a) {
; C_CC_FMT-LABEL: conv_double_LD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_double_LD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_double_LD)))
; C_CC_FMT-NEXT:    ld $25, %call16(__trunctfdf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp23, R_MIPS_JALR, __trunctfdf2
; C_CC_FMT-NEXT:  .Ltmp23:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: conv_double_LD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(conv_double_LD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(conv_double_LD)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__trunctfdf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp23, R_MIPS_JALR, __trunctfdf2
; CMP_CC_FMT-NEXT:  .Ltmp23:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %conv = fptrunc fp128 %a to double
  ret double %conv
}

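; fabsl is expanded inline rather than called: the sign bit of the fp128
; value is cleared with a shifted-mask AND (older cores) or a dextm
; bit-field extract, so no libcall or stack frame is needed.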
define fp128 @libcall1_fabsl() {
; NOT-R2R6-LABEL: libcall1_fabsl:
; NOT-R2R6:       # %bb.0: # %entry
; NOT-R2R6-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_fabsl)))
; NOT-R2R6-NEXT:    daddu $1, $1, $25
; NOT-R2R6-NEXT:    daddiu $1, $1, %lo(%neg(%gp_rel(libcall1_fabsl)))
; NOT-R2R6-NEXT:    ld $1, %got_disp(gld0)($1)
; NOT-R2R6-NEXT:    ld $2, 8($1)
; NOT-R2R6-NEXT:    daddiu $3, $zero, 1
; NOT-R2R6-NEXT:    dsll $3, $3, 63
; NOT-R2R6-NEXT:    daddiu $3, $3, -1
; NOT-R2R6-NEXT:    and $4, $2, $3
; NOT-R2R6-NEXT:    ld $2, 0($1)
; NOT-R2R6-NEXT:    jr $ra
; NOT-R2R6-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall1_fabsl:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_fabsl)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $1, $1, %lo(%neg(%gp_rel(libcall1_fabsl)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($1)
; CMP_CC_FMT-NEXT:    ld $2, 0($1)
; CMP_CC_FMT-NEXT:    ld $1, 8($1)
; CMP_CC_FMT-NEXT:    dextm $4, $1, 0, 63
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %call = tail call fp128 @fabsl(fp128 %0) nounwind readnone
  ret fp128 %call
}

declare fp128 @fabsl(fp128) #1

define fp128 @libcall1_ceill() {
; C_CC_FMT-LABEL: libcall1_ceill:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_ceill)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_ceill)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $25, %call16(ceill)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp24, R_MIPS_JALR, ceill
; C_CC_FMT-NEXT:  .Ltmp24:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall1_ceill:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_ceill)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_ceill)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(ceill)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp24, R_MIPS_JALR, ceill
; CMP_CC_FMT-NEXT:  .Ltmp24:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %call = tail call fp128 @ceill(fp128 %0) nounwind readnone
  ret fp128 %call
}

declare fp128 @ceill(fp128) #1

define fp128 @libcall1_sinl() {
; C_CC_FMT-LABEL: libcall1_sinl:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_sinl)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_sinl)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $25, %call16(sinl)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp25, R_MIPS_JALR, sinl
; C_CC_FMT-NEXT:  .Ltmp25:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall1_sinl:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_sinl)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_sinl)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(sinl)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp25, R_MIPS_JALR, sinl
; CMP_CC_FMT-NEXT:  .Ltmp25:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %call = tail call fp128 @sinl(fp128 %0) nounwind
  ret fp128 %call
}

declare fp128 @sinl(fp128) #2

define fp128 @libcall1_cosl() {
; C_CC_FMT-LABEL: libcall1_cosl:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_cosl)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_cosl)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $25, %call16(cosl)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp26, R_MIPS_JALR, cosl
; C_CC_FMT-NEXT:  .Ltmp26:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall1_cosl:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_cosl)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_cosl)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(cosl)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp26, R_MIPS_JALR, cosl
; CMP_CC_FMT-NEXT:  .Ltmp26:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %call = tail call fp128 @cosl(fp128 %0) nounwind
  ret fp128 %call
}

declare fp128 @cosl(fp128) #2

define fp128 @libcall1_expl() {
; C_CC_FMT-LABEL: libcall1_expl:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_expl)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_expl)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $25, %call16(expl)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp27, R_MIPS_JALR, expl
; C_CC_FMT-NEXT:  .Ltmp27:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall1_expl:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_expl)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_expl)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(expl)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp27, R_MIPS_JALR, expl
; CMP_CC_FMT-NEXT:  .Ltmp27:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %call = tail call fp128 @expl(fp128 %0) nounwind
  ret fp128 %call
}

declare fp128 @expl(fp128) #2

define fp128 @libcall1_exp2l() {
; C_CC_FMT-LABEL: libcall1_exp2l:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_exp2l)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_exp2l)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $25, %call16(exp2l)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp28, R_MIPS_JALR, exp2l
; C_CC_FMT-NEXT:  .Ltmp28:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall1_exp2l:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_exp2l)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_exp2l)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(exp2l)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp28, R_MIPS_JALR, exp2l
; CMP_CC_FMT-NEXT:  .Ltmp28:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %call = tail call fp128 @exp2l(fp128 %0) nounwind
  ret fp128 %call
}

declare fp128 @exp2l(fp128) #2

define fp128 @libcall1_logl() {
; C_CC_FMT-LABEL: libcall1_logl:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_logl)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_logl)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $25, %call16(logl)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp29, R_MIPS_JALR, logl
; C_CC_FMT-NEXT:  .Ltmp29:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall1_logl:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_logl)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_logl)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(logl)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp29, R_MIPS_JALR, logl
; CMP_CC_FMT-NEXT:  .Ltmp29:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %call = tail call fp128 @logl(fp128 %0) nounwind
  ret fp128 %call
}

declare fp128 @logl(fp128) #2

define fp128 @libcall1_log2l() {
; C_CC_FMT-LABEL: libcall1_log2l:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_log2l)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_log2l)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $25, %call16(log2l)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp30, R_MIPS_JALR, log2l
; C_CC_FMT-NEXT:  .Ltmp30:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall1_log2l:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_log2l)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_log2l)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(log2l)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp30, R_MIPS_JALR, log2l
; CMP_CC_FMT-NEXT:  .Ltmp30:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %call = tail call fp128 @log2l(fp128 %0) nounwind
  ret fp128 %call
}

declare fp128 @log2l(fp128) #2

define fp128 @libcall1_log10l() {
; C_CC_FMT-LABEL: libcall1_log10l:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_log10l)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_log10l)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $25, %call16(log10l)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp31, R_MIPS_JALR, log10l
; C_CC_FMT-NEXT:  .Ltmp31:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall1_log10l:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_log10l)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_log10l)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(log10l)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp31, R_MIPS_JALR, log10l
; CMP_CC_FMT-NEXT:  .Ltmp31:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %call = tail call fp128 @log10l(fp128 %0) nounwind
  ret fp128 %call
}

declare fp128 @log10l(fp128) #2

define fp128 @libcall1_nearbyintl() {
; C_CC_FMT-LABEL: libcall1_nearbyintl:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_nearbyintl)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_nearbyintl)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $25, %call16(nearbyintl)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp32, R_MIPS_JALR, nearbyintl
; C_CC_FMT-NEXT:  .Ltmp32:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall1_nearbyintl:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_nearbyintl)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_nearbyintl)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(nearbyintl)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp32, R_MIPS_JALR, nearbyintl
; CMP_CC_FMT-NEXT:  .Ltmp32:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %call = tail call fp128 @nearbyintl(fp128 %0) nounwind readnone
  ret fp128 %call
}

declare fp128 @nearbyintl(fp128) #1

define fp128 @libcall1_floorl() {
; C_CC_FMT-LABEL: libcall1_floorl:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_floorl)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_floorl)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $25, %call16(floorl)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp33, R_MIPS_JALR, floorl
; C_CC_FMT-NEXT:  .Ltmp33:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall1_floorl:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_floorl)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_floorl)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(floorl)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp33, R_MIPS_JALR, floorl
; CMP_CC_FMT-NEXT:  .Ltmp33:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %call = tail call fp128 @floorl(fp128 %0) nounwind readnone
  ret fp128 %call
}

declare fp128 @floorl(fp128) #1

define fp128 @libcall1_sqrtl() {
; C_CC_FMT-LABEL: libcall1_sqrtl:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_sqrtl)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_sqrtl)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $25, %call16(sqrtl)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp34, R_MIPS_JALR, sqrtl
; C_CC_FMT-NEXT:  .Ltmp34:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall1_sqrtl:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_sqrtl)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_sqrtl)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(sqrtl)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp34, R_MIPS_JALR, sqrtl
; CMP_CC_FMT-NEXT:  .Ltmp34:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %call = tail call fp128 @sqrtl(fp128 %0) nounwind
  ret fp128 %call
}

declare fp128 @sqrtl(fp128) #2

define fp128 @libcall1_rintl() {
; C_CC_FMT-LABEL: libcall1_rintl:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_rintl)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_rintl)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $25, %call16(rintl)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp35, R_MIPS_JALR, rintl
; C_CC_FMT-NEXT:  .Ltmp35:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall1_rintl:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall1_rintl)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall1_rintl)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(rintl)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp35, R_MIPS_JALR, rintl
; CMP_CC_FMT-NEXT:  .Ltmp35:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %call = tail call fp128 @rintl(fp128 %0) nounwind readnone
  ret fp128 %call
}

declare fp128 @rintl(fp128) #1

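; llvm.powi.f128.i32 is lowered to a call to the runtime helper __powitf2. The
; checks below also show the incoming i32 exponent in $6 being sign-extended with
; "sll $6, $6, 0" before the call.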
define fp128 @libcall_powil(fp128 %a, i32 %b) {
; C_CC_FMT-LABEL: libcall_powil:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall_powil)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall_powil)))
; C_CC_FMT-NEXT:    sll $6, $6, 0
; C_CC_FMT-NEXT:    ld $25, %call16(__powitf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp36, R_MIPS_JALR, __powitf2
; C_CC_FMT-NEXT:  .Ltmp36:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall_powil:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall_powil)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall_powil)))
; CMP_CC_FMT-NEXT:    sll $6, $6, 0
; CMP_CC_FMT-NEXT:    ld $25, %call16(__powitf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp36, R_MIPS_JALR, __powitf2
; CMP_CC_FMT-NEXT:  .Ltmp36:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = tail call fp128 @llvm.powi.f128.i32(fp128 %a, i32 %b)
  ret fp128 %0
}

declare fp128 @llvm.powi.f128.i32(fp128, i32) #3

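; copysignl is expanded inline rather than being left as a libcall: the pre-R2
; (NOT-R2R6) output builds an explicit sign mask with daddiu/dsll/and/or, while
; the R6 (CMP_CC_FMT) output extracts the sign with dsrl and inserts it with dinsu.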
define fp128 @libcall2_copysignl() {
; NOT-R2R6-LABEL: libcall2_copysignl:
; NOT-R2R6:       # %bb.0: # %entry
; NOT-R2R6-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall2_copysignl)))
; NOT-R2R6-NEXT:    daddu $1, $1, $25
; NOT-R2R6-NEXT:    daddiu $1, $1, %lo(%neg(%gp_rel(libcall2_copysignl)))
; NOT-R2R6-NEXT:    daddiu $2, $zero, 1
; NOT-R2R6-NEXT:    dsll $2, $2, 63
; NOT-R2R6-NEXT:    ld $3, %got_disp(gld1)($1)
; NOT-R2R6-NEXT:    ld $3, 8($3)
; NOT-R2R6-NEXT:    and $3, $3, $2
; NOT-R2R6-NEXT:    ld $1, %got_disp(gld0)($1)
; NOT-R2R6-NEXT:    ld $4, 8($1)
; NOT-R2R6-NEXT:    daddiu $2, $2, -1
; NOT-R2R6-NEXT:    and $2, $4, $2
; NOT-R2R6-NEXT:    or $4, $2, $3
; NOT-R2R6-NEXT:    ld $2, 0($1)
; NOT-R2R6-NEXT:    jr $ra
; NOT-R2R6-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall2_copysignl:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall2_copysignl)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $1, $1, %lo(%neg(%gp_rel(libcall2_copysignl)))
; CMP_CC_FMT-NEXT:    ld $2, %got_disp(gld0)($1)
; CMP_CC_FMT-NEXT:    ld $4, 8($2)
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($1)
; CMP_CC_FMT-NEXT:    ld $1, 8($1)
; CMP_CC_FMT-NEXT:    dsrl $1, $1, 63
; CMP_CC_FMT-NEXT:    dinsu $4, $1, 63, 1
; CMP_CC_FMT-NEXT:    ld $2, 0($2)
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %1 = load fp128, ptr @gld1, align 16
  %call = tail call fp128 @copysignl(fp128 %0, fp128 %1) nounwind readnone
  ret fp128 %call
}

declare fp128 @copysignl(fp128, fp128) #1

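; The two-operand libcalls below (powl, fmodl) pass the first fp128 argument in
; $4/$5 and the second in $6/$7, then call the routine through $25 as above.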
define fp128 @libcall2_powl() {
; C_CC_FMT-LABEL: libcall2_powl:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall2_powl)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall2_powl)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; C_CC_FMT-NEXT:    ld $7, 8($1)
; C_CC_FMT-NEXT:    ld $6, 0($1)
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $25, %call16(powl)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp37, R_MIPS_JALR, powl
; C_CC_FMT-NEXT:  .Ltmp37:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall2_powl:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall2_powl)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall2_powl)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; CMP_CC_FMT-NEXT:    ld $7, 8($1)
; CMP_CC_FMT-NEXT:    ld $6, 0($1)
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(powl)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp37, R_MIPS_JALR, powl
; CMP_CC_FMT-NEXT:  .Ltmp37:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %1 = load fp128, ptr @gld1, align 16
  %call = tail call fp128 @powl(fp128 %0, fp128 %1) nounwind
  ret fp128 %call
}

declare fp128 @powl(fp128, fp128) #2

define fp128 @libcall2_fmodl() {
; C_CC_FMT-LABEL: libcall2_fmodl:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall2_fmodl)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall2_fmodl)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; C_CC_FMT-NEXT:    ld $7, 8($1)
; C_CC_FMT-NEXT:    ld $6, 0($1)
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $25, %call16(fmodl)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp38, R_MIPS_JALR, fmodl
; C_CC_FMT-NEXT:  .Ltmp38:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall2_fmodl:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall2_fmodl)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall2_fmodl)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; CMP_CC_FMT-NEXT:    ld $7, 8($1)
; CMP_CC_FMT-NEXT:    ld $6, 0($1)
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(fmodl)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp38, R_MIPS_JALR, fmodl
; CMP_CC_FMT-NEXT:  .Ltmp38:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %1 = load fp128, ptr @gld1, align 16
  %call = tail call fp128 @fmodl(fp128 %0, fp128 %1) nounwind
  ret fp128 %call
}

declare fp128 @fmodl(fp128, fp128) #2

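; llvm.fma.f128 is lowered to a call to fmal with the three fp128 operands in the
; $4/$5, $6/$7 and $8/$9 register pairs.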
define fp128 @libcall3_fmal() {
; C_CC_FMT-LABEL: libcall3_fmal:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall3_fmal)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall3_fmal)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; C_CC_FMT-NEXT:    ld $6, 0($1)
; C_CC_FMT-NEXT:    ld $7, 8($1)
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld2)($gp)
; C_CC_FMT-NEXT:    ld $8, 0($1)
; C_CC_FMT-NEXT:    ld $9, 8($1)
; C_CC_FMT-NEXT:    ld $25, %call16(fmal)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp39, R_MIPS_JALR, fmal
; C_CC_FMT-NEXT:  .Ltmp39:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: libcall3_fmal:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(libcall3_fmal)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(libcall3_fmal)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($gp)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; CMP_CC_FMT-NEXT:    ld $6, 0($1)
; CMP_CC_FMT-NEXT:    ld $7, 8($1)
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld2)($gp)
; CMP_CC_FMT-NEXT:    ld $8, 0($1)
; CMP_CC_FMT-NEXT:    ld $9, 8($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(fmal)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp39, R_MIPS_JALR, fmal
; CMP_CC_FMT-NEXT:  .Ltmp39:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld0, align 16
  %1 = load fp128, ptr @gld2, align 16
  %2 = load fp128, ptr @gld1, align 16
  %3 = tail call fp128 @llvm.fma.f128(fp128 %0, fp128 %2, fp128 %1)
  ret fp128 %3
}

declare fp128 @llvm.fma.f128(fp128, fp128, fp128) #4

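; The cmp_* tests check that ordered (and, for cmp_ne, unordered-or-unequal) fp128
; comparisons lower to calls to the soft-float comparison helpers (__lttf2,
; __letf2, __gttf2, __getf2, __eqtf2, __netf2) and that the i32 result returned in
; $2 is converted to a 0/1 value with slti, slt, sltiu or sltu against the
; appropriate bound.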
define i32 @cmp_lt(fp128 %a, fp128 %b) {
; C_CC_FMT-LABEL: cmp_lt:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(cmp_lt)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(cmp_lt)))
; C_CC_FMT-NEXT:    ld $25, %call16(__lttf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp40, R_MIPS_JALR, __lttf2
; C_CC_FMT-NEXT:  .Ltmp40:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    slti $2, $2, 0
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: cmp_lt:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(cmp_lt)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(cmp_lt)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__lttf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp40, R_MIPS_JALR, __lttf2
; CMP_CC_FMT-NEXT:  .Ltmp40:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    slti $2, $2, 0
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %cmp = fcmp olt fp128 %a, %b
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_le(fp128 %a, fp128 %b) {
; C_CC_FMT-LABEL: cmp_le:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(cmp_le)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(cmp_le)))
; C_CC_FMT-NEXT:    ld $25, %call16(__letf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp41, R_MIPS_JALR, __letf2
; C_CC_FMT-NEXT:  .Ltmp41:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    slti $2, $2, 1
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: cmp_le:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(cmp_le)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(cmp_le)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__letf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp41, R_MIPS_JALR, __letf2
; CMP_CC_FMT-NEXT:  .Ltmp41:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    slti $2, $2, 1
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %cmp = fcmp ole fp128 %a, %b
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_gt(fp128 %a, fp128 %b) {
; C_CC_FMT-LABEL: cmp_gt:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(cmp_gt)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(cmp_gt)))
; C_CC_FMT-NEXT:    ld $25, %call16(__gttf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp42, R_MIPS_JALR, __gttf2
; C_CC_FMT-NEXT:  .Ltmp42:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    slt $2, $zero, $2
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: cmp_gt:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(cmp_gt)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(cmp_gt)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__gttf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp42, R_MIPS_JALR, __gttf2
; CMP_CC_FMT-NEXT:  .Ltmp42:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    slt $2, $zero, $2
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %cmp = fcmp ogt fp128 %a, %b
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_ge(fp128 %a, fp128 %b) {
; C_CC_FMT-LABEL: cmp_ge:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(cmp_ge)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(cmp_ge)))
; C_CC_FMT-NEXT:    ld $25, %call16(__getf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp43, R_MIPS_JALR, __getf2
; C_CC_FMT-NEXT:  .Ltmp43:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    addiu $1, $zero, -1
; C_CC_FMT-NEXT:    slt $2, $1, $2
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: cmp_ge:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(cmp_ge)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(cmp_ge)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__getf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp43, R_MIPS_JALR, __getf2
; CMP_CC_FMT-NEXT:  .Ltmp43:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    addiu $1, $zero, -1
; CMP_CC_FMT-NEXT:    slt $2, $1, $2
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %cmp = fcmp oge fp128 %a, %b
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq(fp128 %a, fp128 %b) {
; C_CC_FMT-LABEL: cmp_eq:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(cmp_eq)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(cmp_eq)))
; C_CC_FMT-NEXT:    ld $25, %call16(__eqtf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp44, R_MIPS_JALR, __eqtf2
; C_CC_FMT-NEXT:  .Ltmp44:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    sltiu $2, $2, 1
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: cmp_eq:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(cmp_eq)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(cmp_eq)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__eqtf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp44, R_MIPS_JALR, __eqtf2
; CMP_CC_FMT-NEXT:  .Ltmp44:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    sltiu $2, $2, 1
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %cmp = fcmp oeq fp128 %a, %b
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_ne(fp128 %a, fp128 %b) {
; C_CC_FMT-LABEL: cmp_ne:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(cmp_ne)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(cmp_ne)))
; C_CC_FMT-NEXT:    ld $25, %call16(__netf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp45, R_MIPS_JALR, __netf2
; C_CC_FMT-NEXT:  .Ltmp45:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    sltu $2, $zero, $2
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: cmp_ne:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(cmp_ne)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(cmp_ne)))
; CMP_CC_FMT-NEXT:    ld $25, %call16(__netf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp45, R_MIPS_JALR, __netf2
; CMP_CC_FMT-NEXT:  .Ltmp45:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    sltu $2, $zero, $2
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %cmp = fcmp une fp128 %a, %b
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

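; A plain fp128 load and return needs no libcall: the two 8-byte halves are loaded
; straight into the $2/$4 return registers, and only the return sequence differs
; between jr+nop and jrc.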
define fp128 @load_LD_LD() {
; C_CC_FMT-LABEL: load_LD_LD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(load_LD_LD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $1, $1, %lo(%neg(%gp_rel(load_LD_LD)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($1)
; C_CC_FMT-NEXT:    ld $2, 0($1)
; C_CC_FMT-NEXT:    ld $4, 8($1)
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: load_LD_LD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(load_LD_LD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $1, $1, %lo(%neg(%gp_rel(load_LD_LD)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($1)
; CMP_CC_FMT-NEXT:    ld $2, 0($1)
; CMP_CC_FMT-NEXT:    ld $4, 8($1)
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld1, align 16
  ret fp128 %0
}

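; fpext from float and from double to fp128 lowers to __extendsftf2 and
; __extenddftf2 respectively, shown by the next two tests.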
define fp128 @load_LD_float() {
; C_CC_FMT-LABEL: load_LD_float:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(load_LD_float)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(load_LD_float)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gf1)($gp)
; C_CC_FMT-NEXT:    lw $4, 0($1)
; C_CC_FMT-NEXT:    ld $25, %call16(__extendsftf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp46, R_MIPS_JALR, __extendsftf2
; C_CC_FMT-NEXT:  .Ltmp46:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: load_LD_float:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(load_LD_float)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(load_LD_float)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gf1)($gp)
; CMP_CC_FMT-NEXT:    lw $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(__extendsftf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp46, R_MIPS_JALR, __extendsftf2
; CMP_CC_FMT-NEXT:  .Ltmp46:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load float, ptr @gf1, align 4
  %conv = fpext float %0 to fp128
  ret fp128 %conv
}

define fp128 @load_LD_double() {
; C_CC_FMT-LABEL: load_LD_double:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(load_LD_double)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(load_LD_double)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gd1)($gp)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $25, %call16(__extenddftf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp47, R_MIPS_JALR, __extenddftf2
; C_CC_FMT-NEXT:  .Ltmp47:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: load_LD_double:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(load_LD_double)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(load_LD_double)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gd1)($gp)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(__extenddftf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp47, R_MIPS_JALR, __extenddftf2
; CMP_CC_FMT-NEXT:  .Ltmp47:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load double, ptr @gd1, align 8
  %conv = fpext double %0 to fp128
  ret fp128 %conv
}

define void @store_LD_LD() {
; C_CC_FMT-LABEL: store_LD_LD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(store_LD_LD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $1, $1, %lo(%neg(%gp_rel(store_LD_LD)))
; C_CC_FMT-NEXT:    ld $2, %got_disp(gld1)($1)
; C_CC_FMT-NEXT:    ld $3, 8($2)
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($1)
; C_CC_FMT-NEXT:    sd $3, 8($1)
; C_CC_FMT-NEXT:    ld $2, 0($2)
; C_CC_FMT-NEXT:    sd $2, 0($1)
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: store_LD_LD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(store_LD_LD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $1, $1, %lo(%neg(%gp_rel(store_LD_LD)))
; CMP_CC_FMT-NEXT:    ld $2, %got_disp(gld1)($1)
; CMP_CC_FMT-NEXT:    ld $3, 8($2)
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld0)($1)
; CMP_CC_FMT-NEXT:    sd $3, 8($1)
; CMP_CC_FMT-NEXT:    ld $2, 0($2)
; CMP_CC_FMT-NEXT:    sd $2, 0($1)
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld1, align 16
  store fp128 %0, ptr @gld0, align 16
  ret void
}

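; fptrunc from fp128 to float and to double lowers to __trunctfsf2 and
; __trunctfdf2 respectively; the narrowed result in $2 is then stored with sw/sd.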
define void @store_LD_float() {
; C_CC_FMT-LABEL: store_LD_float:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(store_LD_float)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(store_LD_float)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $25, %call16(__trunctfsf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp48, R_MIPS_JALR, __trunctfsf2
; C_CC_FMT-NEXT:  .Ltmp48:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $1, %got_disp(gf1)($gp)
; C_CC_FMT-NEXT:    sw $2, 0($1)
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: store_LD_float:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(store_LD_float)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(store_LD_float)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(__trunctfsf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp48, R_MIPS_JALR, __trunctfsf2
; CMP_CC_FMT-NEXT:  .Ltmp48:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gf1)($gp)
; CMP_CC_FMT-NEXT:    sw $2, 0($1)
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld1, align 16
  %conv = fptrunc fp128 %0 to float
  store float %conv, ptr @gf1, align 4
  ret void
}

define void @store_LD_double() {
; C_CC_FMT-LABEL: store_LD_double:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; C_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(store_LD_double)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(store_LD_double)))
; C_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; C_CC_FMT-NEXT:    ld $4, 0($1)
; C_CC_FMT-NEXT:    ld $5, 8($1)
; C_CC_FMT-NEXT:    ld $25, %call16(__trunctfdf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp49, R_MIPS_JALR, __trunctfdf2
; C_CC_FMT-NEXT:  .Ltmp49:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    ld $1, %got_disp(gd1)($gp)
; C_CC_FMT-NEXT:    sd $2, 0($1)
; C_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: store_LD_double:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -16
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 16
; CMP_CC_FMT-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(store_LD_double)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(store_LD_double)))
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gld1)($gp)
; CMP_CC_FMT-NEXT:    ld $4, 0($1)
; CMP_CC_FMT-NEXT:    ld $5, 8($1)
; CMP_CC_FMT-NEXT:    ld $25, %call16(__trunctfdf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp49, R_MIPS_JALR, __trunctfdf2
; CMP_CC_FMT-NEXT:  .Ltmp49:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    ld $1, %got_disp(gd1)($gp)
; CMP_CC_FMT-NEXT:    sd $2, 0($1)
; CMP_CC_FMT-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 16
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %0 = load fp128, ptr @gld1, align 16
  %conv = fptrunc fp128 %0 to double
  store double %conv, ptr @gd1, align 8
  ret void
}

; FIXME: This sll works around an implementation detail in the code generator
;        (setcc's result is i32 so bits 32-63 are undefined). It's not really
;        needed.
define fp128 @select_LD(i32 signext %a, i64, fp128 %b, fp128 %c) {
; C_CC_FMT-LABEL: select_LD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    movn $8, $6, $4
; C_CC_FMT-NEXT:    movn $9, $7, $4
; C_CC_FMT-NEXT:    move $2, $8
; C_CC_FMT-NEXT:    move $4, $9
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: select_LD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    sll $1, $4, 0
; CMP_CC_FMT-NEXT:    seleqz $2, $8, $1
; CMP_CC_FMT-NEXT:    selnez $3, $6, $1
; CMP_CC_FMT-NEXT:    or $2, $3, $2
; CMP_CC_FMT-NEXT:    seleqz $3, $9, $1
; CMP_CC_FMT-NEXT:    selnez $1, $7, $1
; CMP_CC_FMT-NEXT:    or $4, $1, $3
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %tobool = icmp ne i32 %a, 0
  %cond = select i1 %tobool, fp128 %b, fp128 %c
  ret fp128 %cond
}

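; selectCC_LD combines an fcmp ogt (a call to __gttf2) with a select: the pre-R6
; output tests the result with slti and uses movz on the saved operands, while the
; R6 output recomputes the condition with slt/sll and selects with seleqz/selnez.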
define fp128 @selectCC_LD(fp128 %a, fp128 %b, fp128 %c, fp128 %d) {
; C_CC_FMT-LABEL: selectCC_LD:
; C_CC_FMT:       # %bb.0: # %entry
; C_CC_FMT-NEXT:    daddiu $sp, $sp, -48
; C_CC_FMT-NEXT:    .cfi_def_cfa_offset 48
; C_CC_FMT-NEXT:    sd $ra, 40($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $gp, 32($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $19, 24($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $18, 16($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $17, 8($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    sd $16, 0($sp) # 8-byte Folded Spill
; C_CC_FMT-NEXT:    .cfi_offset 31, -8
; C_CC_FMT-NEXT:    .cfi_offset 28, -16
; C_CC_FMT-NEXT:    .cfi_offset 19, -24
; C_CC_FMT-NEXT:    .cfi_offset 18, -32
; C_CC_FMT-NEXT:    .cfi_offset 17, -40
; C_CC_FMT-NEXT:    .cfi_offset 16, -48
; C_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(selectCC_LD)))
; C_CC_FMT-NEXT:    daddu $1, $1, $25
; C_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(selectCC_LD)))
; C_CC_FMT-NEXT:    move $16, $11
; C_CC_FMT-NEXT:    move $17, $10
; C_CC_FMT-NEXT:    move $18, $9
; C_CC_FMT-NEXT:    move $19, $8
; C_CC_FMT-NEXT:    ld $25, %call16(__gttf2)($gp)
; C_CC_FMT-NEXT:    .reloc .Ltmp50, R_MIPS_JALR, __gttf2
; C_CC_FMT-NEXT:  .Ltmp50:
; C_CC_FMT-NEXT:    jalr $25
; C_CC_FMT-NEXT:    nop
; C_CC_FMT-NEXT:    slti $1, $2, 1
; C_CC_FMT-NEXT:    movz $17, $19, $1
; C_CC_FMT-NEXT:    movz $16, $18, $1
; C_CC_FMT-NEXT:    move $2, $17
; C_CC_FMT-NEXT:    move $4, $16
; C_CC_FMT-NEXT:    ld $16, 0($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $17, 8($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $18, 16($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $19, 24($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $gp, 32($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    ld $ra, 40($sp) # 8-byte Folded Reload
; C_CC_FMT-NEXT:    daddiu $sp, $sp, 48
; C_CC_FMT-NEXT:    jr $ra
; C_CC_FMT-NEXT:    nop
;
; CMP_CC_FMT-LABEL: selectCC_LD:
; CMP_CC_FMT:       # %bb.0: # %entry
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, -48
; CMP_CC_FMT-NEXT:    .cfi_def_cfa_offset 48
; CMP_CC_FMT-NEXT:    sd $ra, 40($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $gp, 32($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $19, 24($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $18, 16($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $17, 8($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    sd $16, 0($sp) # 8-byte Folded Spill
; CMP_CC_FMT-NEXT:    .cfi_offset 31, -8
; CMP_CC_FMT-NEXT:    .cfi_offset 28, -16
; CMP_CC_FMT-NEXT:    .cfi_offset 19, -24
; CMP_CC_FMT-NEXT:    .cfi_offset 18, -32
; CMP_CC_FMT-NEXT:    .cfi_offset 17, -40
; CMP_CC_FMT-NEXT:    .cfi_offset 16, -48
; CMP_CC_FMT-NEXT:    lui $1, %hi(%neg(%gp_rel(selectCC_LD)))
; CMP_CC_FMT-NEXT:    daddu $1, $1, $25
; CMP_CC_FMT-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(selectCC_LD)))
; CMP_CC_FMT-NEXT:    move $16, $11
; CMP_CC_FMT-NEXT:    move $17, $10
; CMP_CC_FMT-NEXT:    move $18, $9
; CMP_CC_FMT-NEXT:    move $19, $8
; CMP_CC_FMT-NEXT:    ld $25, %call16(__gttf2)($gp)
; CMP_CC_FMT-NEXT:    .reloc .Ltmp50, R_MIPS_JALR, __gttf2
; CMP_CC_FMT-NEXT:  .Ltmp50:
; CMP_CC_FMT-NEXT:    jalrc $25
; CMP_CC_FMT-NEXT:    slt $1, $zero, $2
; CMP_CC_FMT-NEXT:    sll $1, $1, 0
; CMP_CC_FMT-NEXT:    seleqz $2, $17, $1
; CMP_CC_FMT-NEXT:    selnez $3, $19, $1
; CMP_CC_FMT-NEXT:    or $2, $3, $2
; CMP_CC_FMT-NEXT:    seleqz $3, $16, $1
; CMP_CC_FMT-NEXT:    selnez $1, $18, $1
; CMP_CC_FMT-NEXT:    or $4, $1, $3
; CMP_CC_FMT-NEXT:    ld $16, 0($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $17, 8($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $18, 16($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $19, 24($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $gp, 32($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    ld $ra, 40($sp) # 8-byte Folded Reload
; CMP_CC_FMT-NEXT:    daddiu $sp, $sp, 48
; CMP_CC_FMT-NEXT:    jrc $ra
entry:
  %cmp = fcmp ogt fp128 %a, %b
  %cond = select i1 %cmp, fp128 %c, fp128 %d
  ret fp128 %cond
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; ALL: {{.*}}
; PRER6: {{.*}}
; R2R6: {{.*}}
; R6: {{.*}}