; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
; fptrunc f64 -> f32 selects a single fcvt.s.d on both LA32 and LA64.
define float @convert_double_to_float(double %a) nounwind {
; LA32-LABEL: convert_double_to_float:
; LA32: # %bb.0:
; LA32-NEXT: fcvt.s.d $fa0, $fa0
; LA32-NEXT: ret
;
; LA64-LABEL: convert_double_to_float:
; LA64: # %bb.0:
; LA64-NEXT: fcvt.s.d $fa0, $fa0
; LA64-NEXT: ret
  %1 = fptrunc double %a to float
  ret float %1
}
; fpext f32 -> f64 selects a single fcvt.d.s on both targets.
define double @convert_float_to_double(float %a) nounwind {
; LA32-LABEL: convert_float_to_double:
; LA32: # %bb.0:
; LA32-NEXT: fcvt.d.s $fa0, $fa0
; LA32-NEXT: ret
;
; LA64-LABEL: convert_float_to_double:
; LA64: # %bb.0:
; LA64-NEXT: fcvt.d.s $fa0, $fa0
; LA64-NEXT: ret
  %1 = fpext float %a to double
  ret double %1
}
; sitofp i8 -> f64: the signext arg is already a valid i32 in $a0, so both
; targets move it to an FP register and use the 32-bit int-to-fp convert.
define double @convert_i8_to_double(i8 signext %a) nounwind {
; LA32-LABEL: convert_i8_to_double:
; LA32: # %bb.0:
; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: ffint.d.w $fa0, $fa0
; LA32-NEXT: ret
;
; LA64-LABEL: convert_i8_to_double:
; LA64: # %bb.0:
; LA64-NEXT: movgr2fr.w $fa0, $a0
; LA64-NEXT: ffint.d.w $fa0, $fa0
; LA64-NEXT: ret
  %1 = sitofp i8 %a to double
  ret double %1
}
; sitofp i16 -> f64: same lowering as the i8 case (movgr2fr.w + ffint.d.w).
define double @convert_i16_to_double(i16 signext %a) nounwind {
; LA32-LABEL: convert_i16_to_double:
; LA32: # %bb.0:
; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: ffint.d.w $fa0, $fa0
; LA32-NEXT: ret
;
; LA64-LABEL: convert_i16_to_double:
; LA64: # %bb.0:
; LA64-NEXT: movgr2fr.w $fa0, $a0
; LA64-NEXT: ffint.d.w $fa0, $fa0
; LA64-NEXT: ret
  %1 = sitofp i16 %a to double
  ret double %1
}
; sitofp i32 -> f64 maps directly onto movgr2fr.w + ffint.d.w on both targets.
define double @convert_i32_to_double(i32 %a) nounwind {
; LA32-LABEL: convert_i32_to_double:
; LA32: # %bb.0:
; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: ffint.d.w $fa0, $fa0
; LA32-NEXT: ret
;
; LA64-LABEL: convert_i32_to_double:
; LA64: # %bb.0:
; LA64-NEXT: movgr2fr.w $fa0, $a0
; LA64-NEXT: ffint.d.w $fa0, $fa0
; LA64-NEXT: ret
  %1 = sitofp i32 %a to double
  ret double %1
}
; sitofp i64 -> f64: LA32 has no 64-bit int-to-fp instruction and emits a
; libcall to __floatdidf; LA64 uses movgr2fr.d + ffint.d.l inline.
define double @convert_i64_to_double(i64 %a) nounwind {
; LA32-LABEL: convert_i64_to_double:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: bl %plt(__floatdidf)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: convert_i64_to_double:
; LA64: # %bb.0:
; LA64-NEXT: movgr2fr.d $fa0, $a0
; LA64-NEXT: ffint.d.l $fa0, $fa0
; LA64-NEXT: ret
  %1 = sitofp i64 %a to double
  ret double %1
}
; fptosi f64 -> i32 uses the truncating convert ftintrz.w.d on both targets.
define i32 @convert_double_to_i32(double %a) nounwind {
; LA32-LABEL: convert_double_to_i32:
; LA32: # %bb.0:
; LA32-NEXT: ftintrz.w.d $fa0, $fa0
; LA32-NEXT: movfr2gr.s $a0, $fa0
; LA32-NEXT: ret
;
; LA64-LABEL: convert_double_to_i32:
; LA64: # %bb.0:
; LA64-NEXT: ftintrz.w.d $fa0, $fa0
; LA64-NEXT: movfr2gr.s $a0, $fa0
; LA64-NEXT: ret
  %1 = fptosi double %a to i32
  ret i32 %1
}
; fptoui f64 -> u32: LA32 lowers this with the compare/subtract/select
; sequence (signed convert for small values, bias-adjusted convert for values
; at or above the constant-pool threshold, merged via masknez/maskeqz).
; LA64 simply converts through a signed 64-bit ftintrz.l.d, whose range
; covers all of u32.
define i32 @convert_double_to_u32(double %a) nounwind {
; LA32-LABEL: convert_double_to_u32:
; LA32: # %bb.0:
; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI7_0)
; LA32-NEXT: fld.d $fa1, $a0, %pc_lo12(.LCPI7_0)
; LA32-NEXT: fcmp.clt.d $fcc0, $fa0, $fa1
; LA32-NEXT: fsub.d $fa1, $fa0, $fa1
; LA32-NEXT: ftintrz.w.d $fa1, $fa1
; LA32-NEXT: movfr2gr.s $a0, $fa1
; LA32-NEXT: lu12i.w $a1, -524288
; LA32-NEXT: xor $a0, $a0, $a1
; LA32-NEXT: movcf2gr $a1, $fcc0
; LA32-NEXT: masknez $a0, $a0, $a1
; LA32-NEXT: ftintrz.w.d $fa0, $fa0
; LA32-NEXT: movfr2gr.s $a2, $fa0
; LA32-NEXT: maskeqz $a1, $a2, $a1
; LA32-NEXT: or $a0, $a1, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: convert_double_to_u32:
; LA64: # %bb.0:
; LA64-NEXT: ftintrz.l.d $fa0, $fa0
; LA64-NEXT: movfr2gr.d $a0, $fa0
; LA64-NEXT: ret
  %1 = fptoui double %a to i32
  ret i32 %1
}
; fptosi f64 -> i64: LA32 emits a libcall to __fixdfdi; LA64 converts
; inline with ftintrz.l.d + movfr2gr.d.
define i64 @convert_double_to_i64(double %a) nounwind {
; LA32-LABEL: convert_double_to_i64:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: bl %plt(__fixdfdi)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: convert_double_to_i64:
; LA64: # %bb.0:
; LA64-NEXT: ftintrz.l.d $fa0, $fa0
; LA64-NEXT: movfr2gr.d $a0, $fa0
; LA64-NEXT: ret
  %1 = fptosi double %a to i64
  ret i64 %1
}
; fptoui f64 -> u64: LA32 emits a libcall to __fixunsdfdi. LA64 uses the
; inline compare/subtract/select expansion: convert directly when the input
; is below the constant-pool threshold, otherwise convert the bias-adjusted
; value and flip the sign bit, selecting the result with masknez/maskeqz.
define i64 @convert_double_to_u64(double %a) nounwind {
; LA32-LABEL: convert_double_to_u64:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: bl %plt(__fixunsdfdi)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: convert_double_to_u64:
; LA64: # %bb.0:
; LA64-NEXT: pcalau12i $a0, %pc_hi20(.LCPI9_0)
; LA64-NEXT: fld.d $fa1, $a0, %pc_lo12(.LCPI9_0)
; LA64-NEXT: fcmp.clt.d $fcc0, $fa0, $fa1
; LA64-NEXT: fsub.d $fa1, $fa0, $fa1
; LA64-NEXT: ftintrz.l.d $fa1, $fa1
; LA64-NEXT: movfr2gr.d $a0, $fa1
; LA64-NEXT: lu52i.d $a1, $zero, -2048
; LA64-NEXT: xor $a0, $a0, $a1
; LA64-NEXT: movcf2gr $a1, $fcc0
; LA64-NEXT: masknez $a0, $a0, $a1
; LA64-NEXT: ftintrz.l.d $fa0, $fa0
; LA64-NEXT: movfr2gr.d $a2, $fa0
; LA64-NEXT: maskeqz $a1, $a2, $a1
; LA64-NEXT: or $a0, $a1, $a0
; LA64-NEXT: ret
  %1 = fptoui double %a to i64
  ret i64 %1
}
; uitofp i8 -> f64: the zeroext arg is a non-negative i32, so the signed
; 32-bit convert (ffint.d.w) is correct on both targets.
define double @convert_u8_to_double(i8 zeroext %a) nounwind {
; LA32-LABEL: convert_u8_to_double:
; LA32: # %bb.0:
; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: ffint.d.w $fa0, $fa0
; LA32-NEXT: ret
;
; LA64-LABEL: convert_u8_to_double:
; LA64: # %bb.0:
; LA64-NEXT: movgr2fr.w $fa0, $a0
; LA64-NEXT: ffint.d.w $fa0, $fa0
; LA64-NEXT: ret
  %1 = uitofp i8 %a to double
  ret double %1
}
; uitofp i16 -> f64: same as the u8 case — zero-extended input fits in a
; non-negative i32, so the signed convert suffices.
define double @convert_u16_to_double(i16 zeroext %a) nounwind {
; LA32-LABEL: convert_u16_to_double:
; LA32: # %bb.0:
; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: ffint.d.w $fa0, $fa0
; LA32-NEXT: ret
;
; LA64-LABEL: convert_u16_to_double:
; LA64: # %bb.0:
; LA64-NEXT: movgr2fr.w $fa0, $a0
; LA64-NEXT: ffint.d.w $fa0, $fa0
; LA64-NEXT: ret
  %1 = uitofp i16 %a to double
  ret double %1
}
; uitofp i32 -> f64: LA32 builds a double on the stack from a magic high
; word (275200 = 0x43300, i.e. the exponent of 2^52) plus the u32 low word,
; loads it, and subtracts the matching bias constant from the constant pool.
; LA64 zero-extends with bstrpick.d and uses the 64-bit signed convert.
define double @convert_u32_to_double(i32 %a) nounwind {
; LA32-LABEL: convert_u32_to_double:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: lu12i.w $a1, 275200
; LA32-NEXT: st.w $a1, $sp, 12
; LA32-NEXT: st.w $a0, $sp, 8
; LA32-NEXT: fld.d $fa0, $sp, 8
; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI12_0)
; LA32-NEXT: fld.d $fa1, $a0, %pc_lo12(.LCPI12_0)
; LA32-NEXT: fsub.d $fa0, $fa0, $fa1
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: convert_u32_to_double:
; LA64: # %bb.0:
; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
; LA64-NEXT: movgr2fr.d $fa0, $a0
; LA64-NEXT: ffint.d.l $fa0, $fa0
; LA64-NEXT: ret
  %1 = uitofp i32 %a to double
  ret double %1
}
; uitofp i64 -> f64: LA32 emits a libcall to __floatundidf. LA64 uses the
; two-halves expansion: the high 32 bits are ORed with a 2^84-scale exponent
; (lu52i.d 1107) and de-biased via the constant-pool value, the low 32 bits
; get a 2^52-scale exponent inserted with bstrins.d, and the halves are
; summed with fadd.d.
define double @convert_u64_to_double(i64 %a) nounwind {
; LA32-LABEL: convert_u64_to_double:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: bl %plt(__floatundidf)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: convert_u64_to_double:
; LA64: # %bb.0:
; LA64-NEXT: srli.d $a1, $a0, 32
; LA64-NEXT: pcalau12i $a2, %pc_hi20(.LCPI13_0)
; LA64-NEXT: fld.d $fa0, $a2, %pc_lo12(.LCPI13_0)
; LA64-NEXT: lu52i.d $a2, $zero, 1107
; LA64-NEXT: or $a1, $a1, $a2
; LA64-NEXT: movgr2fr.d $fa1, $a1
; LA64-NEXT: fsub.d $fa0, $fa1, $fa0
; LA64-NEXT: lu12i.w $a1, 275200
; LA64-NEXT: bstrins.d $a0, $a1, 63, 32
; LA64-NEXT: movgr2fr.d $fa1, $a0
; LA64-NEXT: fadd.d $fa0, $fa1, $fa0
; LA64-NEXT: ret
  %1 = uitofp i64 %a to double
  ret double %1
}
; bitcast i64 -> f64: LA32 has no 64-bit GPR<->FPR move, so the value is
; stored as two words to the stack and reloaded with fld.d; LA64 uses a
; single movgr2fr.d.
define double @bitcast_i64_to_double(i64 %a, i64 %b) nounwind {
; LA32-LABEL: bitcast_i64_to_double:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $a1, $sp, 12
; LA32-NEXT: st.w $a0, $sp, 8
; LA32-NEXT: fld.d $fa0, $sp, 8
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: bitcast_i64_to_double:
; LA64: # %bb.0:
; LA64-NEXT: movgr2fr.d $fa0, $a0
; LA64-NEXT: ret
  %1 = bitcast i64 %a to double
  ret double %1
}
; bitcast f64 -> i64: the inverse of the previous test — LA32 round-trips
; through the stack (fst.d then two word loads into the $a0/$a1 return
; pair); LA64 uses a single movfr2gr.d.
define i64 @bitcast_double_to_i64(double %a) nounwind {
; LA32-LABEL: bitcast_double_to_i64:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: fst.d $fa0, $sp, 8
; LA32-NEXT: ld.w $a0, $sp, 8
; LA32-NEXT: ld.w $a1, $sp, 12
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: bitcast_double_to_i64:
; LA64: # %bb.0:
; LA64-NEXT: movfr2gr.d $a0, $fa0
; LA64-NEXT: ret
  %1 = bitcast double %a to i64
  ret i64 %1
}