; llvm/test/CodeGen/X86/i128-fpconv-win64-strict.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
; RUN: llc < %s -mtriple=x86_64-mingw32 | FileCheck %s -check-prefix=WIN64

; Strict-FP fptosi f64 -> i128 must lower to a compiler-rt libcall on Win64.
; The CHECK lines pin the __fixdfti call and show the i128 result coming back
; in %xmm0, with the low 64 bits moved to %rax for the truncated i64 return.
define i64 @double_to_i128(double %d) nounwind strictfp {
; WIN64-LABEL: double_to_i128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixdfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  %1 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f64(double %d, metadata !"fpexcept.strict")
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

; Unsigned variant of the above: strict-FP fptoui f64 -> i128 must lower to
; the __fixunsdfti libcall; result again returned in %xmm0 per the checks.
define i64 @double_to_ui128(double %d) nounwind strictfp {
; WIN64-LABEL: double_to_ui128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixunsdfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  %1 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f64(double %d, metadata !"fpexcept.strict")
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

; Strict-FP fptosi f32 -> i128: same shape as the f64 case but must select
; the single-precision libcall __fixsfti.
define i64 @float_to_i128(float %d) nounwind strictfp {
; WIN64-LABEL: float_to_i128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixsfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  %1 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f32(float %d, metadata !"fpexcept.strict")
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

; Strict-FP fptoui f32 -> i128: must select the unsigned single-precision
; libcall __fixunssfti.
define i64 @float_to_ui128(float %d) nounwind strictfp {
; WIN64-LABEL: float_to_ui128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixunssfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  %1 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f32(float %d, metadata !"fpexcept.strict")
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

; Strict-FP fptosi x86_fp80 -> i128. The checks show the fp80 argument being
; spilled to the stack (fldt/fstpt) and passed to __fixxfti by address in
; %rcx. The `wait` after fstpt is expected here because the function is
; strictfp, so x87 exception ordering must be preserved.
define i64 @longdouble_to_i128(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: longdouble_to_i128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    fldt (%rcx)
; WIN64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    wait
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __fixxfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load x86_fp80, ptr %0, align 16
  %3 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f80(x86_fp80 %2, metadata !"fpexcept.strict")
  %4 = trunc i128 %3 to i64
  ret i64 %4
}

; Unsigned variant of longdouble_to_i128: same fp80-by-address calling
; pattern, but the libcall must be __fixunsxfti.
define i64 @longdouble_to_ui128(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: longdouble_to_ui128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    fldt (%rcx)
; WIN64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    wait
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __fixunsxfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load x86_fp80, ptr %0, align 16
  %3 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f80(x86_fp80 %2, metadata !"fpexcept.strict")
  %4 = trunc i128 %3 to i64
  ret i64 %4
}

; Strict-FP sitofp i128 -> f64. The checks show the i128 being copied to a
; stack slot and passed to __floattidf by address in %rcx (Win64 passes i128
; arguments indirectly); the f64 result stays in %xmm0 for the return.
define double @i128_to_double(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: i128_to_double:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floattidf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = tail call double @llvm.experimental.constrained.sitofp.f64.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %3
}

; Strict-FP uitofp i128 -> f64: same indirect-i128 calling pattern as
; i128_to_double, but the libcall must be the unsigned __floatuntidf.
define double @ui128_to_double(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: ui128_to_double:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floatuntidf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = tail call double @llvm.experimental.constrained.uitofp.f64.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %3
}

; Strict-FP sitofp i128 -> f32: indirect i128 argument, libcall __floattisf.
define float @i128_to_float(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: i128_to_float:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floattisf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = tail call float @llvm.experimental.constrained.sitofp.f32.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %3
}

; Strict-FP uitofp i128 -> f32: indirect i128 argument, libcall __floatuntisf.
define float @ui128_to_float(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: ui128_to_float:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floatuntisf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = tail call float @llvm.experimental.constrained.uitofp.f32.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %3
}

; Strict-FP sitofp i128 -> x86_fp80 with an sret result. The sret pointer
; (arriving in %rcx) is stashed in callee-saved %rsi across the call; the
; checks show two stack addresses passed to __floattixf in %rcx/%rdx —
; presumably the fp80 result slot and the spilled i128 argument respectively,
; since the fp80 is then reloaded from the stack (fldt) and stored through
; %rsi (fstpt). %rsi is also copied to %rax as the returned sret pointer.
define void @i128_to_longdouble(ptr noalias nocapture sret(x86_fp80) align 16 %agg.result, ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: i128_to_longdouble:
; WIN64:       # %bb.0:
; WIN64-NEXT:    pushq %rsi
; WIN64-NEXT:    subq $64, %rsp
; WIN64-NEXT:    movq %rcx, %rsi
; WIN64-NEXT:    movaps (%rdx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
; WIN64-NEXT:    callq __floattixf
; WIN64-NEXT:    fldt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    fstpt (%rsi)
; WIN64-NEXT:    wait
; WIN64-NEXT:    movq %rsi, %rax
; WIN64-NEXT:    addq $64, %rsp
; WIN64-NEXT:    popq %rsi
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = tail call x86_fp80 @llvm.experimental.constrained.sitofp.f80.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  store x86_fp80 %3, ptr %agg.result, align 16
  ret void
}

; Unsigned variant of i128_to_longdouble: identical sret/indirect-argument
; shape, but the libcall must be __floatuntixf.
define void @ui128_to_longdouble(ptr noalias nocapture sret(x86_fp80) align 16 %agg.result, ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: ui128_to_longdouble:
; WIN64:       # %bb.0:
; WIN64-NEXT:    pushq %rsi
; WIN64-NEXT:    subq $64, %rsp
; WIN64-NEXT:    movq %rcx, %rsi
; WIN64-NEXT:    movaps (%rdx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
; WIN64-NEXT:    callq __floatuntixf
; WIN64-NEXT:    fldt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    fstpt (%rsi)
; WIN64-NEXT:    wait
; WIN64-NEXT:    movq %rsi, %rax
; WIN64-NEXT:    addq $64, %rsp
; WIN64-NEXT:    popq %rsi
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = tail call x86_fp80 @llvm.experimental.constrained.uitofp.f80.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  store x86_fp80 %3, ptr %agg.result, align 16
  ret void
}

declare i128 @llvm.experimental.constrained.fptosi.i128.f64(double, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f64(double, metadata)
declare i128 @llvm.experimental.constrained.fptosi.i128.f32(float, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f32(float, metadata)
declare i128 @llvm.experimental.constrained.fptosi.i128.f80(x86_fp80, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f80(x86_fp80, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i128(i128, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i128(i128, metadata, metadata)
declare float @llvm.experimental.constrained.sitofp.f32.i128(i128, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i128(i128, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.sitofp.f80.i128(i128, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.uitofp.f80.i128(i128, metadata, metadata)