llvm/test/CodeGen/X86/uint64-to-float.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i686-windows -mattr=+sse2 | FileCheck %s --check-prefix=X86-WIN
; RUN: llc < %s -mtriple=x86_64-windows -mattr=+sse2 | FileCheck %s --check-prefix=X64-WIN

; Verify that we are using the efficient uitofp --> sitofp lowering illustrated
; by the compiler_rt implementation of __floatundisf.
; <rdar://problem/8493982>
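;
; The X64 checks below expect the expansion: values that fit in a signed i64
; go straight through cvtsi2ss, and values with the top bit set are halved
; with a sticky low bit, converted as signed, and then doubled. A rough C
; sketch of that expansion (illustrative only, not the compiler-rt source;
; the function name is made up here):
;
;   float uint64_to_float_sketch(uint64_t a) {
;     if ((int64_t)a >= 0)
;       return (float)(int64_t)a;              // plain signed conversion
;     // Top bit set: shift right by one, OR the dropped bit back in so the
;     // final rounding is unaffected (round to odd), convert, then double.
;     uint64_t halved = (a >> 1) | (a & 1);
;     return (float)(int64_t)halved * 2.0f;
;   }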

define float @test(i64 %a) nounwind {
; X86-LABEL: test:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $16, %esp
; X86-NEXT:    movl 12(%ebp), %eax
; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT:    movlps %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT:    shrl $31, %eax
; X86-NEXT:    fildll {{[0-9]+}}(%esp)
; X86-NEXT:    fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT:    movss %xmm0, (%esp)
; X86-NEXT:    flds (%esp)
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test:
; X64:       # %bb.0: # %entry
; X64-NEXT:    testq %rdi, %rdi
; X64-NEXT:    js .LBB0_1
; X64-NEXT:  # %bb.2: # %entry
; X64-NEXT:    cvtsi2ss %rdi, %xmm0
; X64-NEXT:    retq
; X64-NEXT:  .LBB0_1:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    shrq %rax
; X64-NEXT:    andl $1, %edi
; X64-NEXT:    orq %rax, %rdi
; X64-NEXT:    cvtsi2ss %rdi, %xmm0
; X64-NEXT:    addss %xmm0, %xmm0
; X64-NEXT:    retq
;
; X86-WIN-LABEL: test:
; X86-WIN:       # %bb.0: # %entry
; X86-WIN-NEXT:    pushl %ebp
; X86-WIN-NEXT:    movl %esp, %ebp
; X86-WIN-NEXT:    andl $-8, %esp
; X86-WIN-NEXT:    subl $24, %esp
; X86-WIN-NEXT:    movl 12(%ebp), %eax
; X86-WIN-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-WIN-NEXT:    movlps %xmm0, {{[0-9]+}}(%esp)
; X86-WIN-NEXT:    shrl $31, %eax
; X86-WIN-NEXT:    fildll {{[0-9]+}}(%esp)
; X86-WIN-NEXT:    fnstcw {{[0-9]+}}(%esp)
; X86-WIN-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; X86-WIN-NEXT:    orl $768, %ecx # imm = 0x300
; X86-WIN-NEXT:    movw %cx, {{[0-9]+}}(%esp)
; X86-WIN-NEXT:    fldcw {{[0-9]+}}(%esp)
; X86-WIN-NEXT:    fadds __real@5f80000000000000(,%eax,4)
; X86-WIN-NEXT:    fldcw {{[0-9]+}}(%esp)
; X86-WIN-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-WIN-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-WIN-NEXT:    movss %xmm0, {{[0-9]+}}(%esp)
; X86-WIN-NEXT:    flds {{[0-9]+}}(%esp)
; X86-WIN-NEXT:    movl %ebp, %esp
; X86-WIN-NEXT:    popl %ebp
; X86-WIN-NEXT:    retl
;
; X64-WIN-LABEL: test:
; X64-WIN:       # %bb.0: # %entry
; X64-WIN-NEXT:    testq %rcx, %rcx
; X64-WIN-NEXT:    js .LBB0_1
; X64-WIN-NEXT:  # %bb.2: # %entry
; X64-WIN-NEXT:    cvtsi2ss %rcx, %xmm0
; X64-WIN-NEXT:    retq
; X64-WIN-NEXT:  .LBB0_1:
; X64-WIN-NEXT:    movq %rcx, %rax
; X64-WIN-NEXT:    shrq %rax
; X64-WIN-NEXT:    andl $1, %ecx
; X64-WIN-NEXT:    orq %rax, %rcx
; X64-WIN-NEXT:    cvtsi2ss %rcx, %xmm0
; X64-WIN-NEXT:    addss %xmm0, %xmm0
; X64-WIN-NEXT:    retq
entry:
  %b = uitofp i64 %a to float
  ret float %b
}
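
; For the 32-bit targets above there is no 64-bit integer conversion
; instruction, so the X86 and X86-WIN checks instead expect the x87 expansion:
; the i64 bit pattern is loaded with fildll (which interprets it as signed)
; and 2^64 is added back whenever the original sign bit was set, via a
; two-entry float table indexed by the sign bit. The Windows checks
; additionally expect the x87 precision control word to be raised to extended
; precision around the add. A rough C sketch of the common part (illustrative
; only; assumes long double is the 80-bit x87 type, as on x86):
;
;   float uint64_to_float_x87_sketch(uint64_t a) {
;     static const float adjust[2] = {0.0f, 0x1p64f}; // indexed by sign bit
;     long double wide = (long double)(int64_t)a;     // what fildll produces
;     return (float)(wide + adjust[a >> 63]);         // one final rounding
;   }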