llvm/test/CodeGen/AArch64/ldexp.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc -mtriple=aarch64 -mattr=+sve < %s -o - | FileCheck -check-prefixes=SVE,SVELINUX %s
; RUN: llc -mtriple=aarch64-windows-msvc -mattr=+sve < %s -o - | FileCheck -check-prefixes=SVE,SVEWINDOWS %s
; RUN: llc -mtriple=aarch64-windows-msvc < %s -o - | FileCheck -check-prefixes=WINDOWS %s
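; Lowering of the ldexp libcalls and llvm.ldexp intrinsics. With +sve the
; scalar cases can use the predicated FSCALE instruction; without SVE (or
; where no FSCALE form exists) the calls are lowered to the libm
; ldexp/ldexpf/ldexpl routines.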

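; Fast call to the libm ldexp. With SVE the exponent is sign-extended and
; moved into a vector register so a single FSCALE performs the scaling; the
; non-SVE Windows run tail-calls ldexp instead.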
define double @testExp(double %val, i32 %a) {
; SVE-LABEL: testExp:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    // kill: def $w0 killed $w0 def $x0
; SVE-NEXT:    sxtw x8, w0
; SVE-NEXT:    ptrue p0.d
; SVE-NEXT:    // kill: def $d0 killed $d0 def $z0
; SVE-NEXT:    fmov d1, x8
; SVE-NEXT:    fscale z0.d, p0/m, z0.d, z1.d
; SVE-NEXT:    // kill: def $d0 killed $d0 killed $z0
; SVE-NEXT:    ret
;
; WINDOWS-LABEL: testExp:
; WINDOWS:       // %bb.0: // %entry
; WINDOWS-NEXT:    b ldexp
entry:
  %call = tail call fast double @ldexp(double %val, i32 %a)
  ret double %call
}

declare double @ldexp(double, i32) memory(none)

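; The llvm.ldexp.f64 intrinsic is expected to lower exactly like the libm
; call above, i.e. FSCALE with SVE and a tail call to ldexp otherwise.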
define double @testExpIntrinsic(double %val, i32 %a) {
; SVE-LABEL: testExpIntrinsic:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    // kill: def $w0 killed $w0 def $x0
; SVE-NEXT:    sxtw x8, w0
; SVE-NEXT:    ptrue p0.d
; SVE-NEXT:    // kill: def $d0 killed $d0 def $z0
; SVE-NEXT:    fmov d1, x8
; SVE-NEXT:    fscale z0.d, p0/m, z0.d, z1.d
; SVE-NEXT:    // kill: def $d0 killed $d0 killed $z0
; SVE-NEXT:    ret
;
; WINDOWS-LABEL: testExpIntrinsic:
; WINDOWS:       // %bb.0: // %entry
; WINDOWS-NEXT:    b ldexp
entry:
  %call = tail call fast double @llvm.ldexp.f64(double %val, i32 %a)
  ret double %call
}

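; Float variant via the libm ldexpf call. Only the Linux SVE run uses
; FSCALE; both Windows runs keep the tail call to ldexpf.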
define float @testExpf(float %val, i32 %a) {
; SVELINUX-LABEL: testExpf:
; SVELINUX:       // %bb.0: // %entry
; SVELINUX-NEXT:    fmov s1, w0
; SVELINUX-NEXT:    ptrue p0.s
; SVELINUX-NEXT:    // kill: def $s0 killed $s0 def $z0
; SVELINUX-NEXT:    fscale z0.s, p0/m, z0.s, z1.s
; SVELINUX-NEXT:    // kill: def $s0 killed $s0 killed $z0
; SVELINUX-NEXT:    ret
;
; SVEWINDOWS-LABEL: testExpf:
; SVEWINDOWS:       // %bb.0: // %entry
; SVEWINDOWS-NEXT:    b ldexpf
;
; WINDOWS-LABEL: testExpf:
; WINDOWS:       // %bb.0: // %entry
; WINDOWS-NEXT:    b ldexpf
entry:
  %call = tail call fast float @ldexpf(float %val, i32 %a)
  ret float %call
}

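; llvm.ldexp.f32 uses FSCALE with SVE. Without SVE on Windows the operand is
; promoted to double, ldexp is called, and the result is truncated back.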
define float @testExpfIntrinsic(float %val, i32 %a) {
; SVE-LABEL: testExpfIntrinsic:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    fmov s1, w0
; SVE-NEXT:    ptrue p0.s
; SVE-NEXT:    // kill: def $s0 killed $s0 def $z0
; SVE-NEXT:    fscale z0.s, p0/m, z0.s, z1.s
; SVE-NEXT:    // kill: def $s0 killed $s0 killed $z0
; SVE-NEXT:    ret
;
; WINDOWS-LABEL: testExpfIntrinsic:
; WINDOWS:       .seh_proc testExpfIntrinsic
; WINDOWS-NEXT:  // %bb.0: // %entry
; WINDOWS-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; WINDOWS-NEXT:    .seh_save_reg_x x30, 16
; WINDOWS-NEXT:    .seh_endprologue
; WINDOWS-NEXT:    fcvt d0, s0
; WINDOWS-NEXT:    bl ldexp
; WINDOWS-NEXT:    fcvt s0, d0
; WINDOWS-NEXT:    .seh_startepilogue
; WINDOWS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; WINDOWS-NEXT:    .seh_save_reg_x x30, 16
; WINDOWS-NEXT:    .seh_endepilogue
; WINDOWS-NEXT:    ret
; WINDOWS-NEXT:    .seh_endfunclet
; WINDOWS-NEXT:    .seh_endproc
entry:
  %call = tail call fast float @llvm.ldexp.f32(float %val, i32 %a)
  ret float %call
}

declare float @ldexpf(float, i32) memory(none)

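; fp128 has no FSCALE form, so every configuration tail-calls ldexpl.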
define fp128 @testExpl(fp128 %val, i32 %a) {
; SVE-LABEL: testExpl:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    b ldexpl
;
; WINDOWS-LABEL: testExpl:
; WINDOWS:       // %bb.0: // %entry
; WINDOWS-NEXT:    b ldexpl
entry:
  %call = tail call fast fp128 @ldexpl(fp128 %val, i32 %a)
  ret fp128 %call
}

declare fp128 @ldexpl(fp128, i32) memory(none)

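; Half precision. With SVE the value is extended to single precision for
; FSCALE and converted back; on Windows without SVE it is promoted to
; double and lowered through ldexp.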
define half @testExpf16(half %val, i32 %a) {
; SVE-LABEL: testExpf16:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    fcvt s0, h0
; SVE-NEXT:    fmov s1, w0
; SVE-NEXT:    ptrue p0.s
; SVE-NEXT:    fscale z0.s, p0/m, z0.s, z1.s
; SVE-NEXT:    fcvt h0, s0
; SVE-NEXT:    ret
;
; WINDOWS-LABEL: testExpf16:
; WINDOWS:       .seh_proc testExpf16
; WINDOWS-NEXT:  // %bb.0: // %entry
; WINDOWS-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; WINDOWS-NEXT:    .seh_save_reg_x x30, 16
; WINDOWS-NEXT:    .seh_endprologue
; WINDOWS-NEXT:    fcvt d0, h0
; WINDOWS-NEXT:    bl ldexp
; WINDOWS-NEXT:    fcvt h0, d0
; WINDOWS-NEXT:    .seh_startepilogue
; WINDOWS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; WINDOWS-NEXT:    .seh_save_reg_x x30, 16
; WINDOWS-NEXT:    .seh_endepilogue
; WINDOWS-NEXT:    ret
; WINDOWS-NEXT:    .seh_endfunclet
; WINDOWS-NEXT:    .seh_endproc
entry:
  %0 = tail call fast half @llvm.ldexp.f16.i32(half %val, i32 %a)
  ret half %0
}

declare half @llvm.ldexp.f16.i32(half, i32) memory(none)