; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN: < %s -mtriple=powerpc64-unknown-linux -mcpu=pwr8 | FileCheck %s \
; RUN: --check-prefixes=CHECK,P8
; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN: < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 | FileCheck %s \
; RUN: --check-prefixes=CHECK,P9
; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN: < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr8 -mattr=-vsx | \
; RUN: FileCheck %s --check-prefix=NOVSX
; RUN: llc -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 < %s -simplify-mir \
; RUN: -stop-after=machine-cp | FileCheck %s --check-prefix=MIR
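
; Check codegen for strict (constrained) conversions between floating point
; and integers. The asm RUN lines cover pwr8 and pwr9 with VSX plus pwr8
; without VSX; the MIR RUN line verifies that conversions using
; "fpexcept.ignore" are tagged nofpexcept after machine-cp.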
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
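
; FP to signed/unsigned i32/i64, returned in r3. With VSX the conversion
; result is moved out of the VSR with mffprwz/mffprd; without VSX it is
; bounced through a stack slot.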
define i32 @d_to_i32(double %m) #0 {
; CHECK-LABEL: d_to_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws f0, f1
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: d_to_i32:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwz r3, -4(r1)
; NOVSX-NEXT: blr
entry:
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict")
ret i32 %conv
}

define i64 @d_to_i64(double %m) #0 {
; CHECK-LABEL: d_to_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxds f0, f1
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: d_to_i64:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctidz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: blr
entry:
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict")
ret i64 %conv
}

define i64 @d_to_u64(double %m) #0 {
; CHECK-LABEL: d_to_u64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxds f0, f1
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: d_to_u64:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiduz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: blr
entry:
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict")
ret i64 %conv
}

define zeroext i32 @d_to_u32(double %m) #0 {
; CHECK-LABEL: d_to_u32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxws f0, f1
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: d_to_u32:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwuz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwz r3, -4(r1)
; NOVSX-NEXT: blr
entry:
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict")
ret i32 %conv
}

define signext i32 @f_to_i32(float %m) #0 {
; CHECK-LABEL: f_to_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws f0, f1
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
;
; NOVSX-LABEL: f_to_i32:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwa r3, -4(r1)
; NOVSX-NEXT: blr
entry:
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict")
ret i32 %conv
}

define i64 @f_to_i64(float %m) #0 {
; CHECK-LABEL: f_to_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxds f0, f1
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: f_to_i64:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctidz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: blr
entry:
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict")
ret i64 %conv
}

define i64 @f_to_u64(float %m) #0 {
; CHECK-LABEL: f_to_u64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxds f0, f1
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: f_to_u64:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiduz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: blr
entry:
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict")
ret i64 %conv
}

define zeroext i32 @f_to_u32(float %m) #0 {
; CHECK-LABEL: f_to_u32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxws f0, f1
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: f_to_u32:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwuz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwz r3, -4(r1)
; NOVSX-NEXT: blr
entry:
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict")
ret i32 %conv
}
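
; Signed/unsigned i32/i64 arguments to FP. With VSX the GPR is moved in
; directly with mtfprwa/mtfprwz/mtfprd; without VSX it goes through a stack
; slot and lfiwax/lfiwzx/lfd.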
define double @i32_to_d(i32 signext %m) #0 {
; CHECK-LABEL: i32_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r3
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: i32_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: stw r3, -4(r1)
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: lfiwax f0, 0, r3
; NOVSX-NEXT: fcfid f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %conv
}

define double @i64_to_d(i64 %m) #0 {
; CHECK-LABEL: i64_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: i64_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: std r3, -8(r1)
; NOVSX-NEXT: lfd f0, -8(r1)
; NOVSX-NEXT: fcfid f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %conv
}

define double @u32_to_d(i32 zeroext %m) #0 {
; CHECK-LABEL: u32_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r3
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: u32_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: stw r3, -4(r1)
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: lfiwzx f0, 0, r3
; NOVSX-NEXT: fcfidu f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %conv
}

define double @u64_to_d(i64 %m) #0 {
; CHECK-LABEL: u64_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: u64_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: std r3, -8(r1)
; NOVSX-NEXT: lfd f0, -8(r1)
; NOVSX-NEXT: fcfidu f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %conv
}

define float @i32_to_f(i32 signext %m) #0 {
; CHECK-LABEL: i32_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r3
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: i32_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: stw r3, -4(r1)
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: lfiwax f0, 0, r3
; NOVSX-NEXT: fcfids f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %conv
}

define float @i64_to_f(i64 %m) #0 {
; CHECK-LABEL: i64_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: i64_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: std r3, -8(r1)
; NOVSX-NEXT: lfd f0, -8(r1)
; NOVSX-NEXT: fcfids f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %conv
}

define float @u32_to_f(i32 zeroext %m) #0 {
; CHECK-LABEL: u32_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r3
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: u32_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: stw r3, -4(r1)
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: lfiwzx f0, 0, r3
; NOVSX-NEXT: fcfidus f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %conv
}

define float @u64_to_f(i64 %m) #0 {
; CHECK-LABEL: u64_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: u64_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: std r3, -8(r1)
; NOVSX-NEXT: lfd f0, -8(r1)
; NOVSX-NEXT: fcfidus f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %conv
}
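
; FP to integer followed by a store. With VSX the result is stored straight
; from the VSR (stfiwx/stxsdx/stxsd), avoiding the GPR round trip.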
define void @d_to_i32_store(double %m, ptr %addr) #0 {
; CHECK-LABEL: d_to_i32_store:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws f0, f1
; CHECK-NEXT: stfiwx f0, 0, r4
; CHECK-NEXT: blr
;
; NOVSX-LABEL: d_to_i32_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwz r3, -4(r1)
; NOVSX-NEXT: stw r3, 0(r4)
; NOVSX-NEXT: blr
entry:
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict")
store i32 %conv, ptr %addr, align 4
ret void
}

define void @d_to_i64_store(double %m, ptr %addr) #0 {
; P8-LABEL: d_to_i64_store:
; P8: # %bb.0: # %entry
; P8-NEXT: xscvdpsxds f0, f1
; P8-NEXT: stxsdx f0, 0, r4
; P8-NEXT: blr
;
; P9-LABEL: d_to_i64_store:
; P9: # %bb.0: # %entry
; P9-NEXT: xscvdpsxds v2, f1
; P9-NEXT: stxsd v2, 0(r4)
; P9-NEXT: blr
;
; NOVSX-LABEL: d_to_i64_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctidz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: std r3, 0(r4)
; NOVSX-NEXT: blr
entry:
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict")
store i64 %conv, ptr %addr, align 8
ret void
}

define void @d_to_u64_store(double %m, ptr %addr) #0 {
; P8-LABEL: d_to_u64_store:
; P8: # %bb.0: # %entry
; P8-NEXT: xscvdpuxds f0, f1
; P8-NEXT: stxsdx f0, 0, r4
; P8-NEXT: blr
;
; P9-LABEL: d_to_u64_store:
; P9: # %bb.0: # %entry
; P9-NEXT: xscvdpuxds v2, f1
; P9-NEXT: stxsd v2, 0(r4)
; P9-NEXT: blr
;
; NOVSX-LABEL: d_to_u64_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiduz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: std r3, 0(r4)
; NOVSX-NEXT: blr
entry:
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict")
store i64 %conv, ptr %addr, align 8
ret void
}

define void @d_to_u32_store(double %m, ptr %addr) #0 {
; CHECK-LABEL: d_to_u32_store:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxws f0, f1
; CHECK-NEXT: stfiwx f0, 0, r4
; CHECK-NEXT: blr
;
; NOVSX-LABEL: d_to_u32_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwuz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwz r3, -4(r1)
; NOVSX-NEXT: stw r3, 0(r4)
; NOVSX-NEXT: blr
entry:
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict")
store i32 %conv, ptr %addr, align 4
ret void
}

define void @f_to_i32_store(float %m, ptr %addr) #0 {
; CHECK-LABEL: f_to_i32_store:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws f0, f1
; CHECK-NEXT: stfiwx f0, 0, r4
; CHECK-NEXT: blr
;
; NOVSX-LABEL: f_to_i32_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwz r3, -4(r1)
; NOVSX-NEXT: stw r3, 0(r4)
; NOVSX-NEXT: blr
entry:
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict")
store i32 %conv, ptr %addr, align 4
ret void
}

define void @f_to_i64_store(float %m, ptr %addr) #0 {
; P8-LABEL: f_to_i64_store:
; P8: # %bb.0: # %entry
; P8-NEXT: xscvdpsxds f0, f1
; P8-NEXT: stxsdx f0, 0, r4
; P8-NEXT: blr
;
; P9-LABEL: f_to_i64_store:
; P9: # %bb.0: # %entry
; P9-NEXT: xscvdpsxds v2, f1
; P9-NEXT: stxsd v2, 0(r4)
; P9-NEXT: blr
;
; NOVSX-LABEL: f_to_i64_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctidz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: std r3, 0(r4)
; NOVSX-NEXT: blr
entry:
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict")
store i64 %conv, ptr %addr, align 8
ret void
}

define void @f_to_u64_store(float %m, ptr %addr) #0 {
; P8-LABEL: f_to_u64_store:
; P8: # %bb.0: # %entry
; P8-NEXT: xscvdpuxds f0, f1
; P8-NEXT: stxsdx f0, 0, r4
; P8-NEXT: blr
;
; P9-LABEL: f_to_u64_store:
; P9: # %bb.0: # %entry
; P9-NEXT: xscvdpuxds v2, f1
; P9-NEXT: stxsd v2, 0(r4)
; P9-NEXT: blr
;
; NOVSX-LABEL: f_to_u64_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiduz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: std r3, 0(r4)
; NOVSX-NEXT: blr
entry:
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict")
store i64 %conv, ptr %addr, align 8
ret void
}

define void @f_to_u32_store(float %m, ptr %addr) #0 {
; CHECK-LABEL: f_to_u32_store:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxws f0, f1
; CHECK-NEXT: stfiwx f0, 0, r4
; CHECK-NEXT: blr
;
; NOVSX-LABEL: f_to_u32_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwuz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwz r3, -4(r1)
; NOVSX-NEXT: stw r3, 0(r4)
; NOVSX-NEXT: blr
entry:
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict")
store i32 %conv, ptr %addr, align 4
ret void
}
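
; Integer loads feeding int-to-FP conversions. The load folds into
; lfiwax/lfiwzx/lfd, so no GPR is needed even without VSX.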
define double @load_i32_to_d(ptr %addr) #0 {
; CHECK-LABEL: load_i32_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfiwax f0, 0, r3
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_i32_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfiwax f0, 0, r3
; NOVSX-NEXT: fcfid f1, f0
; NOVSX-NEXT: blr
entry:
%m = load i32, ptr %addr, align 4
%conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %conv
}

define double @load_i64_to_d(ptr %addr) #0 {
; CHECK-LABEL: load_i64_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 0(r3)
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_i64_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfd f0, 0(r3)
; NOVSX-NEXT: fcfid f1, f0
; NOVSX-NEXT: blr
entry:
%m = load i64, ptr %addr, align 8
%conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %conv
}

define double @load_u32_to_d(ptr %addr) #0 {
; CHECK-LABEL: load_u32_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfiwzx f0, 0, r3
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_u32_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfiwzx f0, 0, r3
; NOVSX-NEXT: fcfidu f1, f0
; NOVSX-NEXT: blr
entry:
%m = load i32, ptr %addr, align 4
%conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %conv
}

define double @load_u64_to_d(ptr %addr) #0 {
; CHECK-LABEL: load_u64_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 0(r3)
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_u64_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfd f0, 0(r3)
; NOVSX-NEXT: fcfidu f1, f0
; NOVSX-NEXT: blr
entry:
%m = load i64, ptr %addr, align 8
%conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %conv
}

define float @load_i32_to_f(ptr %addr) #0 {
; CHECK-LABEL: load_i32_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfiwax f0, 0, r3
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_i32_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfiwax f0, 0, r3
; NOVSX-NEXT: fcfids f1, f0
; NOVSX-NEXT: blr
entry:
%m = load i32, ptr %addr, align 4
%conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %conv
}

define float @load_i64_to_f(ptr %addr) #0 {
; CHECK-LABEL: load_i64_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 0(r3)
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_i64_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfd f0, 0(r3)
; NOVSX-NEXT: fcfids f1, f0
; NOVSX-NEXT: blr
entry:
%m = load i64, ptr %addr, align 8
%conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %conv
}

define float @load_u32_to_f(ptr %addr) #0 {
; CHECK-LABEL: load_u32_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfiwzx f0, 0, r3
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_u32_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfiwzx f0, 0, r3
; NOVSX-NEXT: fcfidus f1, f0
; NOVSX-NEXT: blr
entry:
%m = load i32, ptr %addr, align 4
%conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %conv
}

define float @load_u64_to_f(ptr %addr) #0 {
; CHECK-LABEL: load_u64_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 0(r3)
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_u64_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfd f0, 0(r3)
; NOVSX-NEXT: fcfidus f1, f0
; NOVSX-NEXT: blr
entry:
%m = load i64, ptr %addr, align 8
%conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %conv
}
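
; Conversions with "fpexcept.ignore" must carry the nofpexcept flag in MIR.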
define void @fptoint_nofpexcept_f64(double %m, ptr %addr1, ptr %addr2) #0 {
; MIR-LABEL: name: fptoint_nofpexcept_f64
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXWS
; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPSXDS
; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPUXDS
entry:
%conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.ignore")
%conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.ignore")
%conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.ignore")
%conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.ignore")
store volatile i32 %conv1, ptr %addr1, align 4
store volatile i32 %conv2, ptr %addr1, align 4
store volatile i64 %conv3, ptr %addr2, align 8
store volatile i64 %conv4, ptr %addr2, align 8
ret void
}

define void @fptoint_nofpexcept_f32(float %m, ptr %addr1, ptr %addr2) #0 {
; MIR-LABEL: name: fptoint_nofpexcept_f32
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXWS
; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPSXDS
; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPUXDS
entry:
%conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.ignore")
%conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.ignore")
%conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.ignore")
%conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.ignore")
store volatile i32 %conv1, ptr %addr1, align 4
store volatile i32 %conv2, ptr %addr1, align 4
store volatile i64 %conv3, ptr %addr2, align 8
store volatile i64 %conv4, ptr %addr2, align 8
ret void
}

define void @inttofp_nofpexcept_i32(i32 %m, ptr %addr1, ptr %addr2) #0 {
; MIR-LABEL: name: inttofp_nofpexcept_i32
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDDP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDDP
entry:
%conv1 = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
%conv2 = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
%conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
%conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
store volatile float %conv1, ptr %addr1, align 4
store volatile float %conv2, ptr %addr1, align 4
store volatile double %conv3, ptr %addr2, align 8
store volatile double %conv4, ptr %addr2, align 8
ret void
}

define void @inttofp_nofpexcept_i64(i64 %m, ptr %addr1, ptr %addr2) #0 {
; MIR-LABEL: name: inttofp_nofpexcept_i64
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDDP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDDP
entry:
%conv1 = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
%conv2 = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
%conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
%conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
store volatile float %conv1, ptr %addr1, align 4
store volatile float %conv2, ptr %addr1, align 4
store volatile double %conv3, ptr %addr2, align 8
store volatile double %conv4, ptr %addr2, align 8
ret void
}
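
; Vector sitofp with "fpexcept.ignore" is likewise marked nofpexcept.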
define <2 x double> @inttofp_nofpexcept_vec(<2 x i16> %m) #0 {
; MIR-LABEL: name: inttofp_nofpexcept_vec
; MIR: renamable $v{{[0-9]+}} = nofpexcept XVCVSXDDP
entry:
%conv = tail call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16> %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
ret <2 x double> %conv
}

declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)

attributes #0 = { strictfp }