; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefix=SSE2
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7 -passes=slp-vectorizer -S | FileCheck %s --check-prefix=SSE41
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer -S | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skylake-avx512 -mattr=-prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefix=AVX --check-prefix=AVX512
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skylake-avx512 -mattr=+prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
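;
; Check that the SLP vectorizer turns sequences of scalar rounding intrinsic
; calls (ceil, floor, nearbyint, rint, trunc) over consecutive loads into
; vector intrinsic calls. The expected width tracks the target: SSE2 has no
; round instruction, so the scalar calls remain; SSE4.1 vectorizes to
; <2 x double>; AVX/AVX2 to <4 x double>; AVX512 to <8 x double> unless
; prefer-256-bit caps it at <4 x double>.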
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

@src64 = common global [8 x double] zeroinitializer, align 64
@dst64 = common global [8 x double] zeroinitializer, align 64
@src32 = common global [16 x float] zeroinitializer, align 64
@dst32 = common global [16 x float] zeroinitializer, align 64

declare double @llvm.ceil.f64(double %p)
declare double @llvm.floor.f64(double %p)
declare double @llvm.nearbyint.f64(double %p)
declare double @llvm.rint.f64(double %p)
declare double @llvm.trunc.f64(double %p)
declare float @llvm.ceil.f32(float %p)
declare float @llvm.floor.f32(float %p)
declare float @llvm.nearbyint.f32(float %p)
declare float @llvm.rint.f32(float %p)
declare float @llvm.trunc.f32(float %p)

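; Each f64 test loads 2, 4, or 8 consecutive doubles from @src64, applies the
; scalar intrinsic to every element, and stores the results to @dst64, so the
; SLP vectorizer can merge the calls into a single vector intrinsic.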
define void @ceil_2f64() #0 {
; SSE2-LABEL: @ceil_2f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[CEIL0:%.*]] = call double @llvm.ceil.f64(double [[LD0]])
; SSE2-NEXT: [[CEIL1:%.*]] = call double @llvm.ceil.f64(double [[LD1]])
; SSE2-NEXT: store double [[CEIL0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[CEIL1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @ceil_2f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: ret void
;
; AVX-LABEL: @ceil_2f64(
; AVX-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; AVX-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP1]])
; AVX-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; AVX-NEXT: ret void
;
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%ceil0 = call double @llvm.ceil.f64(double %ld0)
%ceil1 = call double @llvm.ceil.f64(double %ld1)
store double %ceil0, ptr @dst64, align 8
store double %ceil1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
ret void
}

define void @ceil_4f64() #0 {
; SSE2-LABEL: @ceil_4f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
; SSE2-NEXT: [[CEIL0:%.*]] = call double @llvm.ceil.f64(double [[LD0]])
; SSE2-NEXT: [[CEIL1:%.*]] = call double @llvm.ceil.f64(double [[LD1]])
; SSE2-NEXT: [[CEIL2:%.*]] = call double @llvm.ceil.f64(double [[LD2]])
; SSE2-NEXT: [[CEIL3:%.*]] = call double @llvm.ceil.f64(double [[LD3]])
; SSE2-NEXT: store double [[CEIL0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[CEIL1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: store double [[CEIL2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE2-NEXT: store double [[CEIL3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @ceil_4f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP3]])
; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE41-NEXT: ret void
;
; AVX-LABEL: @ceil_4f64(
; AVX-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.ceil.v4f64(<4 x double> [[TMP1]])
; AVX-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX-NEXT: ret void
;
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
%ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
%ceil0 = call double @llvm.ceil.f64(double %ld0)
%ceil1 = call double @llvm.ceil.f64(double %ld1)
%ceil2 = call double @llvm.ceil.f64(double %ld2)
%ceil3 = call double @llvm.ceil.f64(double %ld3)
store double %ceil0, ptr @dst64, align 8
store double %ceil1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
store double %ceil2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
store double %ceil3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
ret void
}

define void @ceil_8f64() #0 {
; SSE2-LABEL: @ceil_8f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
; SSE2-NEXT: [[LD4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; SSE2-NEXT: [[LD5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
; SSE2-NEXT: [[LD6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
; SSE2-NEXT: [[LD7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
; SSE2-NEXT: [[CEIL0:%.*]] = call double @llvm.ceil.f64(double [[LD0]])
; SSE2-NEXT: [[CEIL1:%.*]] = call double @llvm.ceil.f64(double [[LD1]])
; SSE2-NEXT: [[CEIL2:%.*]] = call double @llvm.ceil.f64(double [[LD2]])
; SSE2-NEXT: [[CEIL3:%.*]] = call double @llvm.ceil.f64(double [[LD3]])
; SSE2-NEXT: [[CEIL4:%.*]] = call double @llvm.ceil.f64(double [[LD4]])
; SSE2-NEXT: [[CEIL5:%.*]] = call double @llvm.ceil.f64(double [[LD5]])
; SSE2-NEXT: [[CEIL6:%.*]] = call double @llvm.ceil.f64(double [[LD6]])
; SSE2-NEXT: [[CEIL7:%.*]] = call double @llvm.ceil.f64(double [[LD7]])
; SSE2-NEXT: store double [[CEIL0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[CEIL1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: store double [[CEIL2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE2-NEXT: store double [[CEIL3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
; SSE2-NEXT: store double [[CEIL4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; SSE2-NEXT: store double [[CEIL5]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
; SSE2-NEXT: store double [[CEIL6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
; SSE2-NEXT: store double [[CEIL7]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @ceil_8f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP3]])
; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; SSE41-NEXT: [[TMP6:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP5]])
; SSE41-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; SSE41-NEXT: [[TMP7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
; SSE41-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP7]])
; SSE41-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
; SSE41-NEXT: ret void
;
; AVX1-LABEL: @ceil_8f64(
; AVX1-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX1-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.ceil.v4f64(<4 x double> [[TMP1]])
; AVX1-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX1-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; AVX1-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.ceil.v4f64(<4 x double> [[TMP3]])
; AVX1-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; AVX1-NEXT: ret void
;
; AVX2-LABEL: @ceil_8f64(
; AVX2-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX2-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.ceil.v4f64(<4 x double> [[TMP1]])
; AVX2-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX2-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; AVX2-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.ceil.v4f64(<4 x double> [[TMP3]])
; AVX2-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; AVX2-NEXT: ret void
;
; AVX512-LABEL: @ceil_8f64(
; AVX512-NEXT: [[TMP1:%.*]] = load <8 x double>, ptr @src64, align 8
; AVX512-NEXT: [[TMP2:%.*]] = call <8 x double> @llvm.ceil.v8f64(<8 x double> [[TMP1]])
; AVX512-NEXT: store <8 x double> [[TMP2]], ptr @dst64, align 8
; AVX512-NEXT: ret void
;
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
%ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
%ld4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
%ld5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
%ld6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
%ld7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
%ceil0 = call double @llvm.ceil.f64(double %ld0)
%ceil1 = call double @llvm.ceil.f64(double %ld1)
%ceil2 = call double @llvm.ceil.f64(double %ld2)
%ceil3 = call double @llvm.ceil.f64(double %ld3)
%ceil4 = call double @llvm.ceil.f64(double %ld4)
%ceil5 = call double @llvm.ceil.f64(double %ld5)
%ceil6 = call double @llvm.ceil.f64(double %ld6)
%ceil7 = call double @llvm.ceil.f64(double %ld7)
store double %ceil0, ptr @dst64, align 8
store double %ceil1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
store double %ceil2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
store double %ceil3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
store double %ceil4, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
store double %ceil5, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
store double %ceil6, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
store double %ceil7, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
ret void
}

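; llvm.floor tests follow the same load/round/store pattern as llvm.ceil.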
define void @floor_2f64() #0 {
; SSE2-LABEL: @floor_2f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[FLOOR0:%.*]] = call double @llvm.floor.f64(double [[LD0]])
; SSE2-NEXT: [[FLOOR1:%.*]] = call double @llvm.floor.f64(double [[LD1]])
; SSE2-NEXT: store double [[FLOOR0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[FLOOR1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @floor_2f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: ret void
;
; AVX-LABEL: @floor_2f64(
; AVX-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; AVX-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP1]])
; AVX-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; AVX-NEXT: ret void
;
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%floor0 = call double @llvm.floor.f64(double %ld0)
%floor1 = call double @llvm.floor.f64(double %ld1)
store double %floor0, ptr @dst64, align 8
store double %floor1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
ret void
}

define void @floor_4f64() #0 {
; SSE2-LABEL: @floor_4f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
; SSE2-NEXT: [[FLOOR0:%.*]] = call double @llvm.floor.f64(double [[LD0]])
; SSE2-NEXT: [[FLOOR1:%.*]] = call double @llvm.floor.f64(double [[LD1]])
; SSE2-NEXT: [[FLOOR2:%.*]] = call double @llvm.floor.f64(double [[LD2]])
; SSE2-NEXT: [[FLOOR3:%.*]] = call double @llvm.floor.f64(double [[LD3]])
; SSE2-NEXT: store double [[FLOOR0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[FLOOR1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: store double [[FLOOR2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE2-NEXT: store double [[FLOOR3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @floor_4f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP3]])
; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE41-NEXT: ret void
;
; AVX-LABEL: @floor_4f64(
; AVX-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.floor.v4f64(<4 x double> [[TMP1]])
; AVX-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX-NEXT: ret void
;
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
%ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
%floor0 = call double @llvm.floor.f64(double %ld0)
%floor1 = call double @llvm.floor.f64(double %ld1)
%floor2 = call double @llvm.floor.f64(double %ld2)
%floor3 = call double @llvm.floor.f64(double %ld3)
store double %floor0, ptr @dst64, align 8
store double %floor1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
store double %floor2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
store double %floor3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
ret void
}

define void @floor_8f64() #0 {
; SSE2-LABEL: @floor_8f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
; SSE2-NEXT: [[LD4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; SSE2-NEXT: [[LD5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
; SSE2-NEXT: [[LD6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
; SSE2-NEXT: [[LD7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
; SSE2-NEXT: [[FLOOR0:%.*]] = call double @llvm.floor.f64(double [[LD0]])
; SSE2-NEXT: [[FLOOR1:%.*]] = call double @llvm.floor.f64(double [[LD1]])
; SSE2-NEXT: [[FLOOR2:%.*]] = call double @llvm.floor.f64(double [[LD2]])
; SSE2-NEXT: [[FLOOR3:%.*]] = call double @llvm.floor.f64(double [[LD3]])
; SSE2-NEXT: [[FLOOR4:%.*]] = call double @llvm.floor.f64(double [[LD4]])
; SSE2-NEXT: [[FLOOR5:%.*]] = call double @llvm.floor.f64(double [[LD5]])
; SSE2-NEXT: [[FLOOR6:%.*]] = call double @llvm.floor.f64(double [[LD6]])
; SSE2-NEXT: [[FLOOR7:%.*]] = call double @llvm.floor.f64(double [[LD7]])
; SSE2-NEXT: store double [[FLOOR0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[FLOOR1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: store double [[FLOOR2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE2-NEXT: store double [[FLOOR3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
; SSE2-NEXT: store double [[FLOOR4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; SSE2-NEXT: store double [[FLOOR5]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
; SSE2-NEXT: store double [[FLOOR6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
; SSE2-NEXT: store double [[FLOOR7]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @floor_8f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP3]])
; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; SSE41-NEXT: [[TMP6:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP5]])
; SSE41-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; SSE41-NEXT: [[TMP7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
; SSE41-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP7]])
; SSE41-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
; SSE41-NEXT: ret void
;
; AVX1-LABEL: @floor_8f64(
; AVX1-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX1-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.floor.v4f64(<4 x double> [[TMP1]])
; AVX1-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX1-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; AVX1-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.floor.v4f64(<4 x double> [[TMP3]])
; AVX1-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; AVX1-NEXT: ret void
;
; AVX2-LABEL: @floor_8f64(
; AVX2-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX2-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.floor.v4f64(<4 x double> [[TMP1]])
; AVX2-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX2-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; AVX2-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.floor.v4f64(<4 x double> [[TMP3]])
; AVX2-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; AVX2-NEXT: ret void
;
; AVX512-LABEL: @floor_8f64(
; AVX512-NEXT: [[TMP1:%.*]] = load <8 x double>, ptr @src64, align 8
; AVX512-NEXT: [[TMP2:%.*]] = call <8 x double> @llvm.floor.v8f64(<8 x double> [[TMP1]])
; AVX512-NEXT: store <8 x double> [[TMP2]], ptr @dst64, align 8
; AVX512-NEXT: ret void
;
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
%ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
%ld4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
%ld5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
%ld6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
%ld7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
%floor0 = call double @llvm.floor.f64(double %ld0)
%floor1 = call double @llvm.floor.f64(double %ld1)
%floor2 = call double @llvm.floor.f64(double %ld2)
%floor3 = call double @llvm.floor.f64(double %ld3)
%floor4 = call double @llvm.floor.f64(double %ld4)
%floor5 = call double @llvm.floor.f64(double %ld5)
%floor6 = call double @llvm.floor.f64(double %ld6)
%floor7 = call double @llvm.floor.f64(double %ld7)
store double %floor0, ptr @dst64, align 8
store double %floor1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
store double %floor2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
store double %floor3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
store double %floor4, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
store double %floor5, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
store double %floor6, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
store double %floor7, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
ret void
}

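; llvm.nearbyint rounds using the current rounding mode without raising the
; inexact exception; vectorization proceeds exactly as for ceil/floor.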
define void @nearbyint_2f64() #0 {
; SSE2-LABEL: @nearbyint_2f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[NEARBYINT0:%.*]] = call double @llvm.nearbyint.f64(double [[LD0]])
; SSE2-NEXT: [[NEARBYINT1:%.*]] = call double @llvm.nearbyint.f64(double [[LD1]])
; SSE2-NEXT: store double [[NEARBYINT0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[NEARBYINT1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @nearbyint_2f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: ret void
;
; AVX-LABEL: @nearbyint_2f64(
; AVX-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; AVX-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP1]])
; AVX-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; AVX-NEXT: ret void
;
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%nearbyint0 = call double @llvm.nearbyint.f64(double %ld0)
%nearbyint1 = call double @llvm.nearbyint.f64(double %ld1)
store double %nearbyint0, ptr @dst64, align 8
store double %nearbyint1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
ret void
}

define void @nearbyint_4f64() #0 {
; SSE2-LABEL: @nearbyint_4f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
; SSE2-NEXT: [[NEARBYINT0:%.*]] = call double @llvm.nearbyint.f64(double [[LD0]])
; SSE2-NEXT: [[NEARBYINT1:%.*]] = call double @llvm.nearbyint.f64(double [[LD1]])
; SSE2-NEXT: [[NEARBYINT2:%.*]] = call double @llvm.nearbyint.f64(double [[LD2]])
; SSE2-NEXT: [[NEARBYINT3:%.*]] = call double @llvm.nearbyint.f64(double [[LD3]])
; SSE2-NEXT: store double [[NEARBYINT0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[NEARBYINT1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: store double [[NEARBYINT2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE2-NEXT: store double [[NEARBYINT3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @nearbyint_4f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP3]])
; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE41-NEXT: ret void
;
; AVX-LABEL: @nearbyint_4f64(
; AVX-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> [[TMP1]])
; AVX-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX-NEXT: ret void
;
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
%ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
%nearbyint0 = call double @llvm.nearbyint.f64(double %ld0)
%nearbyint1 = call double @llvm.nearbyint.f64(double %ld1)
%nearbyint2 = call double @llvm.nearbyint.f64(double %ld2)
%nearbyint3 = call double @llvm.nearbyint.f64(double %ld3)
store double %nearbyint0, ptr @dst64, align 8
store double %nearbyint1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
store double %nearbyint2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
store double %nearbyint3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
ret void
}

define void @nearbyint_8f64() #0 {
; SSE2-LABEL: @nearbyint_8f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
; SSE2-NEXT: [[LD4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; SSE2-NEXT: [[LD5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
; SSE2-NEXT: [[LD6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
; SSE2-NEXT: [[LD7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
; SSE2-NEXT: [[NEARBYINT0:%.*]] = call double @llvm.nearbyint.f64(double [[LD0]])
; SSE2-NEXT: [[NEARBYINT1:%.*]] = call double @llvm.nearbyint.f64(double [[LD1]])
; SSE2-NEXT: [[NEARBYINT2:%.*]] = call double @llvm.nearbyint.f64(double [[LD2]])
; SSE2-NEXT: [[NEARBYINT3:%.*]] = call double @llvm.nearbyint.f64(double [[LD3]])
; SSE2-NEXT: [[NEARBYINT4:%.*]] = call double @llvm.nearbyint.f64(double [[LD4]])
; SSE2-NEXT: [[NEARBYINT5:%.*]] = call double @llvm.nearbyint.f64(double [[LD5]])
; SSE2-NEXT: [[NEARBYINT6:%.*]] = call double @llvm.nearbyint.f64(double [[LD6]])
; SSE2-NEXT: [[NEARBYINT7:%.*]] = call double @llvm.nearbyint.f64(double [[LD7]])
; SSE2-NEXT: store double [[NEARBYINT0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[NEARBYINT1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: store double [[NEARBYINT2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE2-NEXT: store double [[NEARBYINT3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
; SSE2-NEXT: store double [[NEARBYINT4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; SSE2-NEXT: store double [[NEARBYINT5]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
; SSE2-NEXT: store double [[NEARBYINT6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
; SSE2-NEXT: store double [[NEARBYINT7]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @nearbyint_8f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP3]])
; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; SSE41-NEXT: [[TMP6:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP5]])
; SSE41-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; SSE41-NEXT: [[TMP7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
; SSE41-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP7]])
; SSE41-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
; SSE41-NEXT: ret void
;
; AVX1-LABEL: @nearbyint_8f64(
; AVX1-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX1-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> [[TMP1]])
; AVX1-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX1-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; AVX1-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> [[TMP3]])
; AVX1-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; AVX1-NEXT: ret void
;
; AVX2-LABEL: @nearbyint_8f64(
; AVX2-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX2-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> [[TMP1]])
; AVX2-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX2-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; AVX2-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> [[TMP3]])
; AVX2-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; AVX2-NEXT: ret void
;
; AVX512-LABEL: @nearbyint_8f64(
; AVX512-NEXT: [[TMP1:%.*]] = load <8 x double>, ptr @src64, align 8
; AVX512-NEXT: [[TMP2:%.*]] = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> [[TMP1]])
; AVX512-NEXT: store <8 x double> [[TMP2]], ptr @dst64, align 8
; AVX512-NEXT: ret void
;
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
%ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
%ld4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
%ld5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
%ld6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
%ld7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
%nearbyint0 = call double @llvm.nearbyint.f64(double %ld0)
%nearbyint1 = call double @llvm.nearbyint.f64(double %ld1)
%nearbyint2 = call double @llvm.nearbyint.f64(double %ld2)
%nearbyint3 = call double @llvm.nearbyint.f64(double %ld3)
%nearbyint4 = call double @llvm.nearbyint.f64(double %ld4)
%nearbyint5 = call double @llvm.nearbyint.f64(double %ld5)
%nearbyint6 = call double @llvm.nearbyint.f64(double %ld6)
%nearbyint7 = call double @llvm.nearbyint.f64(double %ld7)
store double %nearbyint0, ptr @dst64, align 8
store double %nearbyint1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
store double %nearbyint2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
store double %nearbyint3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
store double %nearbyint4, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
store double %nearbyint5, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
store double %nearbyint6, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
store double %nearbyint7, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
ret void
}

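; llvm.rint is like nearbyint but may raise the inexact exception; the vector
; forms are expected at the same widths.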
define void @rint_2f64() #0 {
; SSE2-LABEL: @rint_2f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[RINT0:%.*]] = call double @llvm.rint.f64(double [[LD0]])
; SSE2-NEXT: [[RINT1:%.*]] = call double @llvm.rint.f64(double [[LD1]])
; SSE2-NEXT: store double [[RINT0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[RINT1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @rint_2f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: ret void
;
; AVX-LABEL: @rint_2f64(
; AVX-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; AVX-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP1]])
; AVX-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; AVX-NEXT: ret void
;
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%rint0 = call double @llvm.rint.f64(double %ld0)
%rint1 = call double @llvm.rint.f64(double %ld1)
store double %rint0, ptr @dst64, align 8
store double %rint1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
ret void
}

define void @rint_4f64() #0 {
; SSE2-LABEL: @rint_4f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
; SSE2-NEXT: [[RINT0:%.*]] = call double @llvm.rint.f64(double [[LD0]])
; SSE2-NEXT: [[RINT1:%.*]] = call double @llvm.rint.f64(double [[LD1]])
; SSE2-NEXT: [[RINT2:%.*]] = call double @llvm.rint.f64(double [[LD2]])
; SSE2-NEXT: [[RINT3:%.*]] = call double @llvm.rint.f64(double [[LD3]])
; SSE2-NEXT: store double [[RINT0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[RINT1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: store double [[RINT2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE2-NEXT: store double [[RINT3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @rint_4f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP3]])
; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE41-NEXT: ret void
;
; AVX-LABEL: @rint_4f64(
; AVX-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.rint.v4f64(<4 x double> [[TMP1]])
; AVX-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX-NEXT: ret void
;
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
%ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
%rint0 = call double @llvm.rint.f64(double %ld0)
%rint1 = call double @llvm.rint.f64(double %ld1)
%rint2 = call double @llvm.rint.f64(double %ld2)
%rint3 = call double @llvm.rint.f64(double %ld3)
store double %rint0, ptr @dst64, align 8
store double %rint1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
store double %rint2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
store double %rint3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
ret void
}

define void @rint_8f64() #0 {
; SSE2-LABEL: @rint_8f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
; SSE2-NEXT: [[LD4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; SSE2-NEXT: [[LD5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
; SSE2-NEXT: [[LD6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
; SSE2-NEXT: [[LD7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
; SSE2-NEXT: [[RINT0:%.*]] = call double @llvm.rint.f64(double [[LD0]])
; SSE2-NEXT: [[RINT1:%.*]] = call double @llvm.rint.f64(double [[LD1]])
; SSE2-NEXT: [[RINT2:%.*]] = call double @llvm.rint.f64(double [[LD2]])
; SSE2-NEXT: [[RINT3:%.*]] = call double @llvm.rint.f64(double [[LD3]])
; SSE2-NEXT: [[RINT4:%.*]] = call double @llvm.rint.f64(double [[LD4]])
; SSE2-NEXT: [[RINT5:%.*]] = call double @llvm.rint.f64(double [[LD5]])
; SSE2-NEXT: [[RINT6:%.*]] = call double @llvm.rint.f64(double [[LD6]])
; SSE2-NEXT: [[RINT7:%.*]] = call double @llvm.rint.f64(double [[LD7]])
; SSE2-NEXT: store double [[RINT0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[RINT1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: store double [[RINT2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE2-NEXT: store double [[RINT3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
; SSE2-NEXT: store double [[RINT4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; SSE2-NEXT: store double [[RINT5]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
; SSE2-NEXT: store double [[RINT6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
; SSE2-NEXT: store double [[RINT7]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @rint_8f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP3]])
; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; SSE41-NEXT: [[TMP6:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP5]])
; SSE41-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; SSE41-NEXT: [[TMP7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
; SSE41-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP7]])
; SSE41-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
; SSE41-NEXT: ret void
;
; AVX1-LABEL: @rint_8f64(
; AVX1-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX1-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.rint.v4f64(<4 x double> [[TMP1]])
; AVX1-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX1-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; AVX1-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.rint.v4f64(<4 x double> [[TMP3]])
; AVX1-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; AVX1-NEXT: ret void
;
; AVX2-LABEL: @rint_8f64(
; AVX2-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX2-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.rint.v4f64(<4 x double> [[TMP1]])
; AVX2-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX2-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; AVX2-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.rint.v4f64(<4 x double> [[TMP3]])
; AVX2-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; AVX2-NEXT: ret void
;
; AVX512-LABEL: @rint_8f64(
; AVX512-NEXT: [[TMP1:%.*]] = load <8 x double>, ptr @src64, align 8
; AVX512-NEXT: [[TMP2:%.*]] = call <8 x double> @llvm.rint.v8f64(<8 x double> [[TMP1]])
; AVX512-NEXT: store <8 x double> [[TMP2]], ptr @dst64, align 8
; AVX512-NEXT: ret void
;
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
%ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
%ld4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
%ld5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
%ld6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
%ld7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
%rint0 = call double @llvm.rint.f64(double %ld0)
%rint1 = call double @llvm.rint.f64(double %ld1)
%rint2 = call double @llvm.rint.f64(double %ld2)
%rint3 = call double @llvm.rint.f64(double %ld3)
%rint4 = call double @llvm.rint.f64(double %ld4)
%rint5 = call double @llvm.rint.f64(double %ld5)
%rint6 = call double @llvm.rint.f64(double %ld6)
%rint7 = call double @llvm.rint.f64(double %ld7)
store double %rint0, ptr @dst64, align 8
store double %rint1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
store double %rint2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
store double %rint3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
store double %rint4, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
store double %rint5, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
store double %rint6, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
store double %rint7, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
ret void
}

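; llvm.trunc tests (round toward zero).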
define void @trunc_2f64() #0 {
; SSE2-LABEL: @trunc_2f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[TRUNC0:%.*]] = call double @llvm.trunc.f64(double [[LD0]])
; SSE2-NEXT: [[TRUNC1:%.*]] = call double @llvm.trunc.f64(double [[LD1]])
; SSE2-NEXT: store double [[TRUNC0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[TRUNC1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @trunc_2f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: ret void
;
; AVX-LABEL: @trunc_2f64(
; AVX-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; AVX-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP1]])
; AVX-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; AVX-NEXT: ret void
;
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%trunc0 = call double @llvm.trunc.f64(double %ld0)
%trunc1 = call double @llvm.trunc.f64(double %ld1)
store double %trunc0, ptr @dst64, align 8
store double %trunc1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
ret void
}

define void @trunc_4f64() #0 {
; SSE2-LABEL: @trunc_4f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
; SSE2-NEXT: [[TRUNC0:%.*]] = call double @llvm.trunc.f64(double [[LD0]])
; SSE2-NEXT: [[TRUNC1:%.*]] = call double @llvm.trunc.f64(double [[LD1]])
; SSE2-NEXT: [[TRUNC2:%.*]] = call double @llvm.trunc.f64(double [[LD2]])
; SSE2-NEXT: [[TRUNC3:%.*]] = call double @llvm.trunc.f64(double [[LD3]])
; SSE2-NEXT: store double [[TRUNC0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[TRUNC1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: store double [[TRUNC2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE2-NEXT: store double [[TRUNC3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @trunc_4f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP3]])
; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE41-NEXT: ret void
;
; AVX-LABEL: @trunc_4f64(
; AVX-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.trunc.v4f64(<4 x double> [[TMP1]])
; AVX-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX-NEXT: ret void
;
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
%ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
%trunc0 = call double @llvm.trunc.f64(double %ld0)
%trunc1 = call double @llvm.trunc.f64(double %ld1)
%trunc2 = call double @llvm.trunc.f64(double %ld2)
%trunc3 = call double @llvm.trunc.f64(double %ld3)
store double %trunc0, ptr @dst64, align 8
store double %trunc1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
store double %trunc2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
store double %trunc3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
ret void
}

define void @trunc_8f64() #0 {
; SSE2-LABEL: @trunc_8f64(
; SSE2-NEXT: [[LD0:%.*]] = load double, ptr @src64, align 8
; SSE2-NEXT: [[LD1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE2-NEXT: [[LD2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE2-NEXT: [[LD3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
; SSE2-NEXT: [[LD4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; SSE2-NEXT: [[LD5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
; SSE2-NEXT: [[LD6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
; SSE2-NEXT: [[LD7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
; SSE2-NEXT: [[TRUNC0:%.*]] = call double @llvm.trunc.f64(double [[LD0]])
; SSE2-NEXT: [[TRUNC1:%.*]] = call double @llvm.trunc.f64(double [[LD1]])
; SSE2-NEXT: [[TRUNC2:%.*]] = call double @llvm.trunc.f64(double [[LD2]])
; SSE2-NEXT: [[TRUNC3:%.*]] = call double @llvm.trunc.f64(double [[LD3]])
; SSE2-NEXT: [[TRUNC4:%.*]] = call double @llvm.trunc.f64(double [[LD4]])
; SSE2-NEXT: [[TRUNC5:%.*]] = call double @llvm.trunc.f64(double [[LD5]])
; SSE2-NEXT: [[TRUNC6:%.*]] = call double @llvm.trunc.f64(double [[LD6]])
; SSE2-NEXT: [[TRUNC7:%.*]] = call double @llvm.trunc.f64(double [[LD7]])
; SSE2-NEXT: store double [[TRUNC0]], ptr @dst64, align 8
; SSE2-NEXT: store double [[TRUNC1]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
; SSE2-NEXT: store double [[TRUNC2]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE2-NEXT: store double [[TRUNC3]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
; SSE2-NEXT: store double [[TRUNC4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; SSE2-NEXT: store double [[TRUNC5]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
; SSE2-NEXT: store double [[TRUNC6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
; SSE2-NEXT: store double [[TRUNC7]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @trunc_8f64(
; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @src64, align 8
; SSE41-NEXT: [[TMP2:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP1]])
; SSE41-NEXT: store <2 x double> [[TMP2]], ptr @dst64, align 8
; SSE41-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP3]])
; SSE41-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE41-NEXT: [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; SSE41-NEXT: [[TMP6:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP5]])
; SSE41-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; SSE41-NEXT: [[TMP7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
; SSE41-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP7]])
; SSE41-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
; SSE41-NEXT: ret void
;
; AVX1-LABEL: @trunc_8f64(
; AVX1-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX1-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.trunc.v4f64(<4 x double> [[TMP1]])
; AVX1-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX1-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; AVX1-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.trunc.v4f64(<4 x double> [[TMP3]])
; AVX1-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; AVX1-NEXT: ret void
;
; AVX2-LABEL: @trunc_8f64(
; AVX2-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX2-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.trunc.v4f64(<4 x double> [[TMP1]])
; AVX2-NEXT: store <4 x double> [[TMP2]], ptr @dst64, align 8
; AVX2-NEXT: [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; AVX2-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.trunc.v4f64(<4 x double> [[TMP3]])
; AVX2-NEXT: store <4 x double> [[TMP4]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
; AVX2-NEXT: ret void
;
; AVX512-LABEL: @trunc_8f64(
; AVX512-NEXT: [[TMP1:%.*]] = load <8 x double>, ptr @src64, align 8
; AVX512-NEXT: [[TMP2:%.*]] = call <8 x double> @llvm.trunc.v8f64(<8 x double> [[TMP1]])
; AVX512-NEXT: store <8 x double> [[TMP2]], ptr @dst64, align 8
; AVX512-NEXT: ret void
;
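; trunc_8f64 is covered by four <2 x double> ops on SSE4.1, two <4 x double> ops on AVX,
; and a single <8 x double> op on AVX512; SSE2 has no packed rounding instructions, so
; the SLP vectorizer leaves the eight calls scalar.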
%ld0 = load double, ptr @src64, align 8
%ld1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
%ld2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
%ld3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
%ld4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
%ld5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
%ld6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
%ld7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
%trunc0 = call double @llvm.trunc.f64(double %ld0)
%trunc1 = call double @llvm.trunc.f64(double %ld1)
%trunc2 = call double @llvm.trunc.f64(double %ld2)
%trunc3 = call double @llvm.trunc.f64(double %ld3)
%trunc4 = call double @llvm.trunc.f64(double %ld4)
%trunc5 = call double @llvm.trunc.f64(double %ld5)
%trunc6 = call double @llvm.trunc.f64(double %ld6)
%trunc7 = call double @llvm.trunc.f64(double %ld7)
store double %trunc0, ptr @dst64, align 8
store double %trunc1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
store double %trunc2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
store double %trunc3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
store double %trunc4, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 8
store double %trunc5, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 8
store double %trunc6, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 8
store double %trunc7, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 8
ret void
}
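; ceil_4f32: a single <4 x float> llvm.ceil call covers the whole group on SSE4.1 and AVX;
; SSE2 keeps the four scalar calls.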
define void @ceil_4f32() #0 {
; SSE2-LABEL: @ceil_4f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[CEIL0:%.*]] = call float @llvm.ceil.f32(float [[LD0]])
; SSE2-NEXT: [[CEIL1:%.*]] = call float @llvm.ceil.f32(float [[LD1]])
; SSE2-NEXT: [[CEIL2:%.*]] = call float @llvm.ceil.f32(float [[LD2]])
; SSE2-NEXT: [[CEIL3:%.*]] = call float @llvm.ceil.f32(float [[LD3]])
; SSE2-NEXT: store float [[CEIL0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[CEIL1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[CEIL2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[CEIL3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @ceil_4f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: ret void
;
; AVX-LABEL: @ceil_4f32(
; AVX-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; AVX-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP1]])
; AVX-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; AVX-NEXT: ret void
;
%ld0 = load float, ptr @src32, align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
%ceil0 = call float @llvm.ceil.f32(float %ld0)
%ceil1 = call float @llvm.ceil.f32(float %ld1)
%ceil2 = call float @llvm.ceil.f32(float %ld2)
%ceil3 = call float @llvm.ceil.f32(float %ld3)
store float %ceil0, ptr @dst32, align 4
store float %ceil1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
store float %ceil2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
store float %ceil3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
ret void
}
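; ceil_8f32: two <4 x float> llvm.ceil ops on SSE4.1, one <8 x float> op on AVX; scalar on SSE2.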
define void @ceil_8f32() #0 {
; SSE2-LABEL: @ceil_8f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
; SSE2-NEXT: [[CEIL0:%.*]] = call float @llvm.ceil.f32(float [[LD0]])
; SSE2-NEXT: [[CEIL1:%.*]] = call float @llvm.ceil.f32(float [[LD1]])
; SSE2-NEXT: [[CEIL2:%.*]] = call float @llvm.ceil.f32(float [[LD2]])
; SSE2-NEXT: [[CEIL3:%.*]] = call float @llvm.ceil.f32(float [[LD3]])
; SSE2-NEXT: [[CEIL4:%.*]] = call float @llvm.ceil.f32(float [[LD4]])
; SSE2-NEXT: [[CEIL5:%.*]] = call float @llvm.ceil.f32(float [[LD5]])
; SSE2-NEXT: [[CEIL6:%.*]] = call float @llvm.ceil.f32(float [[LD6]])
; SSE2-NEXT: [[CEIL7:%.*]] = call float @llvm.ceil.f32(float [[LD7]])
; SSE2-NEXT: store float [[CEIL0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[CEIL1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[CEIL2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[CEIL3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: store float [[CEIL4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE2-NEXT: store float [[CEIL5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
; SSE2-NEXT: store float [[CEIL6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
; SSE2-NEXT: store float [[CEIL7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @ceil_8f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP3]])
; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE41-NEXT: ret void
;
; AVX-LABEL: @ceil_8f32(
; AVX-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.ceil.v8f32(<8 x float> [[TMP1]])
; AVX-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX-NEXT: ret void
;
%ld0 = load float, ptr @src32, align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
%ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
%ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
%ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
%ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
%ceil0 = call float @llvm.ceil.f32(float %ld0)
%ceil1 = call float @llvm.ceil.f32(float %ld1)
%ceil2 = call float @llvm.ceil.f32(float %ld2)
%ceil3 = call float @llvm.ceil.f32(float %ld3)
%ceil4 = call float @llvm.ceil.f32(float %ld4)
%ceil5 = call float @llvm.ceil.f32(float %ld5)
%ceil6 = call float @llvm.ceil.f32(float %ld6)
%ceil7 = call float @llvm.ceil.f32(float %ld7)
store float %ceil0, ptr @dst32, align 4
store float %ceil1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
store float %ceil2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
store float %ceil3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
store float %ceil4, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
store float %ceil5, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
store float %ceil6, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
store float %ceil7, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
ret void
}
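; ceil_16f32: four <4 x float> ops on SSE4.1, two <8 x float> ops on AVX1/AVX2, and one
; <16 x float> op on AVX512; scalar on SSE2.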
define void @ceil_16f32() #0 {
; SSE2-LABEL: @ceil_16f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
; SSE2-NEXT: [[LD8:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; SSE2-NEXT: [[LD9:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9), align 4
; SSE2-NEXT: [[LD10:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
; SSE2-NEXT: [[LD11:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
; SSE2-NEXT: [[LD12:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
; SSE2-NEXT: [[LD13:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
; SSE2-NEXT: [[LD14:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
; SSE2-NEXT: [[LD15:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
; SSE2-NEXT: [[CEIL0:%.*]] = call float @llvm.ceil.f32(float [[LD0]])
; SSE2-NEXT: [[CEIL1:%.*]] = call float @llvm.ceil.f32(float [[LD1]])
; SSE2-NEXT: [[CEIL2:%.*]] = call float @llvm.ceil.f32(float [[LD2]])
; SSE2-NEXT: [[CEIL3:%.*]] = call float @llvm.ceil.f32(float [[LD3]])
; SSE2-NEXT: [[CEIL4:%.*]] = call float @llvm.ceil.f32(float [[LD4]])
; SSE2-NEXT: [[CEIL5:%.*]] = call float @llvm.ceil.f32(float [[LD5]])
; SSE2-NEXT: [[CEIL6:%.*]] = call float @llvm.ceil.f32(float [[LD6]])
; SSE2-NEXT: [[CEIL7:%.*]] = call float @llvm.ceil.f32(float [[LD7]])
; SSE2-NEXT: [[CEIL8:%.*]] = call float @llvm.ceil.f32(float [[LD8]])
; SSE2-NEXT: [[CEIL9:%.*]] = call float @llvm.ceil.f32(float [[LD9]])
; SSE2-NEXT: [[CEIL10:%.*]] = call float @llvm.ceil.f32(float [[LD10]])
; SSE2-NEXT: [[CEIL11:%.*]] = call float @llvm.ceil.f32(float [[LD11]])
; SSE2-NEXT: [[CEIL12:%.*]] = call float @llvm.ceil.f32(float [[LD12]])
; SSE2-NEXT: [[CEIL13:%.*]] = call float @llvm.ceil.f32(float [[LD13]])
; SSE2-NEXT: [[CEIL14:%.*]] = call float @llvm.ceil.f32(float [[LD14]])
; SSE2-NEXT: [[CEIL15:%.*]] = call float @llvm.ceil.f32(float [[LD15]])
; SSE2-NEXT: store float [[CEIL0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[CEIL1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[CEIL2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[CEIL3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: store float [[CEIL4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE2-NEXT: store float [[CEIL5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
; SSE2-NEXT: store float [[CEIL6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
; SSE2-NEXT: store float [[CEIL7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
; SSE2-NEXT: store float [[CEIL8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; SSE2-NEXT: store float [[CEIL9]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9), align 4
; SSE2-NEXT: store float [[CEIL10]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
; SSE2-NEXT: store float [[CEIL11]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
; SSE2-NEXT: store float [[CEIL12]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
; SSE2-NEXT: store float [[CEIL13]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
; SSE2-NEXT: store float [[CEIL14]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
; SSE2-NEXT: store float [[CEIL15]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @ceil_16f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP3]])
; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; SSE41-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP5]])
; SSE41-NEXT: store <4 x float> [[TMP6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; SSE41-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
; SSE41-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP7]])
; SSE41-NEXT: store <4 x float> [[TMP8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
; SSE41-NEXT: ret void
;
; AVX1-LABEL: @ceil_16f32(
; AVX1-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX1-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.ceil.v8f32(<8 x float> [[TMP1]])
; AVX1-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX1-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; AVX1-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.ceil.v8f32(<8 x float> [[TMP3]])
; AVX1-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; AVX1-NEXT: ret void
;
; AVX2-LABEL: @ceil_16f32(
; AVX2-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX2-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.ceil.v8f32(<8 x float> [[TMP1]])
; AVX2-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX2-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; AVX2-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.ceil.v8f32(<8 x float> [[TMP3]])
; AVX2-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; AVX2-NEXT: ret void
;
; AVX512-LABEL: @ceil_16f32(
; AVX512-NEXT: [[TMP1:%.*]] = load <16 x float>, ptr @src32, align 4
; AVX512-NEXT: [[TMP2:%.*]] = call <16 x float> @llvm.ceil.v16f32(<16 x float> [[TMP1]])
; AVX512-NEXT: store <16 x float> [[TMP2]], ptr @dst32, align 4
; AVX512-NEXT: ret void
;
%ld0 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 0 ), align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1 ), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2 ), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3 ), align 4
%ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4 ), align 4
%ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5 ), align 4
%ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6 ), align 4
%ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7 ), align 4
%ld8 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8 ), align 4
%ld9 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9 ), align 4
%ld10 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
%ld11 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
%ld12 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
%ld13 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
%ld14 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
%ld15 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
%ceil0 = call float @llvm.ceil.f32(float %ld0 )
%ceil1 = call float @llvm.ceil.f32(float %ld1 )
%ceil2 = call float @llvm.ceil.f32(float %ld2 )
%ceil3 = call float @llvm.ceil.f32(float %ld3 )
%ceil4 = call float @llvm.ceil.f32(float %ld4 )
%ceil5 = call float @llvm.ceil.f32(float %ld5 )
%ceil6 = call float @llvm.ceil.f32(float %ld6 )
%ceil7 = call float @llvm.ceil.f32(float %ld7 )
%ceil8 = call float @llvm.ceil.f32(float %ld8 )
%ceil9 = call float @llvm.ceil.f32(float %ld9 )
%ceil10 = call float @llvm.ceil.f32(float %ld10)
%ceil11 = call float @llvm.ceil.f32(float %ld11)
%ceil12 = call float @llvm.ceil.f32(float %ld12)
%ceil13 = call float @llvm.ceil.f32(float %ld13)
%ceil14 = call float @llvm.ceil.f32(float %ld14)
%ceil15 = call float @llvm.ceil.f32(float %ld15)
store float %ceil0 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 0 ), align 4
store float %ceil1 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1 ), align 4
store float %ceil2 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2 ), align 4
store float %ceil3 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3 ), align 4
store float %ceil4 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4 ), align 4
store float %ceil5 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5 ), align 4
store float %ceil6 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6 ), align 4
store float %ceil7 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7 ), align 4
store float %ceil8 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8 ), align 4
store float %ceil9 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9 ), align 4
store float %ceil10, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
store float %ceil11, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
store float %ceil12, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
store float %ceil13, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
store float %ceil14, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
store float %ceil15, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
ret void
}
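; floor_4f32: same pattern as ceil_4f32, with one <4 x float> llvm.floor call on SSE4.1
; and AVX and scalar calls on SSE2.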
define void @floor_4f32() #0 {
; SSE2-LABEL: @floor_4f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[FLOOR0:%.*]] = call float @llvm.floor.f32(float [[LD0]])
; SSE2-NEXT: [[FLOOR1:%.*]] = call float @llvm.floor.f32(float [[LD1]])
; SSE2-NEXT: [[FLOOR2:%.*]] = call float @llvm.floor.f32(float [[LD2]])
; SSE2-NEXT: [[FLOOR3:%.*]] = call float @llvm.floor.f32(float [[LD3]])
; SSE2-NEXT: store float [[FLOOR0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[FLOOR1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[FLOOR2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[FLOOR3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @floor_4f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: ret void
;
; AVX-LABEL: @floor_4f32(
; AVX-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; AVX-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP1]])
; AVX-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; AVX-NEXT: ret void
;
%ld0 = load float, ptr @src32, align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
%floor0 = call float @llvm.floor.f32(float %ld0)
%floor1 = call float @llvm.floor.f32(float %ld1)
%floor2 = call float @llvm.floor.f32(float %ld2)
%floor3 = call float @llvm.floor.f32(float %ld3)
store float %floor0, ptr @dst32, align 4
store float %floor1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
store float %floor2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
store float %floor3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
ret void
}
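; floor_8f32: two <4 x float> llvm.floor ops on SSE4.1, one <8 x float> op on AVX; scalar on SSE2.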
define void @floor_8f32() #0 {
; SSE2-LABEL: @floor_8f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
; SSE2-NEXT: [[FLOOR0:%.*]] = call float @llvm.floor.f32(float [[LD0]])
; SSE2-NEXT: [[FLOOR1:%.*]] = call float @llvm.floor.f32(float [[LD1]])
; SSE2-NEXT: [[FLOOR2:%.*]] = call float @llvm.floor.f32(float [[LD2]])
; SSE2-NEXT: [[FLOOR3:%.*]] = call float @llvm.floor.f32(float [[LD3]])
; SSE2-NEXT: [[FLOOR4:%.*]] = call float @llvm.floor.f32(float [[LD4]])
; SSE2-NEXT: [[FLOOR5:%.*]] = call float @llvm.floor.f32(float [[LD5]])
; SSE2-NEXT: [[FLOOR6:%.*]] = call float @llvm.floor.f32(float [[LD6]])
; SSE2-NEXT: [[FLOOR7:%.*]] = call float @llvm.floor.f32(float [[LD7]])
; SSE2-NEXT: store float [[FLOOR0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[FLOOR1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[FLOOR2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[FLOOR3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: store float [[FLOOR4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE2-NEXT: store float [[FLOOR5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
; SSE2-NEXT: store float [[FLOOR6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
; SSE2-NEXT: store float [[FLOOR7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @floor_8f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP3]])
; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE41-NEXT: ret void
;
; AVX-LABEL: @floor_8f32(
; AVX-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.floor.v8f32(<8 x float> [[TMP1]])
; AVX-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX-NEXT: ret void
;
%ld0 = load float, ptr @src32, align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
%ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
%ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
%ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
%ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
%floor0 = call float @llvm.floor.f32(float %ld0)
%floor1 = call float @llvm.floor.f32(float %ld1)
%floor2 = call float @llvm.floor.f32(float %ld2)
%floor3 = call float @llvm.floor.f32(float %ld3)
%floor4 = call float @llvm.floor.f32(float %ld4)
%floor5 = call float @llvm.floor.f32(float %ld5)
%floor6 = call float @llvm.floor.f32(float %ld6)
%floor7 = call float @llvm.floor.f32(float %ld7)
store float %floor0, ptr @dst32, align 4
store float %floor1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
store float %floor2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
store float %floor3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
store float %floor4, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
store float %floor5, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
store float %floor6, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
store float %floor7, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
ret void
}
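; floor_16f32: four <4 x float> ops on SSE4.1, two <8 x float> ops on AVX1/AVX2, and one
; <16 x float> op on AVX512; scalar on SSE2.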
define void @floor_16f32() #0 {
; SSE2-LABEL: @floor_16f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
; SSE2-NEXT: [[LD8:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; SSE2-NEXT: [[LD9:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9), align 4
; SSE2-NEXT: [[LD10:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
; SSE2-NEXT: [[LD11:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
; SSE2-NEXT: [[LD12:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
; SSE2-NEXT: [[LD13:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
; SSE2-NEXT: [[LD14:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
; SSE2-NEXT: [[LD15:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
; SSE2-NEXT: [[FLOOR0:%.*]] = call float @llvm.floor.f32(float [[LD0]])
; SSE2-NEXT: [[FLOOR1:%.*]] = call float @llvm.floor.f32(float [[LD1]])
; SSE2-NEXT: [[FLOOR2:%.*]] = call float @llvm.floor.f32(float [[LD2]])
; SSE2-NEXT: [[FLOOR3:%.*]] = call float @llvm.floor.f32(float [[LD3]])
; SSE2-NEXT: [[FLOOR4:%.*]] = call float @llvm.floor.f32(float [[LD4]])
; SSE2-NEXT: [[FLOOR5:%.*]] = call float @llvm.floor.f32(float [[LD5]])
; SSE2-NEXT: [[FLOOR6:%.*]] = call float @llvm.floor.f32(float [[LD6]])
; SSE2-NEXT: [[FLOOR7:%.*]] = call float @llvm.floor.f32(float [[LD7]])
; SSE2-NEXT: [[FLOOR8:%.*]] = call float @llvm.floor.f32(float [[LD8]])
; SSE2-NEXT: [[FLOOR9:%.*]] = call float @llvm.floor.f32(float [[LD9]])
; SSE2-NEXT: [[FLOOR10:%.*]] = call float @llvm.floor.f32(float [[LD10]])
; SSE2-NEXT: [[FLOOR11:%.*]] = call float @llvm.floor.f32(float [[LD11]])
; SSE2-NEXT: [[FLOOR12:%.*]] = call float @llvm.floor.f32(float [[LD12]])
; SSE2-NEXT: [[FLOOR13:%.*]] = call float @llvm.floor.f32(float [[LD13]])
; SSE2-NEXT: [[FLOOR14:%.*]] = call float @llvm.floor.f32(float [[LD14]])
; SSE2-NEXT: [[FLOOR15:%.*]] = call float @llvm.floor.f32(float [[LD15]])
; SSE2-NEXT: store float [[FLOOR0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[FLOOR1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[FLOOR2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[FLOOR3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: store float [[FLOOR4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE2-NEXT: store float [[FLOOR5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
; SSE2-NEXT: store float [[FLOOR6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
; SSE2-NEXT: store float [[FLOOR7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
; SSE2-NEXT: store float [[FLOOR8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; SSE2-NEXT: store float [[FLOOR9]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9), align 4
; SSE2-NEXT: store float [[FLOOR10]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
; SSE2-NEXT: store float [[FLOOR11]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
; SSE2-NEXT: store float [[FLOOR12]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
; SSE2-NEXT: store float [[FLOOR13]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
; SSE2-NEXT: store float [[FLOOR14]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
; SSE2-NEXT: store float [[FLOOR15]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @floor_16f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP3]])
; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; SSE41-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP5]])
; SSE41-NEXT: store <4 x float> [[TMP6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; SSE41-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
; SSE41-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP7]])
; SSE41-NEXT: store <4 x float> [[TMP8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
; SSE41-NEXT: ret void
;
; AVX1-LABEL: @floor_16f32(
; AVX1-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX1-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.floor.v8f32(<8 x float> [[TMP1]])
; AVX1-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX1-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; AVX1-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.floor.v8f32(<8 x float> [[TMP3]])
; AVX1-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; AVX1-NEXT: ret void
;
; AVX2-LABEL: @floor_16f32(
; AVX2-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX2-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.floor.v8f32(<8 x float> [[TMP1]])
; AVX2-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX2-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; AVX2-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.floor.v8f32(<8 x float> [[TMP3]])
; AVX2-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; AVX2-NEXT: ret void
;
; AVX512-LABEL: @floor_16f32(
; AVX512-NEXT: [[TMP1:%.*]] = load <16 x float>, ptr @src32, align 4
; AVX512-NEXT: [[TMP2:%.*]] = call <16 x float> @llvm.floor.v16f32(<16 x float> [[TMP1]])
; AVX512-NEXT: store <16 x float> [[TMP2]], ptr @dst32, align 4
; AVX512-NEXT: ret void
;
%ld0 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 0 ), align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1 ), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2 ), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3 ), align 4
%ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4 ), align 4
%ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5 ), align 4
%ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6 ), align 4
%ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7 ), align 4
%ld8 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8 ), align 4
%ld9 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9 ), align 4
%ld10 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
%ld11 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
%ld12 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
%ld13 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
%ld14 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
%ld15 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
%floor0 = call float @llvm.floor.f32(float %ld0 )
%floor1 = call float @llvm.floor.f32(float %ld1 )
%floor2 = call float @llvm.floor.f32(float %ld2 )
%floor3 = call float @llvm.floor.f32(float %ld3 )
%floor4 = call float @llvm.floor.f32(float %ld4 )
%floor5 = call float @llvm.floor.f32(float %ld5 )
%floor6 = call float @llvm.floor.f32(float %ld6 )
%floor7 = call float @llvm.floor.f32(float %ld7 )
%floor8 = call float @llvm.floor.f32(float %ld8 )
%floor9 = call float @llvm.floor.f32(float %ld9 )
%floor10 = call float @llvm.floor.f32(float %ld10)
%floor11 = call float @llvm.floor.f32(float %ld11)
%floor12 = call float @llvm.floor.f32(float %ld12)
%floor13 = call float @llvm.floor.f32(float %ld13)
%floor14 = call float @llvm.floor.f32(float %ld14)
%floor15 = call float @llvm.floor.f32(float %ld15)
store float %floor0 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 0 ), align 4
store float %floor1 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1 ), align 4
store float %floor2 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2 ), align 4
store float %floor3 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3 ), align 4
store float %floor4 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4 ), align 4
store float %floor5 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5 ), align 4
store float %floor6 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6 ), align 4
store float %floor7 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7 ), align 4
store float %floor8 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8 ), align 4
store float %floor9 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9 ), align 4
store float %floor10, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
store float %floor11, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
store float %floor12, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
store float %floor13, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
store float %floor14, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
store float %floor15, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
ret void
}
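; nearbyint_4f32: one <4 x float> llvm.nearbyint call on SSE4.1 and AVX; SSE2 stays scalar.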
define void @nearbyint_4f32() #0 {
; SSE2-LABEL: @nearbyint_4f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[NEARBYINT0:%.*]] = call float @llvm.nearbyint.f32(float [[LD0]])
; SSE2-NEXT: [[NEARBYINT1:%.*]] = call float @llvm.nearbyint.f32(float [[LD1]])
; SSE2-NEXT: [[NEARBYINT2:%.*]] = call float @llvm.nearbyint.f32(float [[LD2]])
; SSE2-NEXT: [[NEARBYINT3:%.*]] = call float @llvm.nearbyint.f32(float [[LD3]])
; SSE2-NEXT: store float [[NEARBYINT0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[NEARBYINT1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[NEARBYINT2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[NEARBYINT3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @nearbyint_4f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: ret void
;
; AVX-LABEL: @nearbyint_4f32(
; AVX-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; AVX-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP1]])
; AVX-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; AVX-NEXT: ret void
;
%ld0 = load float, ptr @src32, align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
%nearbyint0 = call float @llvm.nearbyint.f32(float %ld0)
%nearbyint1 = call float @llvm.nearbyint.f32(float %ld1)
%nearbyint2 = call float @llvm.nearbyint.f32(float %ld2)
%nearbyint3 = call float @llvm.nearbyint.f32(float %ld3)
store float %nearbyint0, ptr @dst32, align 4
store float %nearbyint1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
store float %nearbyint2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
store float %nearbyint3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
ret void
}
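; nearbyint_8f32: two <4 x float> ops on SSE4.1, one <8 x float> op on AVX; scalar on SSE2.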
define void @nearbyint_8f32() #0 {
; SSE2-LABEL: @nearbyint_8f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
; SSE2-NEXT: [[NEARBYINT0:%.*]] = call float @llvm.nearbyint.f32(float [[LD0]])
; SSE2-NEXT: [[NEARBYINT1:%.*]] = call float @llvm.nearbyint.f32(float [[LD1]])
; SSE2-NEXT: [[NEARBYINT2:%.*]] = call float @llvm.nearbyint.f32(float [[LD2]])
; SSE2-NEXT: [[NEARBYINT3:%.*]] = call float @llvm.nearbyint.f32(float [[LD3]])
; SSE2-NEXT: [[NEARBYINT4:%.*]] = call float @llvm.nearbyint.f32(float [[LD4]])
; SSE2-NEXT: [[NEARBYINT5:%.*]] = call float @llvm.nearbyint.f32(float [[LD5]])
; SSE2-NEXT: [[NEARBYINT6:%.*]] = call float @llvm.nearbyint.f32(float [[LD6]])
; SSE2-NEXT: [[NEARBYINT7:%.*]] = call float @llvm.nearbyint.f32(float [[LD7]])
; SSE2-NEXT: store float [[NEARBYINT0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[NEARBYINT1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[NEARBYINT2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[NEARBYINT3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: store float [[NEARBYINT4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE2-NEXT: store float [[NEARBYINT5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
; SSE2-NEXT: store float [[NEARBYINT6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
; SSE2-NEXT: store float [[NEARBYINT7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @nearbyint_8f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP3]])
; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE41-NEXT: ret void
;
; AVX-LABEL: @nearbyint_8f32(
; AVX-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> [[TMP1]])
; AVX-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX-NEXT: ret void
;
%ld0 = load float, ptr @src32, align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
%ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
%ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
%ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
%ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
%nearbyint0 = call float @llvm.nearbyint.f32(float %ld0)
%nearbyint1 = call float @llvm.nearbyint.f32(float %ld1)
%nearbyint2 = call float @llvm.nearbyint.f32(float %ld2)
%nearbyint3 = call float @llvm.nearbyint.f32(float %ld3)
%nearbyint4 = call float @llvm.nearbyint.f32(float %ld4)
%nearbyint5 = call float @llvm.nearbyint.f32(float %ld5)
%nearbyint6 = call float @llvm.nearbyint.f32(float %ld6)
%nearbyint7 = call float @llvm.nearbyint.f32(float %ld7)
store float %nearbyint0, ptr @dst32, align 4
store float %nearbyint1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
store float %nearbyint2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
store float %nearbyint3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
store float %nearbyint4, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
store float %nearbyint5, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
store float %nearbyint6, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
store float %nearbyint7, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
ret void
}
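; nearbyint_16f32: four <4 x float> ops on SSE4.1, two <8 x float> ops on AVX1/AVX2, and
; one <16 x float> op on AVX512; scalar on SSE2.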
define void @nearbyint_16f32() #0 {
; SSE2-LABEL: @nearbyint_16f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
; SSE2-NEXT: [[LD8:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; SSE2-NEXT: [[LD9:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9), align 4
; SSE2-NEXT: [[LD10:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
; SSE2-NEXT: [[LD11:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
; SSE2-NEXT: [[LD12:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
; SSE2-NEXT: [[LD13:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
; SSE2-NEXT: [[LD14:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
; SSE2-NEXT: [[LD15:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
; SSE2-NEXT: [[NEARBYINT0:%.*]] = call float @llvm.nearbyint.f32(float [[LD0]])
; SSE2-NEXT: [[NEARBYINT1:%.*]] = call float @llvm.nearbyint.f32(float [[LD1]])
; SSE2-NEXT: [[NEARBYINT2:%.*]] = call float @llvm.nearbyint.f32(float [[LD2]])
; SSE2-NEXT: [[NEARBYINT3:%.*]] = call float @llvm.nearbyint.f32(float [[LD3]])
; SSE2-NEXT: [[NEARBYINT4:%.*]] = call float @llvm.nearbyint.f32(float [[LD4]])
; SSE2-NEXT: [[NEARBYINT5:%.*]] = call float @llvm.nearbyint.f32(float [[LD5]])
; SSE2-NEXT: [[NEARBYINT6:%.*]] = call float @llvm.nearbyint.f32(float [[LD6]])
; SSE2-NEXT: [[NEARBYINT7:%.*]] = call float @llvm.nearbyint.f32(float [[LD7]])
; SSE2-NEXT: [[NEARBYINT8:%.*]] = call float @llvm.nearbyint.f32(float [[LD8]])
; SSE2-NEXT: [[NEARBYINT9:%.*]] = call float @llvm.nearbyint.f32(float [[LD9]])
; SSE2-NEXT: [[NEARBYINT10:%.*]] = call float @llvm.nearbyint.f32(float [[LD10]])
; SSE2-NEXT: [[NEARBYINT11:%.*]] = call float @llvm.nearbyint.f32(float [[LD11]])
; SSE2-NEXT: [[NEARBYINT12:%.*]] = call float @llvm.nearbyint.f32(float [[LD12]])
; SSE2-NEXT: [[NEARBYINT13:%.*]] = call float @llvm.nearbyint.f32(float [[LD13]])
; SSE2-NEXT: [[NEARBYINT14:%.*]] = call float @llvm.nearbyint.f32(float [[LD14]])
; SSE2-NEXT: [[NEARBYINT15:%.*]] = call float @llvm.nearbyint.f32(float [[LD15]])
; SSE2-NEXT: store float [[NEARBYINT0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[NEARBYINT1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[NEARBYINT2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[NEARBYINT3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: store float [[NEARBYINT4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE2-NEXT: store float [[NEARBYINT5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
; SSE2-NEXT: store float [[NEARBYINT6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
; SSE2-NEXT: store float [[NEARBYINT7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
; SSE2-NEXT: store float [[NEARBYINT8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; SSE2-NEXT: store float [[NEARBYINT9]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9), align 4
; SSE2-NEXT: store float [[NEARBYINT10]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
; SSE2-NEXT: store float [[NEARBYINT11]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
; SSE2-NEXT: store float [[NEARBYINT12]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
; SSE2-NEXT: store float [[NEARBYINT13]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
; SSE2-NEXT: store float [[NEARBYINT14]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
; SSE2-NEXT: store float [[NEARBYINT15]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @nearbyint_16f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP3]])
; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; SSE41-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP5]])
; SSE41-NEXT: store <4 x float> [[TMP6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; SSE41-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
; SSE41-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP7]])
; SSE41-NEXT: store <4 x float> [[TMP8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
; SSE41-NEXT: ret void
;
; AVX1-LABEL: @nearbyint_16f32(
; AVX1-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX1-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> [[TMP1]])
; AVX1-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX1-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; AVX1-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> [[TMP3]])
; AVX1-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; AVX1-NEXT: ret void
;
; AVX2-LABEL: @nearbyint_16f32(
; AVX2-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX2-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> [[TMP1]])
; AVX2-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX2-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; AVX2-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> [[TMP3]])
; AVX2-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; AVX2-NEXT: ret void
;
; AVX512-LABEL: @nearbyint_16f32(
; AVX512-NEXT: [[TMP1:%.*]] = load <16 x float>, ptr @src32, align 4
; AVX512-NEXT: [[TMP2:%.*]] = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> [[TMP1]])
; AVX512-NEXT: store <16 x float> [[TMP2]], ptr @dst32, align 4
; AVX512-NEXT: ret void
;
%ld0 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 0 ), align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1 ), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2 ), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3 ), align 4
%ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4 ), align 4
%ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5 ), align 4
%ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6 ), align 4
%ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7 ), align 4
%ld8 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8 ), align 4
%ld9 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9 ), align 4
%ld10 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
%ld11 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
%ld12 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
%ld13 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
%ld14 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
%ld15 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
%nearbyint0 = call float @llvm.nearbyint.f32(float %ld0 )
%nearbyint1 = call float @llvm.nearbyint.f32(float %ld1 )
%nearbyint2 = call float @llvm.nearbyint.f32(float %ld2 )
%nearbyint3 = call float @llvm.nearbyint.f32(float %ld3 )
%nearbyint4 = call float @llvm.nearbyint.f32(float %ld4 )
%nearbyint5 = call float @llvm.nearbyint.f32(float %ld5 )
%nearbyint6 = call float @llvm.nearbyint.f32(float %ld6 )
%nearbyint7 = call float @llvm.nearbyint.f32(float %ld7 )
%nearbyint8 = call float @llvm.nearbyint.f32(float %ld8 )
%nearbyint9 = call float @llvm.nearbyint.f32(float %ld9 )
%nearbyint10 = call float @llvm.nearbyint.f32(float %ld10)
%nearbyint11 = call float @llvm.nearbyint.f32(float %ld11)
%nearbyint12 = call float @llvm.nearbyint.f32(float %ld12)
%nearbyint13 = call float @llvm.nearbyint.f32(float %ld13)
%nearbyint14 = call float @llvm.nearbyint.f32(float %ld14)
%nearbyint15 = call float @llvm.nearbyint.f32(float %ld15)
store float %nearbyint0 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 0 ), align 4
store float %nearbyint1 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1 ), align 4
store float %nearbyint2 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2 ), align 4
store float %nearbyint3 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3 ), align 4
store float %nearbyint4 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4 ), align 4
store float %nearbyint5 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5 ), align 4
store float %nearbyint6 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6 ), align 4
store float %nearbyint7 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7 ), align 4
store float %nearbyint8 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8 ), align 4
store float %nearbyint9 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9 ), align 4
store float %nearbyint10, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
store float %nearbyint11, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
store float %nearbyint12, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
store float %nearbyint13, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
store float %nearbyint14, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
store float %nearbyint15, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
ret void
}
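; llvm.rint tests: as with nearbyint above, the checks expect SSE2 to stay
; scalar (presumably because plain SSE2 lacks the SSE4.1 ROUNDPS/ROUNDSS
; family these rounding intrinsics lower to), while SSE41 and the AVX
; targets vectorize at their native register width.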
define void @rint_4f32() #0 {
; SSE2-LABEL: @rint_4f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[RINT0:%.*]] = call float @llvm.rint.f32(float [[LD0]])
; SSE2-NEXT: [[RINT1:%.*]] = call float @llvm.rint.f32(float [[LD1]])
; SSE2-NEXT: [[RINT2:%.*]] = call float @llvm.rint.f32(float [[LD2]])
; SSE2-NEXT: [[RINT3:%.*]] = call float @llvm.rint.f32(float [[LD3]])
; SSE2-NEXT: store float [[RINT0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[RINT1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[RINT2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[RINT3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @rint_4f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: ret void
;
; AVX-LABEL: @rint_4f32(
; AVX-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; AVX-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP1]])
; AVX-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; AVX-NEXT: ret void
;
%ld0 = load float, ptr @src32, align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
%rint0 = call float @llvm.rint.f32(float %ld0)
%rint1 = call float @llvm.rint.f32(float %ld1)
%rint2 = call float @llvm.rint.f32(float %ld2)
%rint3 = call float @llvm.rint.f32(float %ld3)
store float %rint0, ptr @dst32, align 4
store float %rint1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
store float %rint2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
store float %rint3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
ret void
}
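; rint_8f32: per the checks, SSE41 should split the 8 floats into two
; <4 x float> @llvm.rint.v4f32 calls, while a single <8 x float> call
; suffices on all AVX targets.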
define void @rint_8f32() #0 {
; SSE2-LABEL: @rint_8f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
; SSE2-NEXT: [[RINT0:%.*]] = call float @llvm.rint.f32(float [[LD0]])
; SSE2-NEXT: [[RINT1:%.*]] = call float @llvm.rint.f32(float [[LD1]])
; SSE2-NEXT: [[RINT2:%.*]] = call float @llvm.rint.f32(float [[LD2]])
; SSE2-NEXT: [[RINT3:%.*]] = call float @llvm.rint.f32(float [[LD3]])
; SSE2-NEXT: [[RINT4:%.*]] = call float @llvm.rint.f32(float [[LD4]])
; SSE2-NEXT: [[RINT5:%.*]] = call float @llvm.rint.f32(float [[LD5]])
; SSE2-NEXT: [[RINT6:%.*]] = call float @llvm.rint.f32(float [[LD6]])
; SSE2-NEXT: [[RINT7:%.*]] = call float @llvm.rint.f32(float [[LD7]])
; SSE2-NEXT: store float [[RINT0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[RINT1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[RINT2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[RINT3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: store float [[RINT4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE2-NEXT: store float [[RINT5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
; SSE2-NEXT: store float [[RINT6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
; SSE2-NEXT: store float [[RINT7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @rint_8f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP3]])
; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE41-NEXT: ret void
;
; AVX-LABEL: @rint_8f32(
; AVX-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.rint.v8f32(<8 x float> [[TMP1]])
; AVX-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX-NEXT: ret void
;
%ld0 = load float, ptr @src32, align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
%ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
%ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
%ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
%ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
%rint0 = call float @llvm.rint.f32(float %ld0)
%rint1 = call float @llvm.rint.f32(float %ld1)
%rint2 = call float @llvm.rint.f32(float %ld2)
%rint3 = call float @llvm.rint.f32(float %ld3)
%rint4 = call float @llvm.rint.f32(float %ld4)
%rint5 = call float @llvm.rint.f32(float %ld5)
%rint6 = call float @llvm.rint.f32(float %ld6)
%rint7 = call float @llvm.rint.f32(float %ld7)
store float %rint0, ptr @dst32, align 4
store float %rint1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
store float %rint2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
store float %rint3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
store float %rint4, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
store float %rint5, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
store float %rint6, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
store float %rint7, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
ret void
}
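; rint_16f32: the expected widening is four <4 x float> calls on SSE41,
; two <8 x float> calls on AVX1/AVX2, and one <16 x float> call when
; 512-bit vectors are preferred (the AVX512 prefix).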
define void @rint_16f32() #0 {
; SSE2-LABEL: @rint_16f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
; SSE2-NEXT: [[LD8:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; SSE2-NEXT: [[LD9:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9), align 4
; SSE2-NEXT: [[LD10:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
; SSE2-NEXT: [[LD11:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
; SSE2-NEXT: [[LD12:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
; SSE2-NEXT: [[LD13:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
; SSE2-NEXT: [[LD14:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
; SSE2-NEXT: [[LD15:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
; SSE2-NEXT: [[RINT0:%.*]] = call float @llvm.rint.f32(float [[LD0]])
; SSE2-NEXT: [[RINT1:%.*]] = call float @llvm.rint.f32(float [[LD1]])
; SSE2-NEXT: [[RINT2:%.*]] = call float @llvm.rint.f32(float [[LD2]])
; SSE2-NEXT: [[RINT3:%.*]] = call float @llvm.rint.f32(float [[LD3]])
; SSE2-NEXT: [[RINT4:%.*]] = call float @llvm.rint.f32(float [[LD4]])
; SSE2-NEXT: [[RINT5:%.*]] = call float @llvm.rint.f32(float [[LD5]])
; SSE2-NEXT: [[RINT6:%.*]] = call float @llvm.rint.f32(float [[LD6]])
; SSE2-NEXT: [[RINT7:%.*]] = call float @llvm.rint.f32(float [[LD7]])
; SSE2-NEXT: [[RINT8:%.*]] = call float @llvm.rint.f32(float [[LD8]])
; SSE2-NEXT: [[RINT9:%.*]] = call float @llvm.rint.f32(float [[LD9]])
; SSE2-NEXT: [[RINT10:%.*]] = call float @llvm.rint.f32(float [[LD10]])
; SSE2-NEXT: [[RINT11:%.*]] = call float @llvm.rint.f32(float [[LD11]])
; SSE2-NEXT: [[RINT12:%.*]] = call float @llvm.rint.f32(float [[LD12]])
; SSE2-NEXT: [[RINT13:%.*]] = call float @llvm.rint.f32(float [[LD13]])
; SSE2-NEXT: [[RINT14:%.*]] = call float @llvm.rint.f32(float [[LD14]])
; SSE2-NEXT: [[RINT15:%.*]] = call float @llvm.rint.f32(float [[LD15]])
; SSE2-NEXT: store float [[RINT0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[RINT1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[RINT2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[RINT3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: store float [[RINT4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE2-NEXT: store float [[RINT5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
; SSE2-NEXT: store float [[RINT6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
; SSE2-NEXT: store float [[RINT7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
; SSE2-NEXT: store float [[RINT8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; SSE2-NEXT: store float [[RINT9]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9), align 4
; SSE2-NEXT: store float [[RINT10]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
; SSE2-NEXT: store float [[RINT11]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
; SSE2-NEXT: store float [[RINT12]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
; SSE2-NEXT: store float [[RINT13]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
; SSE2-NEXT: store float [[RINT14]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
; SSE2-NEXT: store float [[RINT15]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @rint_16f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP3]])
; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; SSE41-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP5]])
; SSE41-NEXT: store <4 x float> [[TMP6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; SSE41-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
; SSE41-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP7]])
; SSE41-NEXT: store <4 x float> [[TMP8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
; SSE41-NEXT: ret void
;
; AVX1-LABEL: @rint_16f32(
; AVX1-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX1-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.rint.v8f32(<8 x float> [[TMP1]])
; AVX1-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX1-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; AVX1-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.rint.v8f32(<8 x float> [[TMP3]])
; AVX1-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; AVX1-NEXT: ret void
;
; AVX2-LABEL: @rint_16f32(
; AVX2-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX2-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.rint.v8f32(<8 x float> [[TMP1]])
; AVX2-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX2-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; AVX2-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.rint.v8f32(<8 x float> [[TMP3]])
; AVX2-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; AVX2-NEXT: ret void
;
; AVX512-LABEL: @rint_16f32(
; AVX512-NEXT: [[TMP1:%.*]] = load <16 x float>, ptr @src32, align 4
; AVX512-NEXT: [[TMP2:%.*]] = call <16 x float> @llvm.rint.v16f32(<16 x float> [[TMP1]])
; AVX512-NEXT: store <16 x float> [[TMP2]], ptr @dst32, align 4
; AVX512-NEXT: ret void
;
%ld0 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 0 ), align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1 ), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2 ), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3 ), align 4
%ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4 ), align 4
%ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5 ), align 4
%ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6 ), align 4
%ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7 ), align 4
%ld8 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8 ), align 4
%ld9 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9 ), align 4
%ld10 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
%ld11 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
%ld12 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
%ld13 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
%ld14 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
%ld15 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
%rint0 = call float @llvm.rint.f32(float %ld0 )
%rint1 = call float @llvm.rint.f32(float %ld1 )
%rint2 = call float @llvm.rint.f32(float %ld2 )
%rint3 = call float @llvm.rint.f32(float %ld3 )
%rint4 = call float @llvm.rint.f32(float %ld4 )
%rint5 = call float @llvm.rint.f32(float %ld5 )
%rint6 = call float @llvm.rint.f32(float %ld6 )
%rint7 = call float @llvm.rint.f32(float %ld7 )
%rint8 = call float @llvm.rint.f32(float %ld8 )
%rint9 = call float @llvm.rint.f32(float %ld9 )
%rint10 = call float @llvm.rint.f32(float %ld10)
%rint11 = call float @llvm.rint.f32(float %ld11)
%rint12 = call float @llvm.rint.f32(float %ld12)
%rint13 = call float @llvm.rint.f32(float %ld13)
%rint14 = call float @llvm.rint.f32(float %ld14)
%rint15 = call float @llvm.rint.f32(float %ld15)
store float %rint0 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 0 ), align 4
store float %rint1 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1 ), align 4
store float %rint2 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2 ), align 4
store float %rint3 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3 ), align 4
store float %rint4 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4 ), align 4
store float %rint5 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5 ), align 4
store float %rint6 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6 ), align 4
store float %rint7 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7 ), align 4
store float %rint8 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8 ), align 4
store float %rint9 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9 ), align 4
store float %rint10, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
store float %rint11, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
store float %rint12, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
store float %rint13, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
store float %rint14, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
store float %rint15, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
ret void
}
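; llvm.trunc tests: same pattern again; trunc is also expected to map to
; the SSE4.1 rounding instructions, so SSE2 remains scalar and the later
; targets vectorize at their preferred register width.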
define void @trunc_4f32() #0 {
; SSE2-LABEL: @trunc_4f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[TRUNC0:%.*]] = call float @llvm.trunc.f32(float [[LD0]])
; SSE2-NEXT: [[TRUNC1:%.*]] = call float @llvm.trunc.f32(float [[LD1]])
; SSE2-NEXT: [[TRUNC2:%.*]] = call float @llvm.trunc.f32(float [[LD2]])
; SSE2-NEXT: [[TRUNC3:%.*]] = call float @llvm.trunc.f32(float [[LD3]])
; SSE2-NEXT: store float [[TRUNC0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[TRUNC1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[TRUNC2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[TRUNC3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @trunc_4f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: ret void
;
; AVX-LABEL: @trunc_4f32(
; AVX-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; AVX-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP1]])
; AVX-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; AVX-NEXT: ret void
;
%ld0 = load float, ptr @src32, align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
%trunc0 = call float @llvm.trunc.f32(float %ld0)
%trunc1 = call float @llvm.trunc.f32(float %ld1)
%trunc2 = call float @llvm.trunc.f32(float %ld2)
%trunc3 = call float @llvm.trunc.f32(float %ld3)
store float %trunc0, ptr @dst32, align 4
store float %trunc1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
store float %trunc2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
store float %trunc3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
ret void
}
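; trunc_8f32: two <4 x float> @llvm.trunc.v4f32 calls on SSE41 versus one
; <8 x float> call on AVX and above, mirroring rint_8f32.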
define void @trunc_8f32() #0 {
; SSE2-LABEL: @trunc_8f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
; SSE2-NEXT: [[TRUNC0:%.*]] = call float @llvm.trunc.f32(float [[LD0]])
; SSE2-NEXT: [[TRUNC1:%.*]] = call float @llvm.trunc.f32(float [[LD1]])
; SSE2-NEXT: [[TRUNC2:%.*]] = call float @llvm.trunc.f32(float [[LD2]])
; SSE2-NEXT: [[TRUNC3:%.*]] = call float @llvm.trunc.f32(float [[LD3]])
; SSE2-NEXT: [[TRUNC4:%.*]] = call float @llvm.trunc.f32(float [[LD4]])
; SSE2-NEXT: [[TRUNC5:%.*]] = call float @llvm.trunc.f32(float [[LD5]])
; SSE2-NEXT: [[TRUNC6:%.*]] = call float @llvm.trunc.f32(float [[LD6]])
; SSE2-NEXT: [[TRUNC7:%.*]] = call float @llvm.trunc.f32(float [[LD7]])
; SSE2-NEXT: store float [[TRUNC0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[TRUNC1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[TRUNC2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[TRUNC3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: store float [[TRUNC4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE2-NEXT: store float [[TRUNC5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
; SSE2-NEXT: store float [[TRUNC6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
; SSE2-NEXT: store float [[TRUNC7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @trunc_8f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP3]])
; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE41-NEXT: ret void
;
; AVX-LABEL: @trunc_8f32(
; AVX-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.trunc.v8f32(<8 x float> [[TMP1]])
; AVX-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX-NEXT: ret void
;
%ld0 = load float, ptr @src32, align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
%ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
%ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
%ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
%ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
%trunc0 = call float @llvm.trunc.f32(float %ld0)
%trunc1 = call float @llvm.trunc.f32(float %ld1)
%trunc2 = call float @llvm.trunc.f32(float %ld2)
%trunc3 = call float @llvm.trunc.f32(float %ld3)
%trunc4 = call float @llvm.trunc.f32(float %ld4)
%trunc5 = call float @llvm.trunc.f32(float %ld5)
%trunc6 = call float @llvm.trunc.f32(float %ld6)
%trunc7 = call float @llvm.trunc.f32(float %ld7)
store float %trunc0, ptr @dst32, align 4
store float %trunc1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
store float %trunc2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
store float %trunc3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
store float %trunc4, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
store float %trunc5, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
store float %trunc6, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
store float %trunc7, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
ret void
}
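; trunc_16f32: SSE41 uses four <4 x float> calls, AVX1/AVX2 use two
; <8 x float> calls, and AVX512 collapses the whole array into a single
; <16 x float> call.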
define void @trunc_16f32() #0 {
; SSE2-LABEL: @trunc_16f32(
; SSE2-NEXT: [[LD0:%.*]] = load float, ptr @src32, align 4
; SSE2-NEXT: [[LD1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE2-NEXT: [[LD2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE2-NEXT: [[LD3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE2-NEXT: [[LD4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE2-NEXT: [[LD5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
; SSE2-NEXT: [[LD6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
; SSE2-NEXT: [[LD7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
; SSE2-NEXT: [[LD8:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; SSE2-NEXT: [[LD9:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9), align 4
; SSE2-NEXT: [[LD10:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
; SSE2-NEXT: [[LD11:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
; SSE2-NEXT: [[LD12:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
; SSE2-NEXT: [[LD13:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
; SSE2-NEXT: [[LD14:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
; SSE2-NEXT: [[LD15:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
; SSE2-NEXT: [[TRUNC0:%.*]] = call float @llvm.trunc.f32(float [[LD0]])
; SSE2-NEXT: [[TRUNC1:%.*]] = call float @llvm.trunc.f32(float [[LD1]])
; SSE2-NEXT: [[TRUNC2:%.*]] = call float @llvm.trunc.f32(float [[LD2]])
; SSE2-NEXT: [[TRUNC3:%.*]] = call float @llvm.trunc.f32(float [[LD3]])
; SSE2-NEXT: [[TRUNC4:%.*]] = call float @llvm.trunc.f32(float [[LD4]])
; SSE2-NEXT: [[TRUNC5:%.*]] = call float @llvm.trunc.f32(float [[LD5]])
; SSE2-NEXT: [[TRUNC6:%.*]] = call float @llvm.trunc.f32(float [[LD6]])
; SSE2-NEXT: [[TRUNC7:%.*]] = call float @llvm.trunc.f32(float [[LD7]])
; SSE2-NEXT: [[TRUNC8:%.*]] = call float @llvm.trunc.f32(float [[LD8]])
; SSE2-NEXT: [[TRUNC9:%.*]] = call float @llvm.trunc.f32(float [[LD9]])
; SSE2-NEXT: [[TRUNC10:%.*]] = call float @llvm.trunc.f32(float [[LD10]])
; SSE2-NEXT: [[TRUNC11:%.*]] = call float @llvm.trunc.f32(float [[LD11]])
; SSE2-NEXT: [[TRUNC12:%.*]] = call float @llvm.trunc.f32(float [[LD12]])
; SSE2-NEXT: [[TRUNC13:%.*]] = call float @llvm.trunc.f32(float [[LD13]])
; SSE2-NEXT: [[TRUNC14:%.*]] = call float @llvm.trunc.f32(float [[LD14]])
; SSE2-NEXT: [[TRUNC15:%.*]] = call float @llvm.trunc.f32(float [[LD15]])
; SSE2-NEXT: store float [[TRUNC0]], ptr @dst32, align 4
; SSE2-NEXT: store float [[TRUNC1]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
; SSE2-NEXT: store float [[TRUNC2]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
; SSE2-NEXT: store float [[TRUNC3]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
; SSE2-NEXT: store float [[TRUNC4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE2-NEXT: store float [[TRUNC5]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
; SSE2-NEXT: store float [[TRUNC6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
; SSE2-NEXT: store float [[TRUNC7]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
; SSE2-NEXT: store float [[TRUNC8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; SSE2-NEXT: store float [[TRUNC9]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9), align 4
; SSE2-NEXT: store float [[TRUNC10]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
; SSE2-NEXT: store float [[TRUNC11]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
; SSE2-NEXT: store float [[TRUNC12]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
; SSE2-NEXT: store float [[TRUNC13]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
; SSE2-NEXT: store float [[TRUNC14]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
; SSE2-NEXT: store float [[TRUNC15]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
; SSE2-NEXT: ret void
;
; SSE41-LABEL: @trunc_16f32(
; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE41-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP1]])
; SSE41-NEXT: store <4 x float> [[TMP2]], ptr @dst32, align 4
; SSE41-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP3]])
; SSE41-NEXT: store <4 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE41-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; SSE41-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP5]])
; SSE41-NEXT: store <4 x float> [[TMP6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; SSE41-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
; SSE41-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP7]])
; SSE41-NEXT: store <4 x float> [[TMP8]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
; SSE41-NEXT: ret void
;
; AVX1-LABEL: @trunc_16f32(
; AVX1-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX1-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.trunc.v8f32(<8 x float> [[TMP1]])
; AVX1-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX1-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; AVX1-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.trunc.v8f32(<8 x float> [[TMP3]])
; AVX1-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; AVX1-NEXT: ret void
;
; AVX2-LABEL: @trunc_16f32(
; AVX2-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX2-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.trunc.v8f32(<8 x float> [[TMP1]])
; AVX2-NEXT: store <8 x float> [[TMP2]], ptr @dst32, align 4
; AVX2-NEXT: [[TMP3:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8), align 4
; AVX2-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.trunc.v8f32(<8 x float> [[TMP3]])
; AVX2-NEXT: store <8 x float> [[TMP4]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; AVX2-NEXT: ret void
;
; AVX512-LABEL: @trunc_16f32(
; AVX512-NEXT: [[TMP1:%.*]] = load <16 x float>, ptr @src32, align 4
; AVX512-NEXT: [[TMP2:%.*]] = call <16 x float> @llvm.trunc.v16f32(<16 x float> [[TMP1]])
; AVX512-NEXT: store <16 x float> [[TMP2]], ptr @dst32, align 4
; AVX512-NEXT: ret void
;
%ld0 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 0 ), align 4
%ld1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1 ), align 4
%ld2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2 ), align 4
%ld3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3 ), align 4
%ld4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4 ), align 4
%ld5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5 ), align 4
%ld6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6 ), align 4
%ld7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7 ), align 4
%ld8 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 8 ), align 4
%ld9 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 9 ), align 4
%ld10 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 10), align 4
%ld11 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 11), align 4
%ld12 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 12), align 4
%ld13 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 13), align 4
%ld14 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 14), align 4
%ld15 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 15), align 4
%trunc0 = call float @llvm.trunc.f32(float %ld0 )
%trunc1 = call float @llvm.trunc.f32(float %ld1 )
%trunc2 = call float @llvm.trunc.f32(float %ld2 )
%trunc3 = call float @llvm.trunc.f32(float %ld3 )
%trunc4 = call float @llvm.trunc.f32(float %ld4 )
%trunc5 = call float @llvm.trunc.f32(float %ld5 )
%trunc6 = call float @llvm.trunc.f32(float %ld6 )
%trunc7 = call float @llvm.trunc.f32(float %ld7 )
%trunc8 = call float @llvm.trunc.f32(float %ld8 )
%trunc9 = call float @llvm.trunc.f32(float %ld9 )
%trunc10 = call float @llvm.trunc.f32(float %ld10)
%trunc11 = call float @llvm.trunc.f32(float %ld11)
%trunc12 = call float @llvm.trunc.f32(float %ld12)
%trunc13 = call float @llvm.trunc.f32(float %ld13)
%trunc14 = call float @llvm.trunc.f32(float %ld14)
%trunc15 = call float @llvm.trunc.f32(float %ld15)
store float %trunc0 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 0 ), align 4
store float %trunc1 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1 ), align 4
store float %trunc2 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2 ), align 4
store float %trunc3 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3 ), align 4
store float %trunc4 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4 ), align 4
store float %trunc5 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5 ), align 4
store float %trunc6 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6 ), align 4
store float %trunc7 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7 ), align 4
store float %trunc8 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8 ), align 4
store float %trunc9 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9 ), align 4
store float %trunc10, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
store float %trunc11, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
store float %trunc12, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
store float %trunc13, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
store float %trunc14, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
store float %trunc15, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
ret void
}
attributes #0 = { nounwind }