; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=bdver1 -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skylake-avx512 -mattr=-prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skylake-avx512 -mattr=+prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256
@srcA64 = common global [8 x double] zeroinitializer, align 64
@srcB64 = common global [8 x double] zeroinitializer, align 64
@srcC64 = common global [8 x double] zeroinitializer, align 64
@srcA32 = common global [16 x float] zeroinitializer, align 64
@srcB32 = common global [16 x float] zeroinitializer, align 64
@srcC32 = common global [16 x float] zeroinitializer, align 64
@dst64 = common global [8 x double] zeroinitializer, align 64
@dst32 = common global [16 x float] zeroinitializer, align 64
declare float @llvm.minnum.f32(float, float)
declare double @llvm.minnum.f64(double, double)
;
; Vectorization of paired @llvm.minnum calls over consecutive loads/stores.
;
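; All targets vectorize the two scalar @llvm.minnum.f64 calls into a single <2 x double> @llvm.minnum.v2f64.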
define void @fminnum_2f64() #0 {
; CHECK-LABEL: @fminnum_2f64(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @srcA64, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr @srcB64, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call <2 x double> @llvm.minnum.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP2]])
; CHECK-NEXT: store <2 x double> [[TMP3]], ptr @dst64, align 8
; CHECK-NEXT: ret void
;
%a0 = load double, ptr @srcA64, align 8
%a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 1), align 8
%b0 = load double, ptr @srcB64, align 8
%b1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 1), align 8
%fminnum0 = call double @llvm.minnum.f64(double %a0, double %b0)
%fminnum1 = call double @llvm.minnum.f64(double %a1, double %b1)
store double %fminnum0, ptr @dst64, align 8
store double %fminnum1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
ret void
}
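
; SSE splits the four lanes into two <2 x double> calls; AVX targets use a single <4 x double> call.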
define void @fminnum_4f64() #0 {
; SSE-LABEL: @fminnum_4f64(
; SSE-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @srcA64, align 8
; SSE-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr @srcB64, align 8
; SSE-NEXT: [[TMP3:%.*]] = call <2 x double> @llvm.minnum.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP2]])
; SSE-NEXT: store <2 x double> [[TMP3]], ptr @dst64, align 8
; SSE-NEXT: [[TMP4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 2), align 8
; SSE-NEXT: [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 2), align 8
; SSE-NEXT: [[TMP6:%.*]] = call <2 x double> @llvm.minnum.v2f64(<2 x double> [[TMP4]], <2 x double> [[TMP5]])
; SSE-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
; SSE-NEXT: ret void
;
; AVX-LABEL: @fminnum_4f64(
; AVX-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @srcA64, align 8
; AVX-NEXT: [[TMP2:%.*]] = load <4 x double>, ptr @srcB64, align 8
; AVX-NEXT: [[TMP3:%.*]] = call <4 x double> @llvm.minnum.v4f64(<4 x double> [[TMP1]], <4 x double> [[TMP2]])
; AVX-NEXT: store <4 x double> [[TMP3]], ptr @dst64, align 8
; AVX-NEXT: ret void
;
%a0 = load double, ptr @srcA64, align 8
%a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 1), align 8
%a2 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 2), align 8
%a3 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 3), align 8
%b0 = load double, ptr @srcB64, align 8
%b1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 1), align 8
%b2 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 2), align 8
%b3 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 3), align 8
%fminnum0 = call double @llvm.minnum.f64(double %a0, double %b0)
%fminnum1 = call double @llvm.minnum.f64(double %a1, double %b1)
%fminnum2 = call double @llvm.minnum.f64(double %a2, double %b2)
%fminnum3 = call double @llvm.minnum.f64(double %a3, double %b3)
store double %fminnum0, ptr @dst64, align 8
store double %fminnum1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
store double %fminnum2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
store double %fminnum3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
ret void
}
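
; SSE uses four <2 x double> calls, AVX256 two <4 x double> calls, and AVX512 a single <8 x double> call.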
define void @fminnum_8f64() #0 {
; SSE-LABEL: @fminnum_8f64(
; SSE-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @srcA64, align 4
; SSE-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr @srcB64, align 4
; SSE-NEXT: [[TMP3:%.*]] = call <2 x double> @llvm.minnum.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP2]])
; SSE-NEXT: store <2 x double> [[TMP3]], ptr @dst64, align 4
; SSE-NEXT: [[TMP4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 2), align 4
; SSE-NEXT: [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 2), align 4
; SSE-NEXT: [[TMP6:%.*]] = call <2 x double> @llvm.minnum.v2f64(<2 x double> [[TMP4]], <2 x double> [[TMP5]])
; SSE-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 4
; SSE-NEXT: [[TMP7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 4), align 4
; SSE-NEXT: [[TMP8:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 4), align 4
; SSE-NEXT: [[TMP9:%.*]] = call <2 x double> @llvm.minnum.v2f64(<2 x double> [[TMP7]], <2 x double> [[TMP8]])
; SSE-NEXT: store <2 x double> [[TMP9]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 4
; SSE-NEXT: [[TMP10:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 6), align 4
; SSE-NEXT: [[TMP11:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 6), align 4
; SSE-NEXT: [[TMP12:%.*]] = call <2 x double> @llvm.minnum.v2f64(<2 x double> [[TMP10]], <2 x double> [[TMP11]])
; SSE-NEXT: store <2 x double> [[TMP12]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 4
; SSE-NEXT: ret void
;
; AVX256-LABEL: @fminnum_8f64(
; AVX256-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr @srcA64, align 4
; AVX256-NEXT: [[TMP2:%.*]] = load <4 x double>, ptr @srcB64, align 4
; AVX256-NEXT: [[TMP3:%.*]] = call <4 x double> @llvm.minnum.v4f64(<4 x double> [[TMP1]], <4 x double> [[TMP2]])
; AVX256-NEXT: store <4 x double> [[TMP3]], ptr @dst64, align 4
; AVX256-NEXT: [[TMP4:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 4), align 4
; AVX256-NEXT: [[TMP5:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 4), align 4
; AVX256-NEXT: [[TMP6:%.*]] = call <4 x double> @llvm.minnum.v4f64(<4 x double> [[TMP4]], <4 x double> [[TMP5]])
; AVX256-NEXT: store <4 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 4
; AVX256-NEXT: ret void
;
; AVX512-LABEL: @fminnum_8f64(
; AVX512-NEXT: [[TMP1:%.*]] = load <8 x double>, ptr @srcA64, align 4
; AVX512-NEXT: [[TMP2:%.*]] = load <8 x double>, ptr @srcB64, align 4
; AVX512-NEXT: [[TMP3:%.*]] = call <8 x double> @llvm.minnum.v8f64(<8 x double> [[TMP1]], <8 x double> [[TMP2]])
; AVX512-NEXT: store <8 x double> [[TMP3]], ptr @dst64, align 4
; AVX512-NEXT: ret void
;
%a0 = load double, ptr @srcA64, align 4
%a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 1), align 4
%a2 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 2), align 4
%a3 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 3), align 4
%a4 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 4), align 4
%a5 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 5), align 4
%a6 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 6), align 4
%a7 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 7), align 4
%b0 = load double, ptr @srcB64, align 4
%b1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 1), align 4
%b2 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 2), align 4
%b3 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 3), align 4
%b4 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 4), align 4
%b5 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 5), align 4
%b6 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 6), align 4
%b7 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 7), align 4
%fminnum0 = call double @llvm.minnum.f64(double %a0, double %b0)
%fminnum1 = call double @llvm.minnum.f64(double %a1, double %b1)
%fminnum2 = call double @llvm.minnum.f64(double %a2, double %b2)
%fminnum3 = call double @llvm.minnum.f64(double %a3, double %b3)
%fminnum4 = call double @llvm.minnum.f64(double %a4, double %b4)
%fminnum5 = call double @llvm.minnum.f64(double %a5, double %b5)
%fminnum6 = call double @llvm.minnum.f64(double %a6, double %b6)
%fminnum7 = call double @llvm.minnum.f64(double %a7, double %b7)
store double %fminnum0, ptr @dst64, align 4
store double %fminnum1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 4
store double %fminnum2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 4
store double %fminnum3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 4
store double %fminnum4, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 4
store double %fminnum5, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 4
store double %fminnum6, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 4
store double %fminnum7, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 4
ret void
}
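
; All targets vectorize the four scalar @llvm.minnum.f32 calls into a single <4 x float> call.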
define void @fminnum_4f32() #0 {
; CHECK-LABEL: @fminnum_4f32(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @srcA32, align 4
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr @srcB32, align 4
; CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP1]], <4 x float> [[TMP2]])
; CHECK-NEXT: store <4 x float> [[TMP3]], ptr @dst32, align 4
; CHECK-NEXT: ret void
;
%a0 = load float, ptr @srcA32, align 4
%a1 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 1), align 4
%a2 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 2), align 4
%a3 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 3), align 4
%b0 = load float, ptr @srcB32, align 4
%b1 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 1), align 4
%b2 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 2), align 4
%b3 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 3), align 4
%fminnum0 = call float @llvm.minnum.f32(float %a0, float %b0)
%fminnum1 = call float @llvm.minnum.f32(float %a1, float %b1)
%fminnum2 = call float @llvm.minnum.f32(float %a2, float %b2)
%fminnum3 = call float @llvm.minnum.f32(float %a3, float %b3)
store float %fminnum0, ptr @dst32, align 4
store float %fminnum1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
store float %fminnum2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
store float %fminnum3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
ret void
}
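
; SSE splits the eight lanes into two <4 x float> calls; AVX targets use a single <8 x float> call.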
define void @fminnum_8f32() #0 {
; SSE-LABEL: @fminnum_8f32(
; SSE-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @srcA32, align 4
; SSE-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr @srcB32, align 4
; SSE-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP1]], <4 x float> [[TMP2]])
; SSE-NEXT: store <4 x float> [[TMP3]], ptr @dst32, align 4
; SSE-NEXT: [[TMP4:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 4), align 4
; SSE-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 4), align 4
; SSE-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP4]], <4 x float> [[TMP5]])
; SSE-NEXT: store <4 x float> [[TMP6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE-NEXT: ret void
;
; AVX-LABEL: @fminnum_8f32(
; AVX-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @srcA32, align 4
; AVX-NEXT: [[TMP2:%.*]] = load <8 x float>, ptr @srcB32, align 4
; AVX-NEXT: [[TMP3:%.*]] = call <8 x float> @llvm.minnum.v8f32(<8 x float> [[TMP1]], <8 x float> [[TMP2]])
; AVX-NEXT: store <8 x float> [[TMP3]], ptr @dst32, align 4
; AVX-NEXT: ret void
;
%a0 = load float, ptr @srcA32, align 4
%a1 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 1), align 4
%a2 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 2), align 4
%a3 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 3), align 4
%a4 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 4), align 4
%a5 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 5), align 4
%a6 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 6), align 4
%a7 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 7), align 4
%b0 = load float, ptr @srcB32, align 4
%b1 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 1), align 4
%b2 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 2), align 4
%b3 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 3), align 4
%b4 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 4), align 4
%b5 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 5), align 4
%b6 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 6), align 4
%b7 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 7), align 4
%fminnum0 = call float @llvm.minnum.f32(float %a0, float %b0)
%fminnum1 = call float @llvm.minnum.f32(float %a1, float %b1)
%fminnum2 = call float @llvm.minnum.f32(float %a2, float %b2)
%fminnum3 = call float @llvm.minnum.f32(float %a3, float %b3)
%fminnum4 = call float @llvm.minnum.f32(float %a4, float %b4)
%fminnum5 = call float @llvm.minnum.f32(float %a5, float %b5)
%fminnum6 = call float @llvm.minnum.f32(float %a6, float %b6)
%fminnum7 = call float @llvm.minnum.f32(float %a7, float %b7)
store float %fminnum0, ptr @dst32, align 4
store float %fminnum1, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
store float %fminnum2, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
store float %fminnum3, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
store float %fminnum4, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
store float %fminnum5, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
store float %fminnum6, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
store float %fminnum7, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
ret void
}
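
; SSE uses four <4 x float> calls, AVX256 two <8 x float> calls, and AVX512 a single <16 x float> call.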
define void @fminnum_16f32() #0 {
; SSE-LABEL: @fminnum_16f32(
; SSE-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @srcA32, align 4
; SSE-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr @srcB32, align 4
; SSE-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP1]], <4 x float> [[TMP2]])
; SSE-NEXT: store <4 x float> [[TMP3]], ptr @dst32, align 4
; SSE-NEXT: [[TMP4:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 4), align 4
; SSE-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 4), align 4
; SSE-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP4]], <4 x float> [[TMP5]])
; SSE-NEXT: store <4 x float> [[TMP6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
; SSE-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 8), align 4
; SSE-NEXT: [[TMP8:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 8), align 4
; SSE-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP7]], <4 x float> [[TMP8]])
; SSE-NEXT: store <4 x float> [[TMP9]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; SSE-NEXT: [[TMP10:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 12), align 4
; SSE-NEXT: [[TMP11:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 12), align 4
; SSE-NEXT: [[TMP12:%.*]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP10]], <4 x float> [[TMP11]])
; SSE-NEXT: store <4 x float> [[TMP12]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
; SSE-NEXT: ret void
;
; AVX256-LABEL: @fminnum_16f32(
; AVX256-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @srcA32, align 4
; AVX256-NEXT: [[TMP2:%.*]] = load <8 x float>, ptr @srcB32, align 4
; AVX256-NEXT: [[TMP3:%.*]] = call <8 x float> @llvm.minnum.v8f32(<8 x float> [[TMP1]], <8 x float> [[TMP2]])
; AVX256-NEXT: store <8 x float> [[TMP3]], ptr @dst32, align 4
; AVX256-NEXT: [[TMP4:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 8), align 4
; AVX256-NEXT: [[TMP5:%.*]] = load <8 x float>, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 8), align 4
; AVX256-NEXT: [[TMP6:%.*]] = call <8 x float> @llvm.minnum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP5]])
; AVX256-NEXT: store <8 x float> [[TMP6]], ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
; AVX256-NEXT: ret void
;
; AVX512-LABEL: @fminnum_16f32(
; AVX512-NEXT: [[TMP1:%.*]] = load <16 x float>, ptr @srcA32, align 4
; AVX512-NEXT: [[TMP2:%.*]] = load <16 x float>, ptr @srcB32, align 4
; AVX512-NEXT: [[TMP3:%.*]] = call <16 x float> @llvm.minnum.v16f32(<16 x float> [[TMP1]], <16 x float> [[TMP2]])
; AVX512-NEXT: store <16 x float> [[TMP3]], ptr @dst32, align 4
; AVX512-NEXT: ret void
;
%a0 = load float, ptr @srcA32, align 4
%a1 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 1), align 4
%a2 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 2), align 4
%a3 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 3), align 4
%a4 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 4), align 4
%a5 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 5), align 4
%a6 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 6), align 4
%a7 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 7), align 4
%a8 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 8), align 4
%a9 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 9), align 4
%a10 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 10), align 4
%a11 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 11), align 4
%a12 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 12), align 4
%a13 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 13), align 4
%a14 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 14), align 4
%a15 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcA32, i32 0, i64 15), align 4
%b0 = load float, ptr @srcB32, align 4
%b1 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 1), align 4
%b2 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 2), align 4
%b3 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 3), align 4
%b4 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 4), align 4
%b5 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 5), align 4
%b6 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 6), align 4
%b7 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 7), align 4
%b8 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 8), align 4
%b9 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 9), align 4
%b10 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 10), align 4
%b11 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 11), align 4
%b12 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 12), align 4
%b13 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 13), align 4
%b14 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 14), align 4
%b15 = load float, ptr getelementptr inbounds ([16 x float], ptr @srcB32, i32 0, i64 15), align 4
%fminnum0 = call float @llvm.minnum.f32(float %a0 , float %b0 )
%fminnum1 = call float @llvm.minnum.f32(float %a1 , float %b1 )
%fminnum2 = call float @llvm.minnum.f32(float %a2 , float %b2 )
%fminnum3 = call float @llvm.minnum.f32(float %a3 , float %b3 )
%fminnum4 = call float @llvm.minnum.f32(float %a4 , float %b4 )
%fminnum5 = call float @llvm.minnum.f32(float %a5 , float %b5 )
%fminnum6 = call float @llvm.minnum.f32(float %a6 , float %b6 )
%fminnum7 = call float @llvm.minnum.f32(float %a7 , float %b7 )
%fminnum8 = call float @llvm.minnum.f32(float %a8 , float %b8 )
%fminnum9 = call float @llvm.minnum.f32(float %a9 , float %b9 )
%fminnum10 = call float @llvm.minnum.f32(float %a10, float %b10)
%fminnum11 = call float @llvm.minnum.f32(float %a11, float %b11)
%fminnum12 = call float @llvm.minnum.f32(float %a12, float %b12)
%fminnum13 = call float @llvm.minnum.f32(float %a13, float %b13)
%fminnum14 = call float @llvm.minnum.f32(float %a14, float %b14)
%fminnum15 = call float @llvm.minnum.f32(float %a15, float %b15)
store float %fminnum0 , ptr @dst32, align 4
store float %fminnum1 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 1), align 4
store float %fminnum2 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 2), align 4
store float %fminnum3 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 3), align 4
store float %fminnum4 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 4), align 4
store float %fminnum5 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 5), align 4
store float %fminnum6 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 6), align 4
store float %fminnum7 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 7), align 4
store float %fminnum8 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 8), align 4
store float %fminnum9 , ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 9), align 4
store float %fminnum10, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 10), align 4
store float %fminnum11, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 11), align 4
store float %fminnum12, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 12), align 4
store float %fminnum13, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 13), align 4
store float %fminnum14, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 14), align 4
store float %fminnum15, ptr getelementptr inbounds ([16 x float], ptr @dst32, i32 0, i64 15), align 4
ret void
}
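
; A fast fminnum chain over four consecutive loads is recognized as a reduction and turned into @llvm.vector.reduce.fmin.v4f32.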
define float @reduction_v4f32_fast(ptr %p) {
; CHECK-LABEL: @reduction_v4f32_fast(
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = call fast float @llvm.vector.reduce.fmin.v4f32(<4 x float> [[TMP2]])
; CHECK-NEXT: ret float [[TMP3]]
;
%g1 = getelementptr inbounds float, ptr %p, i64 1
%g2 = getelementptr inbounds float, ptr %p, i64 2
%g3 = getelementptr inbounds float, ptr %p, i64 3
%t0 = load float, ptr %p, align 4
%t1 = load float, ptr %g1, align 4
%t2 = load float, ptr %g2, align 4
%t3 = load float, ptr %g3, align 4
%m1 = tail call fast float @llvm.minnum.f32(float %t1, float %t0)
%m2 = tail call fast float @llvm.minnum.f32(float %t2, float %m1)
%m3 = tail call fast float @llvm.minnum.f32(float %t3, float %m2)
ret float %m3
}
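
; nnan alone (without the full set of fast-math flags) is sufficient to form the fmin reduction.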
define float @reduction_v4f32_nnan(ptr %p) {
; CHECK-LABEL: @reduction_v4f32_nnan(
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = call nnan float @llvm.vector.reduce.fmin.v4f32(<4 x float> [[TMP2]])
; CHECK-NEXT: ret float [[TMP3]]
;
%g1 = getelementptr inbounds float, ptr %p, i64 1
%g2 = getelementptr inbounds float, ptr %p, i64 2
%g3 = getelementptr inbounds float, ptr %p, i64 3
%t0 = load float, ptr %p, align 4
%t1 = load float, ptr %g1, align 4
%t2 = load float, ptr %g2, align 4
%t3 = load float, ptr %g3, align 4
%m1 = tail call nnan float @llvm.minnum.f32(float %t1, float %t0)
%m2 = tail call nnan float @llvm.minnum.f32(float %t2, float %m1)
%m3 = tail call nnan float @llvm.minnum.f32(float %t3, float %m2)
ret float %m3
}
; Negative test - the reduction requires nnan; reassoc+nsz alone is not enough, so the scalar chain is kept.
define float @reduction_v4f32_wrong_fmf(ptr %p) {
; CHECK-LABEL: @reduction_v4f32_wrong_fmf(
; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds float, ptr [[P:%.*]], i64 1
; CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds float, ptr [[P]], i64 2
; CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds float, ptr [[P]], i64 3
; CHECK-NEXT: [[T0:%.*]] = load float, ptr [[P]], align 4
; CHECK-NEXT: [[T1:%.*]] = load float, ptr [[G1]], align 4
; CHECK-NEXT: [[T2:%.*]] = load float, ptr [[G2]], align 4
; CHECK-NEXT: [[T3:%.*]] = load float, ptr [[G3]], align 4
; CHECK-NEXT: [[M1:%.*]] = tail call reassoc nsz float @llvm.minnum.f32(float [[T1]], float [[T0]])
; CHECK-NEXT: [[M2:%.*]] = tail call reassoc nsz float @llvm.minnum.f32(float [[T2]], float [[M1]])
; CHECK-NEXT: [[M3:%.*]] = tail call reassoc nsz float @llvm.minnum.f32(float [[T3]], float [[M2]])
; CHECK-NEXT: ret float [[M3]]
;
%g1 = getelementptr inbounds float, ptr %p, i64 1
%g2 = getelementptr inbounds float, ptr %p, i64 2
%g3 = getelementptr inbounds float, ptr %p, i64 3
%t0 = load float, ptr %p, align 4
%t1 = load float, ptr %g1, align 4
%t2 = load float, ptr %g2, align 4
%t3 = load float, ptr %g3, align 4
%m1 = tail call reassoc nsz float @llvm.minnum.f32(float %t1, float %t0)
%m2 = tail call reassoc nsz float @llvm.minnum.f32(float %t2, float %m1)
%m3 = tail call reassoc nsz float @llvm.minnum.f32(float %t3, float %m2)
ret float %m3
}
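
; An eight-element fast fminnum chain (with shuffled operand order) is turned into @llvm.vector.reduce.fmin.v8f32.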
define float @reduction_v8f32_fast(ptr %p) {
; CHECK-LABEL: @reduction_v8f32_fast(
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x float>, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = call fast float @llvm.vector.reduce.fmin.v8f32(<8 x float> [[TMP2]])
; CHECK-NEXT: ret float [[TMP3]]
;
%g1 = getelementptr inbounds float, ptr %p, i64 1
%g2 = getelementptr inbounds float, ptr %p, i64 2
%g3 = getelementptr inbounds float, ptr %p, i64 3
%g4 = getelementptr inbounds float, ptr %p, i64 4
%g5 = getelementptr inbounds float, ptr %p, i64 5
%g6 = getelementptr inbounds float, ptr %p, i64 6
%g7 = getelementptr inbounds float, ptr %p, i64 7
%t0 = load float, ptr %p, align 4
%t1 = load float, ptr %g1, align 4
%t2 = load float, ptr %g2, align 4
%t3 = load float, ptr %g3, align 4
%t4 = load float, ptr %g4, align 4
%t5 = load float, ptr %g5, align 4
%t6 = load float, ptr %g6, align 4
%t7 = load float, ptr %g7, align 4
%m1 = tail call fast float @llvm.minnum.f32(float %t1, float %t0)
%m2 = tail call fast float @llvm.minnum.f32(float %t2, float %m1)
%m3 = tail call fast float @llvm.minnum.f32(float %t3, float %m2)
%m4 = tail call fast float @llvm.minnum.f32(float %t4, float %m3)
%m5 = tail call fast float @llvm.minnum.f32(float %m4, float %t6)
%m6 = tail call fast float @llvm.minnum.f32(float %m5, float %t5)
%m7 = tail call fast float @llvm.minnum.f32(float %m6, float %t7)
ret float %m7
}
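
; With only two elements there is a single scalar call, and no reduction is formed.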
define double @reduction_v2f64_fast(ptr %p) {
; CHECK-LABEL: @reduction_v2f64_fast(
; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds double, ptr [[P:%.*]], i64 1
; CHECK-NEXT: [[T0:%.*]] = load double, ptr [[P]], align 4
; CHECK-NEXT: [[T1:%.*]] = load double, ptr [[G1]], align 4
; CHECK-NEXT: [[M1:%.*]] = tail call fast double @llvm.minnum.f64(double [[T1]], double [[T0]])
; CHECK-NEXT: ret double [[M1]]
;
%g1 = getelementptr inbounds double, ptr %p, i64 1
%t0 = load double, ptr %p, align 4
%t1 = load double, ptr %g1, align 4
%m1 = tail call fast double @llvm.minnum.f64(double %t1, double %t0)
ret double %m1
}
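
; A fast fminnum chain over four consecutive doubles becomes @llvm.vector.reduce.fmin.v4f64.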
define double @reduction_v4f64_fast(ptr %p) {
; CHECK-LABEL: @reduction_v4f64_fast(
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x double>, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = call fast double @llvm.vector.reduce.fmin.v4f64(<4 x double> [[TMP2]])
; CHECK-NEXT: ret double [[TMP3]]
;
%g1 = getelementptr inbounds double, ptr %p, i64 1
%g2 = getelementptr inbounds double, ptr %p, i64 2
%g3 = getelementptr inbounds double, ptr %p, i64 3
%t0 = load double, ptr %p, align 4
%t1 = load double, ptr %g1, align 4
%t2 = load double, ptr %g2, align 4
%t3 = load double, ptr %g3, align 4
%m1 = tail call fast double @llvm.minnum.f64(double %t1, double %t0)
%m2 = tail call fast double @llvm.minnum.f64(double %t2, double %m1)
%m3 = tail call fast double @llvm.minnum.f64(double %t3, double %m2)
ret double %m3
}
; Negative test - the reduction requires nnan, but these calls carry no fast-math flags, so the chain is left scalar.
define double @reduction_v4f64_not_fast(ptr %p) {
; CHECK-LABEL: @reduction_v4f64_not_fast(
; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds double, ptr [[P:%.*]], i64 1
; CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds double, ptr [[P]], i64 2
; CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds double, ptr [[P]], i64 3
; CHECK-NEXT: [[T0:%.*]] = load double, ptr [[P]], align 4
; CHECK-NEXT: [[T1:%.*]] = load double, ptr [[G1]], align 4
; CHECK-NEXT: [[T2:%.*]] = load double, ptr [[G2]], align 4
; CHECK-NEXT: [[T3:%.*]] = load double, ptr [[G3]], align 4
; CHECK-NEXT: [[M1:%.*]] = tail call double @llvm.minnum.f64(double [[T1]], double [[T0]])
; CHECK-NEXT: [[M2:%.*]] = tail call double @llvm.minnum.f64(double [[T2]], double [[M1]])
; CHECK-NEXT: [[M3:%.*]] = tail call double @llvm.minnum.f64(double [[T3]], double [[M2]])
; CHECK-NEXT: ret double [[M3]]
;
%g1 = getelementptr inbounds double, ptr %p, i64 1
%g2 = getelementptr inbounds double, ptr %p, i64 2
%g3 = getelementptr inbounds double, ptr %p, i64 3
%t0 = load double, ptr %p, align 4
%t1 = load double, ptr %g1, align 4
%t2 = load double, ptr %g2, align 4
%t3 = load double, ptr %g3, align 4
%m1 = tail call double @llvm.minnum.f64(double %t1, double %t0)
%m2 = tail call double @llvm.minnum.f64(double %t2, double %m1)
%m3 = tail call double @llvm.minnum.f64(double %t3, double %m2)
ret double %m3
}
attributes #0 = { nounwind }