llvm/test/CodeGen/X86/avx512fp16-machine-combiner.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx512fp16 -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -machine-combiner-verify-pattern-order=true < %s | FileCheck %s

; Incremental updates of the instruction depths should be enough for this test
; case.
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx512fp16 -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -machine-combiner-inc-threshold=0 < %s | FileCheck %s
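
; Both RUN lines pass -enable-no-nans-fp-math and -enable-no-signed-zeros-fp-math.
; Together with the 'reassoc nsz' flags on the fadd/fmul/fdiv instructions, these
; permit reassociation of the FP ops below; the fcmp+select min/max idioms carry
; no per-instruction fast-math flags and rely entirely on the global flags.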

; Verify that the first two adds are independent regardless of how the inputs are
; commuted. The destination registers are used as source registers for the third add.
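; For example, in @reassociate_adds1 the chain ((x0 + x1) + x2) + x3 is
; rebalanced into (x0 + x1) + (x2 + x3), so the first two vaddsh instructions
; can issue in parallel.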

define half @reassociate_adds1(half %x0, half %x1, half %x2, half %x3) {
; CHECK-LABEL: reassociate_adds1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddsh %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vaddsh %xmm3, %xmm2, %xmm1
; CHECK-NEXT:    vaddsh %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %t0 = fadd reassoc nsz half %x0, %x1
  %t1 = fadd reassoc nsz half %t0, %x2
  %t2 = fadd reassoc nsz half %t1, %x3
  ret half %t2
}

define half @reassociate_adds2(half %x0, half %x1, half %x2, half %x3) {
; CHECK-LABEL: reassociate_adds2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddsh %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vaddsh %xmm3, %xmm2, %xmm1
; CHECK-NEXT:    vaddsh %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %t0 = fadd reassoc nsz half %x0, %x1
  %t1 = fadd reassoc nsz half %x2, %t0
  %t2 = fadd reassoc nsz half %t1, %x3
  ret half %t2
}

define half @reassociate_adds3(half %x0, half %x1, half %x2, half %x3) {
; CHECK-LABEL: reassociate_adds3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddsh %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vaddsh %xmm2, %xmm3, %xmm1
; CHECK-NEXT:    vaddsh %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %t0 = fadd reassoc nsz half %x0, %x1
  %t1 = fadd reassoc nsz half %t0, %x2
  %t2 = fadd reassoc nsz half %x3, %t1
  ret half %t2
}

define half @reassociate_adds4(half %x0, half %x1, half %x2, half %x3) {
; CHECK-LABEL: reassociate_adds4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddsh %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vaddsh %xmm2, %xmm3, %xmm1
; CHECK-NEXT:    vaddsh %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %t0 = fadd reassoc nsz half %x0, %x1
  %t1 = fadd reassoc nsz half %x2, %t0
  %t2 = fadd reassoc nsz half %x3, %t1
  ret half %t2
}

; Verify that we reassociate some of these ops. The optimal balanced tree of adds is not
; produced because that would cost more compile time.
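; Here a chain of seven dependent adds (depth 7) is rebalanced to depth 4:
;   (((x0 + x1) + (x2 + x3)) + ((x4 + x5) + x6)) + x7
; rather than the depth-3 fully balanced tree over the eight operands.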

define half @reassociate_adds5(half %x0, half %x1, half %x2, half %x3, half %x4, half %x5, half %x6, half %x7) {
; CHECK-LABEL: reassociate_adds5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddsh %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vaddsh %xmm3, %xmm2, %xmm1
; CHECK-NEXT:    vaddsh %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vaddsh %xmm5, %xmm4, %xmm1
; CHECK-NEXT:    vaddsh %xmm6, %xmm1, %xmm1
; CHECK-NEXT:    vaddsh %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vaddsh %xmm7, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %t0 = fadd reassoc nsz half %x0, %x1
  %t1 = fadd reassoc nsz half %t0, %x2
  %t2 = fadd reassoc nsz half %t1, %x3
  %t3 = fadd reassoc nsz half %t2, %x4
  %t4 = fadd reassoc nsz half %t3, %x5
  %t5 = fadd reassoc nsz half %t4, %x6
  %t6 = fadd reassoc nsz half %t5, %x7
  ret half %t6
}

; Verify that we only need two associative operations to reassociate the operands.
; Also, we should reassociate such that the result of the high latency division
; is used by the final 'add' rather than reassociating the %x3 operand with the
; division. The latter reassociation would not improve anything.
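; That is, the result is computed as (x3 + x2) + (x0 / x1): the independent
; add of x2 and x3 executes in parallel with the division instead of waiting
; for it.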

define half @reassociate_adds6(half %x0, half %x1, half %x2, half %x3) {
; CHECK-LABEL: reassociate_adds6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivsh %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vaddsh %xmm2, %xmm3, %xmm1
; CHECK-NEXT:    vaddsh %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %t0 = fdiv reassoc nsz half %x0, %x1
  %t1 = fadd reassoc nsz half %x2, %t0
  %t2 = fadd reassoc nsz half %x3, %t1
  ret half %t2
}

; Verify that AVX512FP16 scalar half-precision multiplies are reassociated.
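; As in @reassociate_adds6 above, the chain is rebalanced so that the x3 * x2
; multiply overlaps the high-latency division.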

define half @reassociate_muls1(half %x0, half %x1, half %x2, half %x3) {
; CHECK-LABEL: reassociate_muls1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivsh %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vmulsh %xmm2, %xmm3, %xmm1
; CHECK-NEXT:    vmulsh %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %t0 = fdiv reassoc nsz half %x0, %x1
  %t1 = fmul reassoc nsz half %x2, %t0
  %t2 = fmul reassoc nsz half %x3, %t1
  ret half %t2
}

; Verify that AVX512FP16 128-bit vector half-precision adds are reassociated.

define <8 x half> @reassociate_adds_v8f16(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, <8 x half> %x3) {
; CHECK-LABEL: reassociate_adds_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivph %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vaddph %xmm2, %xmm3, %xmm1
; CHECK-NEXT:    vaddph %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %t0 = fdiv reassoc nsz <8 x half> %x0, %x1
  %t1 = fadd reassoc nsz <8 x half> %x2, %t0
  %t2 = fadd reassoc nsz <8 x half> %x3, %t1
  ret <8 x half> %t2
}

; Verify that AVX512FP16 128-bit vector half-precision multiplies are reassociated.

define <8 x half> @reassociate_muls_v8f16(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, <8 x half> %x3) {
; CHECK-LABEL: reassociate_muls_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddph %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vmulph %xmm2, %xmm3, %xmm1
; CHECK-NEXT:    vmulph %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %t0 = fadd reassoc nsz <8 x half> %x0, %x1
  %t1 = fmul reassoc nsz <8 x half> %x2, %t0
  %t2 = fmul reassoc nsz <8 x half> %x3, %t1
  ret <8 x half> %t2
}

; Verify that AVX512FP16 256-bit vector half-precision adds are reassociated.

define <16 x half> @reassociate_adds_v16f16(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2, <16 x half> %x3) {
; CHECK-LABEL: reassociate_adds_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivph %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    vaddph %ymm2, %ymm3, %ymm1
; CHECK-NEXT:    vaddph %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    retq
  %t0 = fdiv reassoc nsz <16 x half> %x0, %x1
  %t1 = fadd reassoc nsz <16 x half> %x2, %t0
  %t2 = fadd reassoc nsz <16 x half> %x3, %t1
  ret <16 x half> %t2
}

; Verify that AVX512FP16 256-bit vector half-precision multiplies are reassociated.

define <16 x half> @reassociate_muls_v16f16(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2, <16 x half> %x3) {
; CHECK-LABEL: reassociate_muls_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddph %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    vmulph %ymm2, %ymm3, %ymm1
; CHECK-NEXT:    vmulph %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    retq
  %t0 = fadd reassoc nsz <16 x half> %x0, %x1
  %t1 = fmul reassoc nsz <16 x half> %x2, %t0
  %t2 = fmul reassoc nsz <16 x half> %x3, %t1
  ret <16 x half> %t2
}

; Verify that AVX512FP16 512-bit vector half-precision adds are reassociated.

define <32 x half> @reassociate_adds_v32f16(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, <32 x half> %x3) {
; CHECK-LABEL: reassociate_adds_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivph %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    vaddph %zmm2, %zmm3, %zmm1
; CHECK-NEXT:    vaddph %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %t0 = fdiv reassoc nsz <32 x half> %x0, %x1
  %t1 = fadd reassoc nsz <32 x half> %x2, %t0
  %t2 = fadd reassoc nsz <32 x half> %x3, %t1
  ret <32 x half> %t2
}

; Verify that AVX512FP16 512-bit vector half-precision multiplies are reassociated.

define <32 x half> @reassociate_muls_v32f16(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, <32 x half> %x3) {
; CHECK-LABEL: reassociate_muls_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddph %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    vmulph %zmm2, %zmm3, %zmm1
; CHECK-NEXT:    vmulph %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %t0 = fadd reassoc nsz <32 x half> %x0, %x1
  %t1 = fmul reassoc nsz <32 x half> %x2, %t0
  %t2 = fmul reassoc nsz <32 x half> %x3, %t1
  ret <32 x half> %t2
}

; Verify that AVX512FP16 scalar half-precision minimum ops are reassociated.
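; The fcmp+select idioms below have no fast-math flags of their own;
; recognizing them as min/max and reassociating them is only legal here
; because the RUN lines enable no-NaNs and no-signed-zeros FP math globally.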

define half @reassociate_mins_half(half %x0, half %x1, half %x2, half %x3) {
; CHECK-LABEL: reassociate_mins_half:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivsh %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vminsh %xmm2, %xmm3, %xmm1
; CHECK-NEXT:    vminsh %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %t0 = fdiv half %x0, %x1
  %cmp1 = fcmp olt half %x2, %t0
  %sel1 = select i1 %cmp1, half %x2, half %t0
  %cmp2 = fcmp olt half %x3, %sel1
  %sel2 = select i1 %cmp2, half %x3, half %sel1
  ret half %sel2
}

; Verify that AVX512FP16 scalar half-precision maximum ops are reassociated.

define half @reassociate_maxs_half(half %x0, half %x1, half %x2, half %x3) {
; CHECK-LABEL: reassociate_maxs_half:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivsh %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vmaxsh %xmm2, %xmm3, %xmm1
; CHECK-NEXT:    vmaxsh %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %t0 = fdiv half %x0, %x1
  %cmp1 = fcmp ogt half %x2, %t0
  %sel1 = select i1 %cmp1, half %x2, half %t0
  %cmp2 = fcmp ogt half %x3, %sel1
  %sel2 = select i1 %cmp2, half %x3, half %sel1
  ret half %sel2
}

; Verify that AVX512FP16 128-bit vector half-precision minimum ops are reassociated.

define <8 x half> @reassociate_mins_v8f16(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, <8 x half> %x3) {
; CHECK-LABEL: reassociate_mins_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddph %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vminph %xmm2, %xmm3, %xmm1
; CHECK-NEXT:    vminph %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %t0 = fadd <8 x half> %x0, %x1
  %cmp1 = fcmp olt <8 x half> %x2, %t0
  %sel1 = select <8 x i1> %cmp1, <8 x half> %x2, <8 x half> %t0
  %cmp2 = fcmp olt <8 x half> %x3, %sel1
  %sel2 = select <8 x i1> %cmp2, <8 x half> %x3, <8 x half> %sel1
  ret <8 x half> %sel2
}

; Verify that AVX512FP16 128-bit vector half-precision maximum ops are reassociated.

define <8 x half> @reassociate_maxs_v8f16(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, <8 x half> %x3) {
; CHECK-LABEL: reassociate_maxs_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddph %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vmaxph %xmm2, %xmm3, %xmm1
; CHECK-NEXT:    vmaxph %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %t0 = fadd <8 x half> %x0, %x1
  %cmp1 = fcmp ogt <8 x half> %x2, %t0
  %sel1 = select <8 x i1> %cmp1, <8 x half> %x2, <8 x half> %t0
  %cmp2 = fcmp ogt <8 x half> %x3, %sel1
  %sel2 = select <8 x i1> %cmp2, <8 x half> %x3, <8 x half> %sel1
  ret <8 x half> %sel2
}

; Verify that AVX512FP16 256-bit vector half-precision minimum ops are reassociated.

define <16 x half> @reassociate_mins_v16f16(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2, <16 x half> %x3) {
; CHECK-LABEL: reassociate_mins_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddph %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    vminph %ymm2, %ymm3, %ymm1
; CHECK-NEXT:    vminph %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    retq
  %t0 = fadd <16 x half> %x0, %x1
  %cmp1 = fcmp olt <16 x half> %x2, %t0
  %sel1 = select <16 x i1> %cmp1, <16 x half> %x2, <16 x half> %t0
  %cmp2 = fcmp olt <16 x half> %x3, %sel1
  %sel2 = select <16 x i1> %cmp2, <16 x half> %x3, <16 x half> %sel1
  ret <16 x half> %sel2
}

; Verify that AVX512FP16 256-bit vector half-precision maximum ops are reassociated.

define <16 x half> @reassociate_maxs_v16f16(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2, <16 x half> %x3) {
; CHECK-LABEL: reassociate_maxs_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddph %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    vmaxph %ymm2, %ymm3, %ymm1
; CHECK-NEXT:    vmaxph %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    retq
  %t0 = fadd <16 x half> %x0, %x1
  %cmp1 = fcmp ogt <16 x half> %x2, %t0
  %sel1 = select <16 x i1> %cmp1, <16 x half> %x2, <16 x half> %t0
  %cmp2 = fcmp ogt <16 x half> %x3, %sel1
  %sel2 = select <16 x i1> %cmp2, <16 x half> %x3, <16 x half> %sel1
  ret <16 x half> %sel2
}

; Verify that AVX512FP16 512-bit vector half-precision minimum ops are reassociated.

define <32 x half> @reassociate_mins_v32f16(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, <32 x half> %x3) {
; CHECK-LABEL: reassociate_mins_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddph %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    vminph %zmm2, %zmm3, %zmm1
; CHECK-NEXT:    vminph %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %t0 = fadd <32 x half> %x0, %x1
  %cmp1 = fcmp olt <32 x half> %x2, %t0
  %sel1 = select <32 x i1> %cmp1, <32 x half> %x2, <32 x half> %t0
  %cmp2 = fcmp olt <32 x half> %x3, %sel1
  %sel2 = select <32 x i1> %cmp2, <32 x half> %x3, <32 x half> %sel1
  ret <32 x half> %sel2
}

; Verify that AVX512FP16 512-bit vector half-precision maximum ops are reassociated.

define <32 x half> @reassociate_maxs_v32f16(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, <32 x half> %x3) {
; CHECK-LABEL: reassociate_maxs_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddph %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    vmaxph %zmm2, %zmm3, %zmm1
; CHECK-NEXT:    vmaxph %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %t0 = fadd <32 x half> %x0, %x1
  %cmp1 = fcmp ogt <32 x half> %x2, %t0
  %sel1 = select <32 x i1> %cmp1, <32 x half> %x2, <32 x half> %t0
  %cmp2 = fcmp ogt <32 x half> %x3, %sel1
  %sel2 = select <32 x i1> %cmp2, <32 x half> %x3, <32 x half> %sel1
  ret <32 x half> %sel2
}