llvm/test/CodeGen/X86/avx512fp16-frem.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 | FileCheck %s
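;
; frem has no native FP16 instruction, so each half operand is widened to
; single precision with vcvtsh2ss, the fmodf libcall is made, and the result
; is narrowed back with vcvtss2sh. Vector cases are fully scalarized: one
; extract/convert/call/insert sequence per element, with spills/reloads
; around each call.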

define half @frem(half %x, half %y) nounwind {
; CHECK-LABEL: frem:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    retq
  %r = frem half %x, %y
  ret half %r
}

define <2 x half> @frem_vec2(<2 x half> %x, <2 x half> %y) nounwind {
; CHECK-LABEL: frem_vec2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $88, %rsp
; CHECK-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovapd %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm2, %xmm2, %xmm0
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm2, %xmm2, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq $10, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrlq $48, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $88, %rsp
; CHECK-NEXT:    retq
  %r = frem <2 x half> %x, %y
  ret <2 x half> %r
}

define <4 x half> @frem_vec4(<4 x half> %x, <4 x half> %y) nounwind {
; CHECK-LABEL: frem_vec4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $88, %rsp
; CHECK-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovapd %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm2, %xmm2, %xmm0
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm2, %xmm2, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq $10, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrlq $48, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $88, %rsp
; CHECK-NEXT:    retq
  %r = frem <4 x half> %x, %y
  ret <4 x half> %r
}

define <8 x half> @frem_vec8(<8 x half> %x, <8 x half> %y) nounwind {
; CHECK-LABEL: frem_vec8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $88, %rsp
; CHECK-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovapd %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm2, %xmm2, %xmm0
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm2, %xmm2, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq $10, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrlq $48, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $88, %rsp
; CHECK-NEXT:    retq
  %r = frem <8 x half> %x, %y
  ret <8 x half> %r
}

define <16 x half> @frem_vec16(<16 x half> %x, <16 x half> %y) nounwind {
; CHECK-LABEL: frem_vec16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $184, %rsp
; CHECK-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm1
; CHECK-NEXT:    vmovapd %xmm1, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilps $255, (%rsp), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq $14, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrldq $14, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrldq $10, (%rsp), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilpd $1, (%rsp), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; CHECK-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrlq $48, (%rsp), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovshdup (%rsp), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrld $16, (%rsp), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
; CHECK-NEXT:    addq $184, %rsp
; CHECK-NEXT:    retq
  %r = frem <16 x half> %x, %y
  ret <16 x half> %r
}

define <32 x half> @frem_vec32(<32 x half> %x, <32 x half> %y) nounwind {
; CHECK-LABEL: frem_vec32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $408, %rsp # imm = 0x198
; CHECK-NEXT:    vmovupd %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; CHECK-NEXT:    vmovupd %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; CHECK-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
; CHECK-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vextractf32x4 $3, %zmm1, %xmm1
; CHECK-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; CHECK-NEXT:    vextractf32x4 $2, %zmm0, %xmm0
; CHECK-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; CHECK-NEXT:    vextractf32x4 $2, %zmm1, %xmm1
; CHECK-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm1
; CHECK-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq $14, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrldq $14, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
; CHECK-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
; CHECK-NEXT:    # zmm0 = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13]
; CHECK-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
; CHECK-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
; CHECK-NEXT:    # zmm0 = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13]
; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
; CHECK-NEXT:    # zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
; CHECK-NEXT:    addq $408, %rsp # imm = 0x198
; CHECK-NEXT:    retq
  %r = frem <32 x half> %x, %y
  ret <32 x half> %r
}

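; The remaining tests exercise the same lowering through the constrained
; (strictfp) intrinsic llvm.experimental.constrained.frem with
; round.dynamic/fpexcept.strict, which must likewise fall back to fmodf.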
define half @frem_strict(half %x, half %y) nounwind #0 {
; CHECK-LABEL: frem_strict:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    retq
  %result = call half @llvm.experimental.constrained.frem.f16(half %x, half %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret half %result
}

define <2 x half> @frem_strict_vec2(<2 x half> %x, <2 x half> %y) nounwind #0 {
; CHECK-LABEL: frem_strict_vec2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $56, %rsp
; CHECK-NEXT:    vmovaps %xmm1, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    addq $56, %rsp
; CHECK-NEXT:    retq
  %result = call <2 x half> @llvm.experimental.constrained.frem.v2f16(<2 x half> %x, <2 x half> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <2 x half> %result
}

define <4 x half> @frem_strict_vec4(<4 x half> %x, <4 x half> %y) nounwind #0 {
; CHECK-LABEL: frem_strict_vec4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $72, %rsp
; CHECK-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrlq $48, %xmm1, %xmm2
; CHECK-NEXT:    vcvtsh2ss %xmm2, %xmm2, %xmm1
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm2
; CHECK-NEXT:    vcvtsh2ss %xmm2, %xmm2, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vinsertps $28, (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],zero,zero
; CHECK-NEXT:    addq $72, %rsp
; CHECK-NEXT:    retq
  %result = call <4 x half> @llvm.experimental.constrained.frem.v4f16(<4 x half> %x, <4 x half> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <4 x half> %result
}

define <8 x half> @frem_strict_vec8(<8 x half> %x, <8 x half> %y) nounwind #0 {
; CHECK-LABEL: frem_strict_vec8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $88, %rsp
; CHECK-NEXT:    vmovapd %xmm1, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm2, %xmm2, %xmm1
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm2, %xmm2, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq $10, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrlq $48, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $88, %rsp
; CHECK-NEXT:    retq
  %result = call <8 x half> @llvm.experimental.constrained.frem.v8f16(<8 x half> %x, <8 x half> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <8 x half> %result
}

define <16 x half> @frem_strict_vec16(<16 x half> %x, <16 x half> %y) nounwind #0 {
; CHECK-LABEL: frem_strict_vec16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $184, %rsp
; CHECK-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm1
; CHECK-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpermilps $255, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq $14, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrldq $14, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrldq $10, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; CHECK-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrlq $48, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrld $16, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
; CHECK-NEXT:    addq $184, %rsp
; CHECK-NEXT:    retq
  %result = call <16 x half> @llvm.experimental.constrained.frem.v16f16(<16 x half> %x, <16 x half> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <16 x half> %result
}

define <32 x half> @frem_strict_vec32(<32 x half> %x, <32 x half> %y) nounwind #0 {
; CHECK-LABEL: frem_strict_vec32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $408, %rsp # imm = 0x198
; CHECK-NEXT:    vmovupd %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; CHECK-NEXT:    vextractf32x4 $3, %zmm1, %xmm1
; CHECK-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; CHECK-NEXT:    vextractf32x4 $2, %zmm0, %xmm0
; CHECK-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; CHECK-NEXT:    vextractf32x4 $2, %zmm0, %xmm0
; CHECK-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq $14, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrldq $14, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
; CHECK-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrldq $10, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
; CHECK-NEXT:    # zmm0 = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13]
; CHECK-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vinserti128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
; CHECK-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm1
; CHECK-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
; CHECK-NEXT:    # zmm0 = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13]
; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
; CHECK-NEXT:    # zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
; CHECK-NEXT:    addq $408, %rsp # imm = 0x198
; CHECK-NEXT:    retq
  %result = call <32 x half> @llvm.experimental.constrained.frem.v32f16(<32 x half> %x, <32 x half> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <32 x half> %result
}

attributes #0 = { strictfp }
declare half @llvm.experimental.constrained.frem.f16(half, half, metadata, metadata)
declare <2 x half> @llvm.experimental.constrained.frem.v2f16(<2 x half>, <2 x half>, metadata, metadata)
declare <4 x half> @llvm.experimental.constrained.frem.v4f16(<4 x half>, <4 x half>, metadata, metadata)
declare <8 x half> @llvm.experimental.constrained.frem.v8f16(<8 x half>, <8 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.frem.v16f16(<16 x half>, <16 x half>, metadata, metadata)
declare <32 x half> @llvm.experimental.constrained.frem.v32f16(<32 x half>, <32 x half>, metadata, metadata)