llvm/test/CodeGen/X86/frem.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-linux-gnu < %s -mattr=fma | FileCheck %s

; Basic test coverage for FREM
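; FREM has no native x86 instruction: scalar cases expand to fmodf/fmod/fmodl/fmodf128
; libcalls, half goes through __extendhfsf2/__truncsfhf2 around fmodf, and vector cases
; are scalarized into one libcall per element.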

define void @frem_f16(half %a0, half %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $16, %rsp
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmovaps %xmm1, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpextrw $0, %xmm0, (%rbx)
; CHECK-NEXT:    addq $16, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    retq
  %frem = frem half %a0, %a1
  store half %frem, ptr%p3
  ret void
}

define void @frem_f32(float %a0, float %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovss %xmm0, (%rbx)
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    retq
  %frem = frem float %a0, %a1
  store float %frem, ptr%p3
  ret void
}

define void @frem_f64(double %a0, double %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovsd %xmm0, (%rbx)
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    retq
  %frem = frem double %a0, %a1
  store double %frem, ptr%p3
  ret void
}

define void @frem_f80(x86_fp80 %a0, x86_fp80 %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_f80:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $32, %rsp
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpt (%rsp)
; CHECK-NEXT:    callq fmodl@PLT
; CHECK-NEXT:    fstpt (%rbx)
; CHECK-NEXT:    addq $32, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    retq
  %frem = frem x86_fp80 %a0, %a1
  store x86_fp80 %frem, ptr%p3
  ret void
}

define void @frem_f128(fp128 %a0, fp128 %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_f128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    callq fmodf128
; CHECK-NEXT:    vmovaps %xmm0, (%rbx)
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    retq
  %frem = frem fp128 %a0, %a1
  store fp128 %frem, ptr%p3
  ret void
}

define void @frem_v16f32(<16 x float> %a0, <16 x float> %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $160, %rsp
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovaps %xmm1, %xmm0
; CHECK-NEXT:    vmovaps %xmm3, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps %xmm2, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; CHECK-NEXT:    # kill: def $xmm1 killed $xmm1 killed $ymm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps %xmm2, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT:    vmovaps %xmm0, 16(%rbx)
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vmovaps %xmm0, (%rbx)
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vmovaps %xmm0, 48(%rbx)
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vmovaps %xmm0, 32(%rbx)
; CHECK-NEXT:    addq $160, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    retq
  %frem = frem <16 x float> %a0, %a1
  store <16 x float> %frem, ptr%p3
  ret void
}

define void @frem_v8f32(<8 x float> %a0, <8 x float> %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $96, %rsp
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT:    # kill: def $xmm1 killed $xmm1 killed $ymm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps %xmm2, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT:    vmovaps %xmm0, 16(%rbx)
; CHECK-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vmovaps %xmm0, (%rbx)
; CHECK-NEXT:    addq $96, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    retq
  %frem = frem <8 x float> %a0, %a1
  store <8 x float> %frem, ptr%p3
  ret void
}

define void @frem_v4f32(<4 x float> %a0, <4 x float> %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $48, %rsp
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,1,3,3]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[3,3,3,3]
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT:    vmovaps %xmm0, (%rbx)
; CHECK-NEXT:    addq $48, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    retq
  %frem = frem <4 x float> %a0, %a1
  store <4 x float> %frem, ptr%p3
  ret void
}

define void @frem_v8f64(<8 x double> %a0, <8 x double> %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $160, %rsp
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovups %ymm1, (%rsp) # 32-byte Spill
; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovaps %xmm1, %xmm0
; CHECK-NEXT:    vmovaps %xmm3, %xmm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; CHECK-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT:    vmovaps %xmm1, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovaps %xmm2, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vpermilpd $1, (%rsp), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; CHECK-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; CHECK-NEXT:    # kill: def $xmm1 killed $xmm1 killed $ymm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovapd (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; CHECK-NEXT:    vmovapd %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps %xmm2, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; CHECK-NEXT:    vmovapd %xmm0, 16(%rbx)
; CHECK-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vmovaps %xmm0, (%rbx)
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vmovaps %xmm0, 48(%rbx)
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vmovaps %xmm0, 32(%rbx)
; CHECK-NEXT:    addq $160, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    retq
  %frem = frem <8 x double> %a0, %a1
  store <8 x double> %frem, ptr%p3
  ret void
}

define void @frem_v4f64(<4 x double> %a0, <4 x double> %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $96, %rsp
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT:    # kill: def $xmm1 killed $xmm1 killed $ymm1
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovapd (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; CHECK-NEXT:    vmovapd %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps %xmm2, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; CHECK-NEXT:    vmovapd %xmm0, 16(%rbx)
; CHECK-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vmovaps %xmm0, (%rbx)
; CHECK-NEXT:    addq $96, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    retq
  %frem = frem <4 x double> %a0, %a1
  store <4 x double> %frem, ptr%p3
  ret void
}

define void @frem_v2f64(<2 x double> %a0, <2 x double> %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $48, %rsp
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = mem[1,0]
; CHECK-NEXT:    callq fmod@PLT
; CHECK-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; CHECK-NEXT:    vmovapd %xmm0, (%rbx)
; CHECK-NEXT:    addq $48, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    retq
  %frem = frem <2 x double> %a0, %a1
  store <2 x double> %frem, ptr%p3
  ret void
}

define void @frem_v32f16(<32 x half> %a0, <32 x half> %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $224, %rsp
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vextractf128 $1, %ymm2, %xmm0
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, (%rsp) # 4-byte Folded Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vpshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrld $16, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrld $16, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
; CHECK-NEXT:    vpshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, (%rsp) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, (%rsp) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrld $16, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrld $16, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
; CHECK-NEXT:    vpshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, (%rsp) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, (%rsp) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrld $16, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrld $16, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vpshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrld $16, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrld $16, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovaps %ymm0, 32(%rbx)
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vmovaps %ymm0, (%rbx)
; CHECK-NEXT:    addq $224, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %frem = frem <32 x half> %a0, %a1
  store <32 x half> %frem, ptr%p3
  ret void
}

define void @frem_v16f16(<16 x half> %a0, <16 x half> %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $144, %rsp
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm0
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
; CHECK-NEXT:    vpshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, (%rsp) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, (%rsp) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrld $16, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrld $16, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vpshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrld $16, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vpsrld $16, %xmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    vmovaps %ymm0, (%rbx)
; CHECK-NEXT:    addq $144, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %frem = frem <16 x half> %a0, %a1
  store <16 x half> %frem, ptr%p3
  ret void
}

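; The <8 x half> case follows the same per-lane extend/fmodf/truncate pattern,
; reassembling the lanes into a single xmm store.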
define void @frem_v8f16(<8 x half> %a0, <8 x half> %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $80, %rsp
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
; CHECK-NEXT:    vpshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, (%rsp) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,0]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, (%rsp) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrlq $48, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrld $16, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    vpsrld $16, %xmm0, %xmm0
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf@PLT
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    vmovdqa %xmm0, (%rbx)
; CHECK-NEXT:    addq $80, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    retq
  %frem = frem <8 x half> %a0, %a1
  store <8 x half> %frem, ptr%p3
  ret void
}

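; frem on <4 x x86_fp80> is expected to scalarize into four fmodl libcalls,
; passing each element through the x87 stack and storing the results at
; 10-byte offsets from the destination pointer.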
define void @frem_v4f80(<4 x x86_fp80> %a0, <4 x x86_fp80> %a1, ptr%p3) nounwind {
; CHECK-LABEL: frem_v4f80:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $128, %rsp
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Spill
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Spill
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Spill
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Spill
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Spill
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Spill
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpt (%rsp)
; CHECK-NEXT:    callq fmodl@PLT
; CHECK-NEXT:    fstpt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Spill
; CHECK-NEXT:    fldt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Reload
; CHECK-NEXT:    fstpt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Reload
; CHECK-NEXT:    fstpt (%rsp)
; CHECK-NEXT:    callq fmodl@PLT
; CHECK-NEXT:    fstpt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Spill
; CHECK-NEXT:    fldt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Reload
; CHECK-NEXT:    fstpt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Reload
; CHECK-NEXT:    fstpt (%rsp)
; CHECK-NEXT:    callq fmodl@PLT
; CHECK-NEXT:    fstpt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Spill
; CHECK-NEXT:    fldt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Reload
; CHECK-NEXT:    fstpt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Reload
; CHECK-NEXT:    fstpt (%rsp)
; CHECK-NEXT:    callq fmodl@PLT
; CHECK-NEXT:    fstpt 30(%rbx)
; CHECK-NEXT:    fldt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Reload
; CHECK-NEXT:    fstpt 20(%rbx)
; CHECK-NEXT:    fldt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Reload
; CHECK-NEXT:    fstpt 10(%rbx)
; CHECK-NEXT:    fldt {{[-0-9]+}}(%r{{[sb]}}p) # 10-byte Folded Reload
; CHECK-NEXT:    fstpt (%rbx)
; CHECK-NEXT:    addq $128, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    retq
  %frem = frem <4 x x86_fp80> %a0, %a1
  store <4 x x86_fp80> %frem, ptr%p3
  ret void
}