; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=sse4.2 | FileCheck %s --check-prefix=FAST
; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=ssse3 | FileCheck %s --check-prefix=SLOW_32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=ssse3 | FileCheck %s --check-prefix=SLOW_64
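
; 80-byte zero memset with only 4-byte alignment. With sse4.2 (FAST) it is
; expected to lower to unaligned 16-byte movups stores; with only ssse3 (SLOW)
; it is expected to be split into 8-byte stores: movsd through xmm0 on i386
; and movq $0 immediates on x86-64.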
define void @bork(ptr nocapture align 4 %dst) nounwind {
; FAST-LABEL: bork:
; FAST:       # %bb.0:
; FAST-NEXT:    movl {{[0-9]+}}(%esp), %eax
; FAST-NEXT:    xorps %xmm0, %xmm0
; FAST-NEXT:    movups %xmm0, 64(%eax)
; FAST-NEXT:    movups %xmm0, 48(%eax)
; FAST-NEXT:    movups %xmm0, 32(%eax)
; FAST-NEXT:    movups %xmm0, 16(%eax)
; FAST-NEXT:    movups %xmm0, (%eax)
; FAST-NEXT:    retl
;
; SLOW_32-LABEL: bork:
; SLOW_32:       # %bb.0:
; SLOW_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; SLOW_32-NEXT:    xorps %xmm0, %xmm0
; SLOW_32-NEXT:    movsd %xmm0, 72(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 64(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 56(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 48(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 40(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 32(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 24(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 16(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 8(%eax)
; SLOW_32-NEXT:    movsd %xmm0, (%eax)
; SLOW_32-NEXT:    retl
;
; SLOW_64-LABEL: bork:
; SLOW_64:       # %bb.0:
; SLOW_64-NEXT:    movq $0, 72(%rdi)
; SLOW_64-NEXT:    movq $0, 64(%rdi)
; SLOW_64-NEXT:    movq $0, 56(%rdi)
; SLOW_64-NEXT:    movq $0, 48(%rdi)
; SLOW_64-NEXT:    movq $0, 40(%rdi)
; SLOW_64-NEXT:    movq $0, 32(%rdi)
; SLOW_64-NEXT:    movq $0, 24(%rdi)
; SLOW_64-NEXT:    movq $0, 16(%rdi)
; SLOW_64-NEXT:    movq $0, 8(%rdi)
; SLOW_64-NEXT:    movq $0, (%rdi)
; SLOW_64-NEXT:    retq
  call void @llvm.memset.p0.i64(ptr align 4 %dst, i8 0, i64 80, i1 false)
  ret void
}
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind