; llvm/test/CodeGen/X86/maskmovdqu.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686--    -mattr=+sse2,-avx | FileCheck %s --check-prefix=i686_SSE2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2,-avx | FileCheck %s --check-prefix=x86_64_SSE2
; RUN: llc < %s -mtriple=x86_64--gnux32 -mattr=+sse2,-avx | FileCheck %s --check-prefix=x86_x32_SSE2
; RUN: llc < %s -mtriple=i686--    -mattr=+avx | FileCheck %s --check-prefix=i686_AVX
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefix=x86_64_AVX
; RUN: llc < %s -mtriple=x86_64--gnux32 -mattr=+avx | FileCheck %s --check-prefix=x86_x32_AVX
; rdar://6573467

; The maskmov.dqu intrinsic takes its destination pointer (%c) implicitly in
; (E/R)DI, so the test checks that %c is moved into edi/rdi before the
; (v)maskmovdqu across all three ABIs. On i686, EDI is callee-saved, hence the
; push/pop pair; on x32, the 32-bit pointer forces the addr32 prefix and a
; kill of the upper half of rdi. %dummy only exists to push %c out of the
; first argument register.
define void @test(<16 x i8> %a, <16 x i8> %b, i32 %dummy, ptr %c) nounwind {
; i686_SSE2-LABEL: test:
; i686_SSE2:       # %bb.0: # %entry
; i686_SSE2-NEXT:    pushl %edi
; i686_SSE2-NEXT:    movl {{[0-9]+}}(%esp), %edi
; i686_SSE2-NEXT:    maskmovdqu %xmm1, %xmm0
; i686_SSE2-NEXT:    popl %edi
; i686_SSE2-NEXT:    retl
;
; x86_64_SSE2-LABEL: test:
; x86_64_SSE2:       # %bb.0: # %entry
; x86_64_SSE2-NEXT:    movq %rsi, %rdi
; x86_64_SSE2-NEXT:    maskmovdqu %xmm1, %xmm0
; x86_64_SSE2-NEXT:    retq
;
; x86_x32_SSE2-LABEL: test:
; x86_x32_SSE2:       # %bb.0: # %entry
; x86_x32_SSE2-NEXT:    movq %rsi, %rdi
; x86_x32_SSE2-NEXT:    # kill: def $edi killed $edi killed $rdi
; x86_x32_SSE2-NEXT:    addr32 maskmovdqu %xmm1, %xmm0
; x86_x32_SSE2-NEXT:    retq
;
; i686_AVX-LABEL: test:
; i686_AVX:       # %bb.0: # %entry
; i686_AVX-NEXT:    pushl %edi
; i686_AVX-NEXT:    movl {{[0-9]+}}(%esp), %edi
; i686_AVX-NEXT:    vmaskmovdqu %xmm1, %xmm0
; i686_AVX-NEXT:    popl %edi
; i686_AVX-NEXT:    retl
;
; x86_64_AVX-LABEL: test:
; x86_64_AVX:       # %bb.0: # %entry
; x86_64_AVX-NEXT:    movq %rsi, %rdi
; x86_64_AVX-NEXT:    vmaskmovdqu %xmm1, %xmm0
; x86_64_AVX-NEXT:    retq
;
; x86_x32_AVX-LABEL: test:
; x86_x32_AVX:       # %bb.0: # %entry
; x86_x32_AVX-NEXT:    movq %rsi, %rdi
; x86_x32_AVX-NEXT:    # kill: def $edi killed $edi killed $rdi
; x86_x32_AVX-NEXT:    addr32 vmaskmovdqu %xmm1, %xmm0
; x86_x32_AVX-NEXT:    retq
entry:
	tail call void @llvm.x86.sse2.maskmov.dqu( <16 x i8> %a, <16 x i8> %b, ptr %c )
	ret void
}

; Byte-masked store of the first vector through the pointer, selecting bytes
; by the high bit of each byte of the second vector (lowers to the SSE2
; MASKMOVDQU / AVX VMASKMOVDQU instruction — see Intel SDM).
declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, ptr) nounwind