; llvm/test/CodeGen/X86/fast-isel-store.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-none-linux -fast-isel -fast-isel-abort=1 -mattr=+sse2 < %s | FileCheck %s --check-prefixes=X86,X86-SSE
; RUN: llc -mtriple=x86_64-none-linux -fast-isel -fast-isel-abort=1 -mattr=+sse2 < %s | FileCheck %s --check-prefixes=X64,X64-SSE
; RUN: llc -mtriple=i686-none-linux -fast-isel -fast-isel-abort=1 -mattr=+avx < %s | FileCheck %s --check-prefixes=X86,X86-AVX,X86-AVX1
; RUN: llc -mtriple=x86_64-none-linux -fast-isel -fast-isel-abort=1 -mattr=+avx < %s | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX1
; RUN: llc -mtriple=i686-none-linux -fast-isel -fast-isel-abort=1 -mattr=+avx512f < %s | FileCheck %s --check-prefixes=X86,X86-AVX,X86-AVX512
; RUN: llc -mtriple=x86_64-none-linux -fast-isel -fast-isel-abort=1 -mattr=+avx512f < %s | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX512
; RUN: llc -mtriple=i686-none-linux -fast-isel -fast-isel-abort=1 -mattr=+avx512f,+avx512dq,+avx512bw < %s | FileCheck %s --check-prefixes=X86,X86-AVX,X86-AVX512
; RUN: llc -mtriple=x86_64-none-linux -fast-isel -fast-isel-abort=1 -mattr=+avx512vl,+avx512dq,+avx512bw < %s | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX512

; Scalar i32 store through %addr; the value is also returned, so fast-isel
; must keep it live across the (possibly unaligned, align 1) store.
define i32 @test_store_32(ptr nocapture %addr, i32 %value) nounwind {
; X86-LABEL: test_store_32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl %eax, (%ecx)
; X86-NEXT:    retl
;
; X64-LABEL: test_store_32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movl %esi, %eax
; X64-NEXT:    movl %esi, (%rdi)
; X64-NEXT:    retq
entry:
  store i32 %value, ptr %addr, align 1
  ret i32 %value
}

; Scalar i16 store; exercises the 16-bit subregister path in fast-isel
; (note the implicit kill of the upper bits on x86-64).
define i16 @test_store_16(ptr nocapture %addr, i16 %value) nounwind {
; X86-LABEL: test_store_16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movw %ax, (%ecx)
; X86-NEXT:    retl
;
; X64-LABEL: test_store_16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movl %esi, %eax
; X64-NEXT:    movw %ax, (%rdi)
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  store i16 %value, ptr %addr, align 1
  ret i16 %value
}

; Unaligned 128-bit integer-vector store: expects movdqu / vmovdqu.
define <4 x i32> @test_store_4xi32(ptr nocapture %addr, <4 x i32> %value, <4 x i32> %value2) nounwind {
; X86-SSE-LABEL: test_store_4xi32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    paddd %xmm1, %xmm0
; X86-SSE-NEXT:    movdqu %xmm0, (%eax)
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_4xi32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    paddd %xmm1, %xmm0
; X64-SSE-NEXT:    movdqu %xmm0, (%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX-LABEL: test_store_4xi32:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT:    vmovdqu %xmm0, (%eax)
; X86-AVX-NEXT:    retl
;
; X64-AVX-LABEL: test_store_4xi32:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT:    vmovdqu %xmm0, (%rdi)
; X64-AVX-NEXT:    retq
  %foo = add <4 x i32> %value, %value2 ; to force integer type on store
  store <4 x i32> %foo, ptr %addr, align 1
  ret <4 x i32> %foo
}

; Aligned (16-byte) 128-bit integer-vector store: expects movdqa / vmovdqa.
define <4 x i32> @test_store_4xi32_aligned(ptr nocapture %addr, <4 x i32> %value, <4 x i32> %value2) nounwind {
; X86-SSE-LABEL: test_store_4xi32_aligned:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    paddd %xmm1, %xmm0
; X86-SSE-NEXT:    movdqa %xmm0, (%eax)
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_4xi32_aligned:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    paddd %xmm1, %xmm0
; X64-SSE-NEXT:    movdqa %xmm0, (%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX-LABEL: test_store_4xi32_aligned:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT:    vmovdqa %xmm0, (%eax)
; X86-AVX-NEXT:    retl
;
; X64-AVX-LABEL: test_store_4xi32_aligned:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT:    vmovdqa %xmm0, (%rdi)
; X64-AVX-NEXT:    retq
  %foo = add <4 x i32> %value, %value2 ; to force integer type on store
  store <4 x i32> %foo, ptr %addr, align 16
  ret <4 x i32> %foo
}

; Unaligned 128-bit float-vector store: expects movups / vmovups.
define <4 x float> @test_store_4xf32(ptr nocapture %addr, <4 x float> %value) nounwind {
; X86-SSE-LABEL: test_store_4xf32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movups %xmm0, (%eax)
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_4xf32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movups %xmm0, (%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX-LABEL: test_store_4xf32:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovups %xmm0, (%eax)
; X86-AVX-NEXT:    retl
;
; X64-AVX-LABEL: test_store_4xf32:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovups %xmm0, (%rdi)
; X64-AVX-NEXT:    retq
  store <4 x float> %value, ptr %addr, align 1
  ret <4 x float> %value
}

; Aligned (16-byte) 128-bit float-vector store: expects movaps / vmovaps.
define <4 x float> @test_store_4xf32_aligned(ptr nocapture %addr, <4 x float> %value) nounwind {
; X86-SSE-LABEL: test_store_4xf32_aligned:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movaps %xmm0, (%eax)
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_4xf32_aligned:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX-LABEL: test_store_4xf32_aligned:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovaps %xmm0, (%eax)
; X86-AVX-NEXT:    retl
;
; X64-AVX-LABEL: test_store_4xf32_aligned:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovaps %xmm0, (%rdi)
; X64-AVX-NEXT:    retq
  store <4 x float> %value, ptr %addr, align 16
  ret <4 x float> %value
}

; Unaligned 128-bit double-vector store: expects movupd / vmovupd.
define <2 x double> @test_store_2xf64(ptr nocapture %addr, <2 x double> %value, <2 x double> %value2) nounwind {
; X86-SSE-LABEL: test_store_2xf64:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    addpd %xmm1, %xmm0
; X86-SSE-NEXT:    movupd %xmm0, (%eax)
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_2xf64:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    addpd %xmm1, %xmm0
; X64-SSE-NEXT:    movupd %xmm0, (%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX-LABEL: test_store_2xf64:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT:    vmovupd %xmm0, (%eax)
; X86-AVX-NEXT:    retl
;
; X64-AVX-LABEL: test_store_2xf64:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT:    vmovupd %xmm0, (%rdi)
; X64-AVX-NEXT:    retq
  %foo = fadd <2 x double> %value, %value2 ; to force double type on store
  store <2 x double> %foo, ptr %addr, align 1
  ret <2 x double> %foo
}

; Aligned (16-byte) 128-bit double-vector store: expects movapd / vmovapd.
define <2 x double> @test_store_2xf64_aligned(ptr nocapture %addr, <2 x double> %value, <2 x double> %value2) nounwind {
; X86-SSE-LABEL: test_store_2xf64_aligned:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    addpd %xmm1, %xmm0
; X86-SSE-NEXT:    movapd %xmm0, (%eax)
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_2xf64_aligned:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    addpd %xmm1, %xmm0
; X64-SSE-NEXT:    movapd %xmm0, (%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX-LABEL: test_store_2xf64_aligned:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT:    vmovapd %xmm0, (%eax)
; X86-AVX-NEXT:    retl
;
; X64-AVX-LABEL: test_store_2xf64_aligned:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT:    vmovapd %xmm0, (%rdi)
; X64-AVX-NEXT:    retq
  %foo = fadd <2 x double> %value, %value2 ; to force double type on store
  store <2 x double> %foo, ptr %addr, align 16
  ret <2 x double> %foo
}

; Unaligned 256-bit store: SSE splits into two 128-bit movups,
; AVX uses a single ymm vmovups.
define <8 x i32> @test_store_8xi32(ptr nocapture %addr, <8 x i32> %value) nounwind {
; X86-SSE-LABEL: test_store_8xi32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movups %xmm0, (%eax)
; X86-SSE-NEXT:    movups %xmm1, 16(%eax)
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_8xi32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movups %xmm0, (%rdi)
; X64-SSE-NEXT:    movups %xmm1, 16(%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX-LABEL: test_store_8xi32:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovups %ymm0, (%eax)
; X86-AVX-NEXT:    retl
;
; X64-AVX-LABEL: test_store_8xi32:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovups %ymm0, (%rdi)
; X64-AVX-NEXT:    retq
  store <8 x i32> %value, ptr %addr, align 1
  ret <8 x i32> %value
}

; Aligned (32-byte) 256-bit store: SSE uses two movaps, AVX one vmovaps ymm.
define <8 x i32> @test_store_8xi32_aligned(ptr nocapture %addr, <8 x i32> %value) nounwind {
; X86-SSE-LABEL: test_store_8xi32_aligned:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movaps %xmm0, (%eax)
; X86-SSE-NEXT:    movaps %xmm1, 16(%eax)
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_8xi32_aligned:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
; X64-SSE-NEXT:    movaps %xmm1, 16(%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX-LABEL: test_store_8xi32_aligned:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovaps %ymm0, (%eax)
; X86-AVX-NEXT:    retl
;
; X64-AVX-LABEL: test_store_8xi32_aligned:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovaps %ymm0, (%rdi)
; X64-AVX-NEXT:    retq
  store <8 x i32> %value, ptr %addr, align 32
  ret <8 x i32> %value
}

; Unaligned 256-bit float store: two movups under SSE, one ymm vmovups under AVX.
define <8 x float> @test_store_8xf32(ptr nocapture %addr, <8 x float> %value) nounwind {
; X86-SSE-LABEL: test_store_8xf32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movups %xmm0, (%eax)
; X86-SSE-NEXT:    movups %xmm1, 16(%eax)
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_8xf32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movups %xmm0, (%rdi)
; X64-SSE-NEXT:    movups %xmm1, 16(%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX-LABEL: test_store_8xf32:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovups %ymm0, (%eax)
; X86-AVX-NEXT:    retl
;
; X64-AVX-LABEL: test_store_8xf32:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovups %ymm0, (%rdi)
; X64-AVX-NEXT:    retq
  store <8 x float> %value, ptr %addr, align 1
  ret <8 x float> %value
}

; Aligned (32-byte) 256-bit float store: two movaps under SSE, one ymm vmovaps under AVX.
define <8 x float> @test_store_8xf32_aligned(ptr nocapture %addr, <8 x float> %value) nounwind {
; X86-SSE-LABEL: test_store_8xf32_aligned:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movaps %xmm0, (%eax)
; X86-SSE-NEXT:    movaps %xmm1, 16(%eax)
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_8xf32_aligned:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
; X64-SSE-NEXT:    movaps %xmm1, 16(%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX-LABEL: test_store_8xf32_aligned:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovaps %ymm0, (%eax)
; X86-AVX-NEXT:    retl
;
; X64-AVX-LABEL: test_store_8xf32_aligned:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovaps %ymm0, (%rdi)
; X64-AVX-NEXT:    retq
  store <8 x float> %value, ptr %addr, align 32
  ret <8 x float> %value
}

; Unaligned 256-bit double store; the fadd keeps the value in FP domain.
; On x86 SSE the 4th vector argument spills to the stack, hence the extra
; stack-slot load in the X86-SSE checks.
define <4 x double> @test_store_4xf64(ptr nocapture %addr, <4 x double> %value, <4 x double> %value2) nounwind {
; X86-SSE-LABEL: test_store_4xf64:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    subl $12, %esp
; X86-SSE-NEXT:    movapd {{[0-9]+}}(%esp), %xmm3
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    addpd %xmm2, %xmm0
; X86-SSE-NEXT:    movupd %xmm0, (%eax)
; X86-SSE-NEXT:    addpd %xmm3, %xmm1
; X86-SSE-NEXT:    movupd %xmm1, 16(%eax)
; X86-SSE-NEXT:    addl $12, %esp
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_4xf64:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    addpd %xmm2, %xmm0
; X64-SSE-NEXT:    movupd %xmm0, (%rdi)
; X64-SSE-NEXT:    addpd %xmm3, %xmm1
; X64-SSE-NEXT:    movupd %xmm1, 16(%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX-LABEL: test_store_4xf64:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; X86-AVX-NEXT:    vmovupd %ymm0, (%eax)
; X86-AVX-NEXT:    retl
;
; X64-AVX-LABEL: test_store_4xf64:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; X64-AVX-NEXT:    vmovupd %ymm0, (%rdi)
; X64-AVX-NEXT:    retq
  %foo = fadd <4 x double> %value, %value2 ; to force double type on store
  store <4 x double> %foo, ptr %addr, align 1
  ret <4 x double> %foo
}

; Aligned (32-byte) 256-bit double store: movapd pairs under SSE,
; single ymm vmovapd under AVX.
define <4 x double> @test_store_4xf64_aligned(ptr nocapture %addr, <4 x double> %value, <4 x double> %value2) nounwind {
; X86-SSE-LABEL: test_store_4xf64_aligned:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    subl $12, %esp
; X86-SSE-NEXT:    movapd {{[0-9]+}}(%esp), %xmm3
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    addpd %xmm2, %xmm0
; X86-SSE-NEXT:    movapd %xmm0, (%eax)
; X86-SSE-NEXT:    addpd %xmm3, %xmm1
; X86-SSE-NEXT:    movapd %xmm1, 16(%eax)
; X86-SSE-NEXT:    addl $12, %esp
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_4xf64_aligned:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    addpd %xmm2, %xmm0
; X64-SSE-NEXT:    movapd %xmm0, (%rdi)
; X64-SSE-NEXT:    addpd %xmm3, %xmm1
; X64-SSE-NEXT:    movapd %xmm1, 16(%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX-LABEL: test_store_4xf64_aligned:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; X86-AVX-NEXT:    vmovapd %ymm0, (%eax)
; X86-AVX-NEXT:    retl
;
; X64-AVX-LABEL: test_store_4xf64_aligned:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; X64-AVX-NEXT:    vmovapd %ymm0, (%rdi)
; X64-AVX-NEXT:    retq
  %foo = fadd <4 x double> %value, %value2 ; to force double type on store
  store <4 x double> %foo, ptr %addr, align 32
  ret <4 x double> %foo
}

; Unaligned 512-bit store: four xmm stores under SSE, two ymm under AVX1,
; a single zmm vmovups under AVX-512.
define <16 x i32> @test_store_16xi32(ptr nocapture %addr, <16 x i32> %value) nounwind {
; X86-SSE-LABEL: test_store_16xi32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    subl $12, %esp
; X86-SSE-NEXT:    movaps {{[0-9]+}}(%esp), %xmm3
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movups %xmm0, (%eax)
; X86-SSE-NEXT:    movups %xmm1, 16(%eax)
; X86-SSE-NEXT:    movups %xmm2, 32(%eax)
; X86-SSE-NEXT:    movups %xmm3, 48(%eax)
; X86-SSE-NEXT:    addl $12, %esp
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_16xi32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movups %xmm0, (%rdi)
; X64-SSE-NEXT:    movups %xmm1, 16(%rdi)
; X64-SSE-NEXT:    movups %xmm2, 32(%rdi)
; X64-SSE-NEXT:    movups %xmm3, 48(%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX1-LABEL: test_store_16xi32:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX1-NEXT:    vmovups %ymm0, (%eax)
; X86-AVX1-NEXT:    vmovups %ymm1, 32(%eax)
; X86-AVX1-NEXT:    retl
;
; X64-AVX1-LABEL: test_store_16xi32:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovups %ymm0, (%rdi)
; X64-AVX1-NEXT:    vmovups %ymm1, 32(%rdi)
; X64-AVX1-NEXT:    retq
;
; X86-AVX512-LABEL: test_store_16xi32:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX512-NEXT:    vmovups %zmm0, (%eax)
; X86-AVX512-NEXT:    retl
;
; X64-AVX512-LABEL: test_store_16xi32:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovups %zmm0, (%rdi)
; X64-AVX512-NEXT:    retq
  store <16 x i32> %value, ptr %addr, align 1
  ret <16 x i32> %value
}

; Aligned (64-byte) 512-bit store: aligned move forms at each vector width.
define <16 x i32> @test_store_16xi32_aligned(ptr nocapture %addr, <16 x i32> %value) nounwind {
; X86-SSE-LABEL: test_store_16xi32_aligned:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    subl $12, %esp
; X86-SSE-NEXT:    movaps {{[0-9]+}}(%esp), %xmm3
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movaps %xmm0, (%eax)
; X86-SSE-NEXT:    movaps %xmm1, 16(%eax)
; X86-SSE-NEXT:    movaps %xmm2, 32(%eax)
; X86-SSE-NEXT:    movaps %xmm3, 48(%eax)
; X86-SSE-NEXT:    addl $12, %esp
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_16xi32_aligned:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
; X64-SSE-NEXT:    movaps %xmm1, 16(%rdi)
; X64-SSE-NEXT:    movaps %xmm2, 32(%rdi)
; X64-SSE-NEXT:    movaps %xmm3, 48(%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX1-LABEL: test_store_16xi32_aligned:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX1-NEXT:    vmovaps %ymm0, (%eax)
; X86-AVX1-NEXT:    vmovaps %ymm1, 32(%eax)
; X86-AVX1-NEXT:    retl
;
; X64-AVX1-LABEL: test_store_16xi32_aligned:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovaps %ymm0, (%rdi)
; X64-AVX1-NEXT:    vmovaps %ymm1, 32(%rdi)
; X64-AVX1-NEXT:    retq
;
; X86-AVX512-LABEL: test_store_16xi32_aligned:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX512-NEXT:    vmovaps %zmm0, (%eax)
; X86-AVX512-NEXT:    retl
;
; X64-AVX512-LABEL: test_store_16xi32_aligned:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovaps %zmm0, (%rdi)
; X64-AVX512-NEXT:    retq
  store <16 x i32> %value, ptr %addr, align 64
  ret <16 x i32> %value
}

; Unaligned 512-bit float store, split per available vector width
; (4x xmm SSE, 2x ymm AVX1, 1x zmm AVX-512).
define <16 x float> @test_store_16xf32(ptr nocapture %addr, <16 x float> %value) nounwind {
; X86-SSE-LABEL: test_store_16xf32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    subl $12, %esp
; X86-SSE-NEXT:    movaps {{[0-9]+}}(%esp), %xmm3
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movups %xmm0, (%eax)
; X86-SSE-NEXT:    movups %xmm1, 16(%eax)
; X86-SSE-NEXT:    movups %xmm2, 32(%eax)
; X86-SSE-NEXT:    movups %xmm3, 48(%eax)
; X86-SSE-NEXT:    addl $12, %esp
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_16xf32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movups %xmm0, (%rdi)
; X64-SSE-NEXT:    movups %xmm1, 16(%rdi)
; X64-SSE-NEXT:    movups %xmm2, 32(%rdi)
; X64-SSE-NEXT:    movups %xmm3, 48(%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX1-LABEL: test_store_16xf32:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX1-NEXT:    vmovups %ymm0, (%eax)
; X86-AVX1-NEXT:    vmovups %ymm1, 32(%eax)
; X86-AVX1-NEXT:    retl
;
; X64-AVX1-LABEL: test_store_16xf32:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovups %ymm0, (%rdi)
; X64-AVX1-NEXT:    vmovups %ymm1, 32(%rdi)
; X64-AVX1-NEXT:    retq
;
; X86-AVX512-LABEL: test_store_16xf32:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX512-NEXT:    vmovups %zmm0, (%eax)
; X86-AVX512-NEXT:    retl
;
; X64-AVX512-LABEL: test_store_16xf32:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovups %zmm0, (%rdi)
; X64-AVX512-NEXT:    retq
  store <16 x float> %value, ptr %addr, align 1
  ret <16 x float> %value
}

; Aligned (64-byte) 512-bit float store: aligned move forms at each width.
define <16 x float> @test_store_16xf32_aligned(ptr nocapture %addr, <16 x float> %value) nounwind {
; X86-SSE-LABEL: test_store_16xf32_aligned:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    subl $12, %esp
; X86-SSE-NEXT:    movaps {{[0-9]+}}(%esp), %xmm3
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movaps %xmm0, (%eax)
; X86-SSE-NEXT:    movaps %xmm1, 16(%eax)
; X86-SSE-NEXT:    movaps %xmm2, 32(%eax)
; X86-SSE-NEXT:    movaps %xmm3, 48(%eax)
; X86-SSE-NEXT:    addl $12, %esp
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_16xf32_aligned:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
; X64-SSE-NEXT:    movaps %xmm1, 16(%rdi)
; X64-SSE-NEXT:    movaps %xmm2, 32(%rdi)
; X64-SSE-NEXT:    movaps %xmm3, 48(%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX1-LABEL: test_store_16xf32_aligned:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX1-NEXT:    vmovaps %ymm0, (%eax)
; X86-AVX1-NEXT:    vmovaps %ymm1, 32(%eax)
; X86-AVX1-NEXT:    retl
;
; X64-AVX1-LABEL: test_store_16xf32_aligned:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovaps %ymm0, (%rdi)
; X64-AVX1-NEXT:    vmovaps %ymm1, 32(%rdi)
; X64-AVX1-NEXT:    retq
;
; X86-AVX512-LABEL: test_store_16xf32_aligned:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX512-NEXT:    vmovaps %zmm0, (%eax)
; X86-AVX512-NEXT:    retl
;
; X64-AVX512-LABEL: test_store_16xf32_aligned:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovaps %zmm0, (%rdi)
; X64-AVX512-NEXT:    retq
  store <16 x float> %value, ptr %addr, align 64
  ret <16 x float> %value
}

; Unaligned 512-bit double store; fadd keeps the FP domain. Under 32-bit
; SSE/AVX1 most of %value2 arrives on the stack, hence the stack loads and
; (for AVX1) the 32-byte stack realignment in the checks.
define <8 x double> @test_store_8xf64(ptr nocapture %addr, <8 x double> %value, <8 x double> %value2) nounwind {
; X86-SSE-LABEL: test_store_8xf64:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    subl $12, %esp
; X86-SSE-NEXT:    movapd {{[0-9]+}}(%esp), %xmm4
; X86-SSE-NEXT:    movapd {{[0-9]+}}(%esp), %xmm5
; X86-SSE-NEXT:    movapd {{[0-9]+}}(%esp), %xmm6
; X86-SSE-NEXT:    movapd {{[0-9]+}}(%esp), %xmm3
; X86-SSE-NEXT:    addpd %xmm4, %xmm3
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    addpd {{[0-9]+}}(%esp), %xmm0
; X86-SSE-NEXT:    movupd %xmm0, (%eax)
; X86-SSE-NEXT:    addpd %xmm6, %xmm1
; X86-SSE-NEXT:    movupd %xmm1, 16(%eax)
; X86-SSE-NEXT:    addpd %xmm5, %xmm2
; X86-SSE-NEXT:    movupd %xmm2, 32(%eax)
; X86-SSE-NEXT:    movupd %xmm3, 48(%eax)
; X86-SSE-NEXT:    addl $12, %esp
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_8xf64:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    addpd %xmm4, %xmm0
; X64-SSE-NEXT:    movupd %xmm0, (%rdi)
; X64-SSE-NEXT:    addpd %xmm5, %xmm1
; X64-SSE-NEXT:    movupd %xmm1, 16(%rdi)
; X64-SSE-NEXT:    addpd %xmm6, %xmm2
; X64-SSE-NEXT:    movupd %xmm2, 32(%rdi)
; X64-SSE-NEXT:    addpd %xmm7, %xmm3
; X64-SSE-NEXT:    movupd %xmm3, 48(%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX1-LABEL: test_store_8xf64:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    pushl %ebp
; X86-AVX1-NEXT:    movl %esp, %ebp
; X86-AVX1-NEXT:    andl $-32, %esp
; X86-AVX1-NEXT:    subl $32, %esp
; X86-AVX1-NEXT:    vmovapd 40(%ebp), %ymm3
; X86-AVX1-NEXT:    movl 8(%ebp), %eax
; X86-AVX1-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
; X86-AVX1-NEXT:    vmovupd %ymm0, (%eax)
; X86-AVX1-NEXT:    vaddpd %ymm3, %ymm1, %ymm1
; X86-AVX1-NEXT:    vmovupd %ymm1, 32(%eax)
; X86-AVX1-NEXT:    movl %ebp, %esp
; X86-AVX1-NEXT:    popl %ebp
; X86-AVX1-NEXT:    retl
;
; X64-AVX1-LABEL: test_store_8xf64:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
; X64-AVX1-NEXT:    vmovupd %ymm0, (%rdi)
; X64-AVX1-NEXT:    vaddpd %ymm3, %ymm1, %ymm1
; X64-AVX1-NEXT:    vmovupd %ymm1, 32(%rdi)
; X64-AVX1-NEXT:    retq
;
; X86-AVX512-LABEL: test_store_8xf64:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX512-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; X86-AVX512-NEXT:    vmovupd %zmm0, (%eax)
; X86-AVX512-NEXT:    retl
;
; X64-AVX512-LABEL: test_store_8xf64:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT:    vmovupd %zmm0, (%rdi)
; X64-AVX512-NEXT:    retq
  %foo = fadd <8 x double> %value, %value2 ; to force double type on store
  store <8 x double> %foo, ptr %addr, align 1
  ret <8 x double> %foo
}

; Aligned (64-byte) 512-bit double store: same shapes as test_store_8xf64
; but with aligned move forms (movapd / vmovapd).
define <8 x double> @test_store_8xf64_aligned(ptr nocapture %addr, <8 x double> %value, <8 x double> %value2) nounwind {
; X86-SSE-LABEL: test_store_8xf64_aligned:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    subl $12, %esp
; X86-SSE-NEXT:    movapd {{[0-9]+}}(%esp), %xmm4
; X86-SSE-NEXT:    movapd {{[0-9]+}}(%esp), %xmm5
; X86-SSE-NEXT:    movapd {{[0-9]+}}(%esp), %xmm6
; X86-SSE-NEXT:    movapd {{[0-9]+}}(%esp), %xmm3
; X86-SSE-NEXT:    addpd %xmm4, %xmm3
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    addpd {{[0-9]+}}(%esp), %xmm0
; X86-SSE-NEXT:    movapd %xmm0, (%eax)
; X86-SSE-NEXT:    addpd %xmm6, %xmm1
; X86-SSE-NEXT:    movapd %xmm1, 16(%eax)
; X86-SSE-NEXT:    addpd %xmm5, %xmm2
; X86-SSE-NEXT:    movapd %xmm2, 32(%eax)
; X86-SSE-NEXT:    movapd %xmm3, 48(%eax)
; X86-SSE-NEXT:    addl $12, %esp
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: test_store_8xf64_aligned:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    addpd %xmm4, %xmm0
; X64-SSE-NEXT:    movapd %xmm0, (%rdi)
; X64-SSE-NEXT:    addpd %xmm5, %xmm1
; X64-SSE-NEXT:    movapd %xmm1, 16(%rdi)
; X64-SSE-NEXT:    addpd %xmm6, %xmm2
; X64-SSE-NEXT:    movapd %xmm2, 32(%rdi)
; X64-SSE-NEXT:    addpd %xmm7, %xmm3
; X64-SSE-NEXT:    movapd %xmm3, 48(%rdi)
; X64-SSE-NEXT:    retq
;
; X86-AVX1-LABEL: test_store_8xf64_aligned:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    pushl %ebp
; X86-AVX1-NEXT:    movl %esp, %ebp
; X86-AVX1-NEXT:    andl $-32, %esp
; X86-AVX1-NEXT:    subl $32, %esp
; X86-AVX1-NEXT:    vmovapd 40(%ebp), %ymm3
; X86-AVX1-NEXT:    movl 8(%ebp), %eax
; X86-AVX1-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
; X86-AVX1-NEXT:    vmovapd %ymm0, (%eax)
; X86-AVX1-NEXT:    vaddpd %ymm3, %ymm1, %ymm1
; X86-AVX1-NEXT:    vmovapd %ymm1, 32(%eax)
; X86-AVX1-NEXT:    movl %ebp, %esp
; X86-AVX1-NEXT:    popl %ebp
; X86-AVX1-NEXT:    retl
;
; X64-AVX1-LABEL: test_store_8xf64_aligned:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
; X64-AVX1-NEXT:    vmovapd %ymm0, (%rdi)
; X64-AVX1-NEXT:    vaddpd %ymm3, %ymm1, %ymm1
; X64-AVX1-NEXT:    vmovapd %ymm1, 32(%rdi)
; X64-AVX1-NEXT:    retq
;
; X86-AVX512-LABEL: test_store_8xf64_aligned:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX512-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; X86-AVX512-NEXT:    vmovapd %zmm0, (%eax)
; X86-AVX512-NEXT:    retl
;
; X64-AVX512-LABEL: test_store_8xf64_aligned:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT:    vmovapd %zmm0, (%rdi)
; X64-AVX512-NEXT:    retq
  %foo = fadd <8 x double> %value, %value2 ; to force double type on store
  store <8 x double> %foo, ptr %addr, align 64
  ret <8 x double> %foo
}