llvm/test/CodeGen/X86/vmovq.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx    | FileCheck %s --check-prefix=AVX

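; PR25554: the vector 'or' with <1, 0> and 'add' with <0, 1> should lower to
; por/paddq (vpor/vpaddq under AVX) with RIP-relative constant-pool operands,
; as encoded in the checks below.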
define <2 x i64> @PR25554(<2 x i64> %v0, <2 x i64> %v1) {
; SSE-LABEL: PR25554:
; SSE:       # %bb.0:
; SSE-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    paddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: PR25554:
; AVX:       # %bb.0:
; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %c1 = or <2 x i64> %v0, <i64 1, i64 0>
  %c2 = add <2 x i64> %c1, <i64 0, i64 1>
  ret <2 x i64> %c2
}