; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
; arm64 keeps a separate copy of this test because its NEON intrinsics differ from the AArch32 versions.
; Exercises copying a 2-vector (Q-pair) register tuple: the first result of
; the initial ld2lane is reused as an operand of the second ld2lane, so the
; backend must re-materialize the { <4 x i32>, <4 x i32> } tuple in
; consecutive Q registers before each ld2 — the mov v*.16b instructions in
; the CHECK lines below are that tuple copy.
define <4 x i32> @copyTuple.QPair(ptr %a, ptr %b) {
; CHECK-LABEL: copyTuple.QPair:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v3.4s, #2
; CHECK-NEXT: movi v2.2d, #0xffffffffffffffff
; CHECK-NEXT: mov v1.16b, v3.16b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ld2 { v0.s, v1.s }[1], [x0]
; CHECK-NEXT: mov v1.16b, v2.16b
; CHECK-NEXT: ld2 { v0.s, v1.s }[1], [x1]
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $q0_q1
; CHECK-NEXT: ret
entry:
; Load lane 1 from %a into a tuple initialized to { all-ones, all-twos }.
%vld = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 2, i32 2, i32 2, i32 2>, i64 1, ptr %a)
; Reuse field 0 of the first result as field 0 of the second ld2lane's
; input tuple — this reuse is what forces the register-tuple copy.
%extract = extractvalue { <4 x i32>, <4 x i32> } %vld, 0
%vld1 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0(<4 x i32> %extract, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i64 1, ptr %b)
%vld1.fca.0.extract = extractvalue { <4 x i32>, <4 x i32> } %vld1, 0
ret <4 x i32> %vld1.fca.0.extract
}
; Same pattern as copyTuple.QPair but for a 3-vector (Q-triple) tuple:
; field 0 of the first ld3lane result feeds the second ld3lane, forcing the
; backend to copy the three-element tuple into consecutive Q registers
; before each ld3 (the mov sequences in the CHECK lines).
define <4 x i32> @copyTuple.QTriple(ptr %a, ptr %b, <4 x i32> %c) {
; CHECK-LABEL: copyTuple.QTriple:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $q0 killed $q0 def $q31_q0_q1
; CHECK-NEXT: movi v31.2d, #0xffffffffffffffff
; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: mov v2.16b, v31.16b
; CHECK-NEXT: mov v3.16b, v0.16b
; CHECK-NEXT: mov v4.16b, v1.16b
; CHECK-NEXT: ld3 { v2.s, v3.s, v4.s }[1], [x0]
; CHECK-NEXT: mov v3.16b, v31.16b
; CHECK-NEXT: mov v4.16b, v0.16b
; CHECK-NEXT: ld3 { v2.s, v3.s, v4.s }[1], [x1]
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
entry:
; First ld3lane: input tuple is { all-ones, %c, %c }; loads lane 1 from %a.
%vld = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, i64 1, ptr %a)
; Reuse of the first result's field 0 forces the tuple copy under test.
%extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld, 0
%vld1 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0(<4 x i32> %extract, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, i64 1, ptr %b)
%vld1.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld1, 0
ret <4 x i32> %vld1.fca.0.extract
}
; Same pattern as the blocks above but for a 4-vector (Q-quad) tuple:
; field 0 of the first ld4lane result feeds the second ld4lane, forcing a
; copy of the four-element tuple into consecutive Q registers before each
; ld4 (the mov sequences in the CHECK lines).
define <4 x i32> @copyTuple.QQuad(ptr %a, ptr %b, <4 x i32> %c) {
; CHECK-LABEL: copyTuple.QQuad:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $q0 killed $q0 def $q31_q0_q1_q2
; CHECK-NEXT: movi v31.2d, #0xffffffffffffffff
; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: mov v2.16b, v0.16b
; CHECK-NEXT: mov v3.16b, v31.16b
; CHECK-NEXT: mov v4.16b, v0.16b
; CHECK-NEXT: mov v5.16b, v1.16b
; CHECK-NEXT: mov v6.16b, v2.16b
; CHECK-NEXT: ld4 { v3.s, v4.s, v5.s, v6.s }[1], [x0]
; CHECK-NEXT: mov v4.16b, v31.16b
; CHECK-NEXT: mov v5.16b, v0.16b
; CHECK-NEXT: mov v6.16b, v0.16b
; CHECK-NEXT: ld4 { v3.s, v4.s, v5.s, v6.s }[1], [x1]
; CHECK-NEXT: mov v0.16b, v3.16b
; CHECK-NEXT: ret
entry:
; First ld4lane: input tuple is { all-ones, %c, %c, %c }; loads lane 1 from %a.
%vld = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, <4 x i32> %c, i64 1, ptr %a)
; Reuse of the first result's field 0 forces the tuple copy under test.
%extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld, 0
%vld1 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0(<4 x i32> %extract, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, i64 1, ptr %b)
%vld1.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld1, 0
ret <4 x i32> %vld1.fca.0.extract
}
declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0(<4 x i32>, <4 x i32>, i64, ptr)
declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, i64, ptr)
declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, ptr)