# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64 -O0 -run-pass=legalizer -global-isel-abort=1 %s -o - | FileCheck %s
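# These tests check that the legalizer's artifact combiner folds
# G_UNMERGE_VALUES of G_CONCAT_VECTORS / G_INSERT trees back to the original
# source vectors, as the CHECK lines below expect.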
---
name: combine_unmerge_from_unmerge_of_concat_tree
alignment: 4
tracksRegLiveness: true
body: |
  bb.1:
    liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7
    ; CHECK-LABEL: name: combine_unmerge_from_unmerge_of_concat_tree
    ; CHECK: liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
    ; CHECK: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
    ; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
    ; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
    ; CHECK: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
    ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
    ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
    ; CHECK: RET_ReallyLR
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(p0) = COPY $x2
    %3:_(s64) = COPY $d0
    %4:_(s64) = COPY $d1
    %5:_(s64) = COPY $d2
    %6:_(s64) = COPY $d3
    %7:_(s64) = COPY $d4
    %8:_(s64) = COPY $d5
    %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR %5:_(s64), %6:_(s64)
    %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR %6:_(s64), %8:_(s64)
    %v4s64_val1:_(<4 x s64>) = G_CONCAT_VECTORS %v2s64_val:_(<2 x s64>), %v2s64_val2:_(<2 x s64>)
    %v4s64_val2:_(<4 x s64>) = G_CONCAT_VECTORS %v2s64_val2:_(<2 x s64>), %v2s64_val:_(<2 x s64>)
    %v8s64_undef:_(<8 x s64>) = G_IMPLICIT_DEF
    %concat1:_(<8 x s64>) = G_CONCAT_VECTORS %v4s64_val1:_(<4 x s64>), %v4s64_val2:_(<4 x s64>)
    %bigconcat:_(<24 x s64>) = G_CONCAT_VECTORS %concat1:_(<8 x s64>), %v8s64_undef:_(<8 x s64>), %v8s64_undef:_(<8 x s64>)
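    ; Lanes 0-5 of %bigconcat are the first six lanes of %concat1, i.e. the
    ; pairs <%5, %6>, <%6, %8>, <%6, %8>, so after both unmerges the combiner
    ; can feed %v2s64_val and %v2s64_val2 straight into the stores below.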
    %unmerge1:_(<6 x s64>), %deaddef1:_(<6 x s64>), %deaddef2:_(<6 x s64>), %deaddef3:_(<6 x s64>) = G_UNMERGE_VALUES %bigconcat:_(<24 x s64>)
    %val1:_(<2 x s64>), %val2:_(<2 x s64>), %val3:_(<2 x s64>) = G_UNMERGE_VALUES %unmerge1:_(<6 x s64>)
    G_STORE %val1:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
    G_STORE %val2:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
    G_STORE %val3:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
    RET_ReallyLR
...
---
name: combine_unmerge_from_unmerge_of_concat_tree_high_bits
alignment: 4
tracksRegLiveness: true
body: |
  bb.1:
    liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7
    ; CHECK-LABEL: name: combine_unmerge_from_unmerge_of_concat_tree_high_bits
    ; CHECK: liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
    ; CHECK: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
    ; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
    ; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
    ; CHECK: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
    ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
    ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
    ; CHECK: RET_ReallyLR
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(p0) = COPY $x2
    %3:_(s64) = COPY $d0
    %4:_(s64) = COPY $d1
    %5:_(s64) = COPY $d2
    %6:_(s64) = COPY $d3
    %7:_(s64) = COPY $d4
    %8:_(s64) = COPY $d5
    %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR %5:_(s64), %6:_(s64)
    %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR %6:_(s64), %8:_(s64)
    %v4s64_val1:_(<4 x s64>) = G_CONCAT_VECTORS %v2s64_val:_(<2 x s64>), %v2s64_val2:_(<2 x s64>)
    %v4s64_val2:_(<4 x s64>) = G_CONCAT_VECTORS %v2s64_val2:_(<2 x s64>), %v2s64_val:_(<2 x s64>)
    %v8s64_undef:_(<8 x s64>) = G_IMPLICIT_DEF
    %concat1:_(<8 x s64>) = G_CONCAT_VECTORS %v4s64_val1:_(<4 x s64>), %v4s64_val2:_(<4 x s64>)
    %bigconcat:_(<24 x s64>) = G_CONCAT_VECTORS %v8s64_undef:_(<8 x s64>), %v8s64_undef:_(<8 x s64>), %concat1:_(<8 x s64>)
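    ; %concat1 occupies lanes 16-23 of %bigconcat; the last <6 x s64> piece of
    ; the unmerge covers lanes 18-23, i.e. <%6, %8>, <%6, %8>, <%5, %6>.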
    %deaddef1:_(<6 x s64>), %deaddef2:_(<6 x s64>), %deaddef3:_(<6 x s64>), %unmerge1:_(<6 x s64>) = G_UNMERGE_VALUES %bigconcat:_(<24 x s64>)
    %val1:_(<2 x s64>), %val2:_(<2 x s64>), %val3:_(<2 x s64>) = G_UNMERGE_VALUES %unmerge1:_(<6 x s64>)
    G_STORE %val1:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
    G_STORE %val2:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
    G_STORE %val3:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
    RET_ReallyLR
...
---
name: combine_unmerge_from_insert_into_low
alignment: 4
tracksRegLiveness: true
body: |
  bb.1:
    liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7
    ; CHECK-LABEL: name: combine_unmerge_from_insert_into_low
    ; CHECK: liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
    ; CHECK: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
    ; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
    ; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
    ; CHECK: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
    ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
    ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
    ; CHECK: RET_ReallyLR
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(p0) = COPY $x2
    %3:_(s64) = COPY $d0
    %4:_(s64) = COPY $d1
    %5:_(s64) = COPY $d2
    %6:_(s64) = COPY $d3
    %7:_(s64) = COPY $d4
    %8:_(s64) = COPY $d5
    %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR %5:_(s64), %6:_(s64)
    %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR %6:_(s64), %8:_(s64)
    %v4s64_val1:_(<4 x s64>) = G_CONCAT_VECTORS %v2s64_val:_(<2 x s64>), %v2s64_val2:_(<2 x s64>)
    %v8s64_undef:_(<8 x s64>) = G_IMPLICIT_DEF
    %insert:_(<8 x s64>) = G_INSERT %v8s64_undef:_(<8 x s64>), %v4s64_val1:_(<4 x s64>), 0
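    ; The G_INSERT writes %v4s64_val1 into bits [0, 256) of the undef
    ; container, so lanes 0-3 of %insert are <%5, %6>, <%6, %8> and lanes 4-7
    ; stay undef.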
    %val1:_(<2 x s64>), %val2:_(<2 x s64>), %val3:_(<2 x s64>), %val4:_(<2 x s64>) = G_UNMERGE_VALUES %insert:_(<8 x s64>)
    ; val1 should be <%5, %6>
    G_STORE %val1:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
    ; val2 should be <%6, %8>
    G_STORE %val2:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
    RET_ReallyLR
...
---
name: combine_unmerge_from_insert_into_high
alignment: 4
tracksRegLiveness: true
body: |
  bb.1:
    liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7
    ; CHECK-LABEL: name: combine_unmerge_from_insert_into_high
    ; CHECK: liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
    ; CHECK: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
    ; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
    ; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
    ; CHECK: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
    ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
    ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
    ; CHECK: RET_ReallyLR
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(p0) = COPY $x2
    %3:_(s64) = COPY $d0
    %4:_(s64) = COPY $d1
    %5:_(s64) = COPY $d2
    %6:_(s64) = COPY $d3
    %7:_(s64) = COPY $d4
    %8:_(s64) = COPY $d5
    %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR %5:_(s64), %6:_(s64)
    %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR %6:_(s64), %8:_(s64)
    %v4s64_val1:_(<4 x s64>) = G_CONCAT_VECTORS %v2s64_val:_(<2 x s64>), %v2s64_val2:_(<2 x s64>)
    %v8s64_undef:_(<8 x s64>) = G_IMPLICIT_DEF
    %insert:_(<8 x s64>) = G_INSERT %v8s64_undef:_(<8 x s64>), %v4s64_val1:_(<4 x s64>), 256
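    ; With 64-bit lanes, bit offset 256 is lane 4, so %v4s64_val1 lands in
    ; lanes 4-7 of %insert and lanes 0-3 stay undef.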
    %val1:_(<2 x s64>), %val2:_(<2 x s64>), %val3:_(<2 x s64>), %val4:_(<2 x s64>) = G_UNMERGE_VALUES %insert:_(<8 x s64>)
    ; val3 should be <%5, %6>
    G_STORE %val3:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
    ; val4 should be <%6, %8>
    G_STORE %val4:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
    RET_ReallyLR
...
---
name: combine_unmerge_from_insert_look_into_container
alignment: 4
tracksRegLiveness: true
body: |
  bb.1:
    liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7
    ; CHECK-LABEL: name: combine_unmerge_from_insert_look_into_container
    ; CHECK: liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
    ; CHECK: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
    ; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
    ; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
    ; CHECK: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
    ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
    ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
    ; CHECK: RET_ReallyLR
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(p0) = COPY $x2
    %3:_(s64) = COPY $d0
    %4:_(s64) = COPY $d1
    %5:_(s64) = COPY $d2
    %6:_(s64) = COPY $d3
    %7:_(s64) = COPY $d4
    %8:_(s64) = COPY $d5
    %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR %5:_(s64), %6:_(s64)
    %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR %6:_(s64), %8:_(s64)
    %v4s64_undef:_(<4 x s64>) = G_IMPLICIT_DEF
    %v4s64_val1:_(<4 x s64>) = G_CONCAT_VECTORS %v2s64_val:_(<2 x s64>), %v2s64_val2:_(<2 x s64>)
    %v8s64_val1:_(<8 x s64>) = G_CONCAT_VECTORS %v4s64_undef:_(<4 x s64>), %v4s64_val1:_(<4 x s64>)
    %insert:_(<8 x s64>) = G_INSERT %v8s64_val1:_(<8 x s64>), %v4s64_undef:_(<4 x s64>), 0
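    ; The G_INSERT only overwrites the already-undef low half (lanes 0-3), so
    ; the combiner must look through it into the container %v8s64_val1 to find
    ; %v4s64_val1 in lanes 4-7.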
    ; The values we're interested in are in bits [256, 512) of the insert container.
    %val1:_(<2 x s64>), %val2:_(<2 x s64>), %val3:_(<2 x s64>), %val4:_(<2 x s64>) = G_UNMERGE_VALUES %insert:_(<8 x s64>)
    ; val3 should be <%5, %6>
    G_STORE %val3:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
    ; val4 should be <%6, %8>
    G_STORE %val4:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
    RET_ReallyLR
...