llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=arm64-apple-ios -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=CHECK-NOLSE
# RUN: llc -mtriple=arm64-apple-ios -mcpu=apple-a13 -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=CHECK-LSE
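
# Check that a 128-bit G_ATOMIC_CMPXCHG_WITH_SUCCESS is legalized to the
# CMP_SWAP_128_ACQUIRE pseudo when LSE is unavailable, and to CASPAX when
# LSE is available (e.g. -mcpu=apple-a13).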

---
name:            compare_swap_128
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1:
  bb.1:
    liveins: $x0, $x1, $x2, $x3, $x4

    ; CHECK-NOLSE-LABEL: name: compare_swap_128
    ; CHECK-NOLSE: liveins: $x0, $x1, $x2, $x3, $x4
    ; CHECK-NOLSE-NEXT: {{  $}}
    ; CHECK-NOLSE-NEXT: [[COPY:%[0-9]+]]:gpr64(p0) = COPY $x0
    ; CHECK-NOLSE-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK-NOLSE-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
    ; CHECK-NOLSE-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
    ; CHECK-NOLSE-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
    ; CHECK-NOLSE-NEXT: [[COPY5:%[0-9]+]]:gpr64(s64) = COPY [[COPY1]](s64)
    ; CHECK-NOLSE-NEXT: [[COPY6:%[0-9]+]]:gpr64(s64) = COPY [[COPY2]](s64)
    ; CHECK-NOLSE-NEXT: [[COPY7:%[0-9]+]]:gpr64(s64) = COPY [[COPY3]](s64)
    ; CHECK-NOLSE-NEXT: [[COPY8:%[0-9]+]]:gpr64(s64) = COPY [[COPY4]](s64)
    ; CHECK-NOLSE-NEXT: early-clobber %14:gpr64common(s64), early-clobber %15:gpr64common(s64), early-clobber %17:gpr32common = CMP_SWAP_128_ACQUIRE [[COPY]](p0), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64) :: (load store acquire acquire (s128))
    ; CHECK-NOLSE-NEXT: [[COPY9:%[0-9]+]]:gpr64 = COPY %17
    ; CHECK-NOLSE-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES %14(s64), %15(s64)
    ; CHECK-NOLSE-NEXT: [[COPY10:%[0-9]+]]:_(s128) = COPY [[MV]](s128)
    ; CHECK-NOLSE-NEXT: G_STORE [[COPY10]](s128), [[COPY]](p0) :: (store (s128))
    ; CHECK-NOLSE-NEXT: RET_ReallyLR
    ;
    ; CHECK-LSE-LABEL: name: compare_swap_128
    ; CHECK-LSE: liveins: $x0, $x1, $x2, $x3, $x4
    ; CHECK-LSE-NEXT: {{  $}}
    ; CHECK-LSE-NEXT: [[COPY:%[0-9]+]]:gpr64sp(p0) = COPY $x0
    ; CHECK-LSE-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK-LSE-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
    ; CHECK-LSE-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
    ; CHECK-LSE-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
    ; CHECK-LSE-NEXT: [[REG_SEQUENCE:%[0-9]+]]:xseqpairsclass(s128) = REG_SEQUENCE [[COPY1]](s64), %subreg.sube64, [[COPY2]](s64), %subreg.subo64
    ; CHECK-LSE-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:xseqpairsclass(s128) = REG_SEQUENCE [[COPY3]](s64), %subreg.sube64, [[COPY4]](s64), %subreg.subo64
    ; CHECK-LSE-NEXT: [[CASPAX:%[0-9]+]]:xseqpairsclass(s128) = CASPAX [[REG_SEQUENCE]](s128), [[REG_SEQUENCE1]](s128), [[COPY]](p0) :: (load store acquire acquire (s128))
    ; CHECK-LSE-NEXT: [[EXTRACT:%[0-9]+]]:_(s64) = G_EXTRACT [[CASPAX]](s128), 0
    ; CHECK-LSE-NEXT: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[CASPAX]](s128), 64
    ; CHECK-LSE-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[EXTRACT]](s64), [[EXTRACT1]](s64)
    ; CHECK-LSE-NEXT: [[COPY5:%[0-9]+]]:_(s128) = COPY [[MV]](s128)
    ; CHECK-LSE-NEXT: G_STORE [[COPY5]](s128), [[COPY]](p0) :: (store (s128))
    ; CHECK-LSE-NEXT: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %3:_(s64) = COPY $x1
    %4:_(s64) = COPY $x2
    %1:_(s128) = G_MERGE_VALUES %3(s64), %4(s64)
    %5:_(s64) = COPY $x3
    %6:_(s64) = COPY $x4
    %2:_(s128) = G_MERGE_VALUES %5(s64), %6(s64)
    %7:_(s128), %8:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %0(p0), %1, %2 :: (load store acquire acquire (s128))
    G_STORE %7(s128), %0(p0) :: (store (s128))
    RET_ReallyLR

...