llvm/test/CodeGen/MIR/AArch64/atomic-memoperands.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass none -o - %s | FileCheck %s
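
# This test checks that atomic memory operands survive a MIR round trip:
# the atomic orderings (unordered, monotonic, acquire, release, acq_rel,
# seq_cst) and the syncscope("singlethread") annotation must be parsed and
# printed back unchanged by "-run-pass none".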

---
name:            atomic_memoperands
body: |
  bb.0:

    ; CHECK-LABEL: name: atomic_memoperands
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load unordered (s64))
    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load monotonic (s32))
    ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (load acquire (s16))
    ; CHECK-NEXT: G_STORE [[LOAD2]](s16), [[COPY]](p0) :: (store release (s16))
    ; CHECK-NEXT: G_STORE [[LOAD]](s64), [[COPY]](p0) :: (store syncscope("singlethread") seq_cst (s64))
    ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[LOAD]] :: (load store acq_rel (s64))
    ; CHECK-NEXT: [[ATOMICRMW_ADD1:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[LOAD]] :: (load store seq_cst (s64))
    ; CHECK-NEXT: RET_ReallyLR
    %0:_(p0) = COPY $x0
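    ; Atomic loads of decreasing width (s64, s32, s16), each with a different
    ; atomic ordering, followed by stores exercising release and a
    ; syncscope-qualified seq_cst.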
    %1:_(s64) = G_LOAD %0(p0) :: (load unordered (s64))
    %2:_(s32) = G_LOAD %0(p0) :: (load monotonic (s32))
    %3:_(s16) = G_LOAD %0(p0) :: (load acquire (s16))
    G_STORE %3(s16), %0(p0) :: (store release (s16))
    G_STORE %1(s64), %0(p0) :: (store syncscope("singlethread") seq_cst (s64))
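    ; G_ATOMICRMW_ADD both reads and writes memory, so its memory operand
    ; carries the combined "load store" flag.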
    %4:_(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store acq_rel (s64))
    %5:_(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst (s64))
    RET_ReallyLR
...