# llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -debugify-and-strip-all-safe -mtriple arm64-apple-ios -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="overlapping_and" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
# REQUIRES: asserts
---
# Canonical form: (x & -128) & 255. The masks share only bit 7
# (-128 = 0xFFFFFF80, 255 = 0xFF), so the combiner folds the chain
# to a single G_AND with the intersection constant 128 (0x80).
name:            bitmask_overlap1
body:             |
  bb.1:
    ; CHECK-LABEL: name: bitmask_overlap1
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; CHECK: $w0 = COPY [[AND]](s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %0:_(s32) = COPY $w0
    %1:_(s32) = G_CONSTANT i32 -128
    %3:_(s32) = G_CONSTANT i32 255
    %2:_(s32) = G_AND %0, %1
    %4:_(s32) = G_AND %2, %3
    $w0 = COPY %4(s32)
    RET_ReallyLR implicit $w0

...
---
# Same fold as bitmask_overlap1, but the inner G_AND has its operands
# commuted (constant first: %1, %0). The combine must still recognize
# the pattern and produce x & 128.
name:            bitmask_overlap2
body:             |
  bb.1:
    ; CHECK-LABEL: name: bitmask_overlap2
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; CHECK: $w0 = COPY [[AND]](s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %0:_(s32) = COPY $w0
    %1:_(s32) = G_CONSTANT i32 255
    %3:_(s32) = G_CONSTANT i32 -128
    %2:_(s32) = G_AND %1, %0
    %4:_(s32) = G_AND %2, %3
    $w0 = COPY %4(s32)
    RET_ReallyLR implicit $w0

...
---
# Commutation variant: both the inner G_AND (%1, %0) and the outer
# G_AND (%3, %2) have the constant as the first operand. Still folds
# to x & 128.
name:            bitmask_overlap3
body:             |
  bb.1:
    ; CHECK-LABEL: name: bitmask_overlap3
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; CHECK: $w0 = COPY [[AND]](s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %0:_(s32) = COPY $w0
    %1:_(s32) = G_CONSTANT i32 255
    %3:_(s32) = G_CONSTANT i32 -128
    %2:_(s32) = G_AND %1, %0
    %4:_(s32) = G_AND %3, %2
    $w0 = COPY %4(s32)
    RET_ReallyLR implicit $w0

...
---
# Commutation variant: only the outer G_AND (%3, %2) has the constant
# as its first operand. Still folds to x & 128.
name:            bitmask_overlap4
body:             |
  bb.1:
    ; CHECK-LABEL: name: bitmask_overlap4
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; CHECK: $w0 = COPY [[AND]](s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %0:_(s32) = COPY $w0
    %1:_(s32) = G_CONSTANT i32 255
    %3:_(s32) = G_CONSTANT i32 -128
    %2:_(s32) = G_AND %0, %1
    %4:_(s32) = G_AND %3, %2
    $w0 = COPY %4(s32)
    RET_ReallyLR implicit $w0

...
---
# Disjoint masks: (x & 1) & 2 has no common bits (1 & 2 == 0), so the
# whole chain folds to the constant 0 and the COPY of $w0 disappears.
name:            bitmask_no_overlap
body:             |
  bb.1:
    ; CHECK-LABEL: name: bitmask_no_overlap
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: $w0 = COPY [[C]](s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %0:_(s32) = COPY $w0
    %1:_(s32) = G_CONSTANT i32 1
    %3:_(s32) = G_CONSTANT i32 2
    %2:_(s32) = G_AND %0, %1
    %4:_(s32) = G_AND %2, %3
    $w0 = COPY %4(s32)
    RET_ReallyLR implicit $w0

...
---
# Extra-use case: the inner AND (%3 = x & 255) is also stored through
# %1, so it cannot be deleted. The combine still rewrites the outer
# AND to use x directly with the intersected mask 128, while the
# original x & 255 is kept alive for the G_STORE.
name:            bitmask_overlap_extrause
body:             |
  bb.1:
    ; CHECK-LABEL: name: bitmask_overlap_extrause
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
    ; CHECK: G_STORE [[AND]](s32), [[COPY1]](p0) :: (store (s32))
    ; CHECK: $w0 = COPY [[AND1]](s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %0:_(s32) = COPY $w0
    %1:_(p0) = COPY $x1
    %2:_(s32) = G_CONSTANT i32 255
    %4:_(s32) = G_CONSTANT i32 -128
    %3:_(s32) = G_AND %0, %2
    %5:_(s32) = G_AND %3, %4
    G_STORE %3(s32), %1(p0) :: (store (s32))
    $w0 = COPY %5(s32)
    RET_ReallyLR implicit $w0

...