llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-commute-shift.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
# RUN: llc -mtriple aarch64 -mattr=+fullfp16 -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
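
# Test that the pre-legalizer combiner commutes a G_SHL whose operand is a
# G_ADD/G_OR with a constant: shl (op x, c1), c2 --> op (shl x, c2), (c1 << c2).
# The combine must not fire when the inner G_ADD/G_OR has more than one use.

# (x + 1) << 2 --> (x << 2) + 4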
---
name:            shl_add_k
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $w1, $x0

    ; CHECK-LABEL: name: shl_add_k
    ; CHECK: liveins: $w1, $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[C1]]
    ; CHECK-NEXT: G_STORE [[ADD]](s32), [[COPY]](p0) :: (store (s32))
    ; CHECK-NEXT: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(s32) = COPY $w1
    %2:_(s32) = G_CONSTANT i32 1
    %4:_(s32) = G_CONSTANT i32 2
    %3:_(s32) = G_ADD %1, %2
    %5:_(s32) = G_SHL %3, %4(s32)
    G_STORE %5(s32), %0(p0) :: (store (s32))
    RET_ReallyLR

...
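# (x | 1) << 2 --> (x << 2) | 4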
---
name:            shl_or_k
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $w1, $x0

    ; CHECK-LABEL: name: shl_or_k
    ; CHECK: liveins: $w1, $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[C1]]
    ; CHECK-NEXT: G_STORE [[OR]](s32), [[COPY]](p0) :: (store (s32))
    ; CHECK-NEXT: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(s32) = COPY $w1
    %2:_(s32) = G_CONSTANT i32 1
    %4:_(s32) = G_CONSTANT i32 2
    %3:_(s32) = G_OR %1, %2
    %5:_(s32) = G_SHL %3, %4(s32)
    G_STORE %5(s32), %0(p0) :: (store (s32))
    RET_ReallyLR

...
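# Negative test: the G_OR feeding the shift has a second use (it is stored to
# %ptr), so the combine must not fire and the code is left unchanged.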
---
name:            shl_or_k_multiuse
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $w1, $x0

    ; CHECK-LABEL: name: shl_or_k_multiuse
    ; CHECK: liveins: $w1, $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: %ptr:_(p0) = COPY $x1
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY1]], [[C]]
    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[OR]], [[C1]](s32)
    ; CHECK-NEXT: G_STORE [[SHL]](s32), [[COPY]](p0) :: (store (s32))
    ; CHECK-NEXT: G_STORE [[OR]](s32), %ptr(p0) :: (store (s32))
    ; CHECK-NEXT: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %ptr:_(p0) = COPY $x1
    %1:_(s32) = COPY $w1
    %2:_(s32) = G_CONSTANT i32 1
    %4:_(s32) = G_CONSTANT i32 2
    %3:_(s32) = G_OR %1, %2
    %5:_(s32) = G_SHL %3, %4(s32)
    G_STORE %5(s32), %0(p0) :: (store (s32))
    G_STORE %3(s32), %ptr(p0) :: (store (s32))
    RET_ReallyLR

...
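# Vector version: (x + splat(2)) << splat(2) --> (x << splat(2)) + splat(8)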
---
name:            shl_add_k_vector
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $w1, $x0

    ; CHECK-LABEL: name: shl_add_k_vector
    ; CHECK: liveins: $w1, $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
    ; CHECK-NEXT: %xvec:_(<4 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY1]](s32), [[COPY1]](s32), [[COPY1]](s32)
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
    ; CHECK-NEXT: %veccst2:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(<4 x s32>) = G_SHL %xvec, %veccst2(<4 x s32>)
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[SHL]], [[BUILD_VECTOR]]
    ; CHECK-NEXT: G_STORE [[ADD]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>))
    ; CHECK-NEXT: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(s32) = COPY $w1
    %xvec:_(<4 x s32>) = G_BUILD_VECTOR %1, %1, %1, %1
    %2:_(s32) = G_CONSTANT i32 1
    %veccst:_(<4 x s32>) = G_BUILD_VECTOR %2, %2, %2, %2
    %4:_(s32) = G_CONSTANT i32 2
    %veccst2:_(<4 x s32>) = G_BUILD_VECTOR %4, %4, %4, %4
    %3:_(<4 x s32>) = G_ADD %xvec, %veccst2
    %5:_(<4 x s32>) = G_SHL %3, %veccst2
    G_STORE %5(<4 x s32>), %0(p0) :: (store (<4 x s32>))
    RET_ReallyLR

...