llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-select.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
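#
# These tests check that (%cond ? %x : %x) is folded to %x by the prelegalizer
# combiner, that the combiner looks through COPY instructions when deciding
# whether the two G_SELECT operands are equivalent, and that no fold happens
# when the operands trace back to different definitions.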
--- |
  declare void @foo()
  define void @self() { ret void }
  define void @self_with_copy() { ret void }
  define void @self_not_equivalent_overwrite_w0() { ret void }
  define void @self_not_equivalent_overwrite_w0_implicit() { ret void }
  define void @self_not_equivalent_different_copies() { ret void }
  define void @self_with_assert_zext() { ret void }
...
---
name:            self
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1
    ; Optimize (cond ? %a : %a) -> %a
    ; CHECK-LABEL: name: self
    ; CHECK: liveins: $w0, $w1
    ; CHECK: %a:_(s32) = COPY $w0
    ; CHECK: $w0 = COPY %a(s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %a:_(s32) = COPY $w0
    %cond_wide:gpr(s32) = COPY $w1
    %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
    %select:_(s32) = G_SELECT %cond(s1), %a, %a
    $w0 = COPY %select(s32)
    RET_ReallyLR implicit $w0

...
---
name:            self_with_copy
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1
    ; Optimize (cond ? %a : %b) -> %a
    ;
    ; This shows that we look through copies correctly and deduce that %b is a
    ; copy of %a.
    ;
    ; CHECK-LABEL: name: self_with_copy
    ; CHECK: liveins: $w0, $w1
    ; CHECK: %a:_(s32) = COPY $w0
    ; CHECK: $w0 = COPY %a(s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %a:_(s32) = COPY $w0
    %b:_(s32) = COPY %a
    %cond_wide:gpr(s32) = COPY $w1
    %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
    %select:_(s32) = G_SELECT %cond(s1), %a, %b
    $w0 = COPY %select(s32)
    RET_ReallyLR implicit $w0

...
---
name:            self_not_equivalent_overwrite_w0
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1
    ;
    ; $w0 is overwritten by a copy from $w1, so the copies for %a and %b are
    ; not the same.
    ;
    ; CHECK-LABEL: name: self_not_equivalent_overwrite_w0
    ; CHECK: liveins: $w0, $w1
    ; CHECK: %a:_(s32) = COPY $w0
    ; CHECK: $w0 = COPY $w1
    ; CHECK: %b:_(s32) = COPY $w0
    ; CHECK: %cond_wide:gpr(s32) = COPY $w1
    ; CHECK: %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
    ; CHECK: %select:_(s32) = G_SELECT %cond(s1), %a, %b
    ; CHECK: $w0 = COPY %select(s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %a:_(s32) = COPY $w0
    $w0 = COPY $w1
    %b:_(s32) = COPY $w0
    %cond_wide:gpr(s32) = COPY $w1
    %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
    %select:_(s32) = G_SELECT %cond(s1), %a, %b
    $w0 = COPY %select(s32)
    RET_ReallyLR implicit $w0

...
---
name:            self_not_equivalent_overwrite_w0_implicit
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1
    ;
    ; $w0 is overwritten by a call which defines it implicitly, so the copies
    ; are not the same.
    ;
    ; CHECK-LABEL: name: self_not_equivalent_overwrite_w0_implicit
    ; CHECK: liveins: $w0, $w1
    ; CHECK: %a:_(s32) = COPY $w0
    ; CHECK: BL @foo, implicit-def $w0
    ; CHECK: %b:_(s32) = COPY $w0
    ; CHECK: %cond_wide:gpr(s32) = COPY $w1
    ; CHECK: %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
    ; CHECK: %select:_(s32) = G_SELECT %cond(s1), %a, %b
    ; CHECK: $w0 = COPY %select(s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %a:_(s32) = COPY $w0
    BL @foo, implicit-def $w0
    %b:_(s32) = COPY $w0
    %cond_wide:gpr(s32) = COPY $w1
    %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
    %select:_(s32) = G_SELECT %cond(s1), %a, %b
    $w0 = COPY %select(s32)
    RET_ReallyLR implicit $w0

...
---
name:            self_not_equivalent_different_copies
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1
    ; In this case, %a copies $w0 while %c (through %b) copies $w1, so the
    ; operands are not equivalent and there is no optimization.
    ;
    ; CHECK-LABEL: name: self_not_equivalent_different_copies
    ; CHECK: liveins: $w0, $w1
    ; CHECK: %a:_(s32) = COPY $w0
    ; CHECK: %b:_(s32) = COPY $w1
    ; CHECK: %cond_wide:gpr(s32) = COPY $w1
    ; CHECK: %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
    ; CHECK: %select:_(s32) = G_SELECT %cond(s1), %a, %b
    ; CHECK: $w0 = COPY %select(s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %a:_(s32) = COPY $w0
    %b:_(s32) = COPY $w1
    %c:_(s32) = COPY %b
    %cond_wide:gpr(s32) = COPY $w1
    %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
    %select:_(s32) = G_SELECT %cond(s1), %a, %c
    $w0 = COPY %select(s32)
    RET_ReallyLR implicit $w0

...
---
name:            self_with_assert_zext
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1
    ; We should walk through G_ASSERT_ZEXT as if it's a copy, and remove the
    ; G_SELECT.
    ;
    ; CHECK-LABEL: name: self_with_assert_zext
    ; CHECK: liveins: $w0, $w1
    ; CHECK: %a:_(s32) = COPY $w0
    ; CHECK: %a_assert_zext:_(s32) = G_ASSERT_ZEXT %a, 16
    ; CHECK: $w0 = COPY %a_assert_zext(s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %a:_(s32) = COPY $w0
    %a_assert_zext:_(s32) = G_ASSERT_ZEXT %a, 16
    %b:_(s32) = COPY %a_assert_zext
    %cond_wide:gpr(s32) = COPY $w1
    %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
    %select:_(s32) = G_SELECT %cond(s1), %a_assert_zext, %b
    $w0 = COPY %select(s32)
    RET_ReallyLR implicit $w0

...