# File: llvm/test/CodeGen/AArch64/GlobalISel/select-redundant-zext.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s

...
---
name:            fold
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1

    ; This should not have an UBFMXri, since ADDWrr implicitly gives us the
    ; zext.
    ; NOTE(review): the autogenerated checks below still contain an ORRWrs
    ; zero-extend, which makes this output identical to the
    ; dont_look_through_copy case. If the fold fires, SUBREG_TO_REG should
    ; take [[ADDWrr]] directly — confirm against current llc output and
    ; regenerate with update_mir_test_checks.py if these checks are stale.

    ; CHECK-LABEL: name: fold
    ; CHECK: liveins: $w0, $w1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[COPY1]], [[COPY]]
    ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[ADDWrr]], 0
    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
    ; CHECK-NEXT: $x0 = COPY [[SUBREG_TO_REG]]
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_ADD %1, %0
    %3:gpr(s64) = G_ZEXT %2(s32)
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_s16
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1

    ; We should have a UBFMXri here, because we only do this for zero extends
    ; from 32 bits to 64 bits.
    ; UBFMXri src, 0, 15 extracts bits [15:0], i.e. it performs the s16 -> s64
    ; zero-extend explicitly.

    ; CHECK-LABEL: name: dont_fold_s16
    ; CHECK: liveins: $w0, $w1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[DEF]], %subreg.sub_32
    ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[SUBREG_TO_REG]], 0, 15
    ; CHECK-NEXT: $x0 = COPY [[UBFMXri]]
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %0:gpr(s16) = G_IMPLICIT_DEF
    %3:gpr(s64) = G_ZEXT %0(s16)
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_copy
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0

    ; We should have a ORRWrs here, because isDef32 disallows copies.
    ; (ORRWrs $wzr, x, 0 is a plain 32-bit register move; writing the W
    ; register implicitly zeroes the upper 32 bits of the X register.)

    ; CHECK-LABEL: name: dont_fold_copy
    ; CHECK: liveins: $w0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %copy:gpr32 = COPY $w0
    ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %copy, 0
    ; CHECK-NEXT: %zext:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
    ; CHECK-NEXT: $x0 = COPY %zext
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %copy:gpr(s32) = COPY $w0
    %zext:gpr(s64) = G_ZEXT %copy(s32)
    $x0 = COPY %zext(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_bitcast
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0

    ; We should have a ORRWrs here, because isDef32 disallows bitcasts.
    ; The s32 value is round-tripped through <4 x s8> so that the G_ZEXT
    ; source is a G_BITCAST rather than the original COPY.

    ; CHECK-LABEL: name: dont_fold_bitcast
    ; CHECK: liveins: $w0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %copy:gpr32all = COPY $w0
    ; CHECK-NEXT: %bitcast1:gpr32 = COPY %copy
    ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %bitcast1, 0
    ; CHECK-NEXT: %zext:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
    ; CHECK-NEXT: $x0 = COPY %zext
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %copy:gpr(s32) = COPY $w0
    %bitcast0:gpr(<4 x s8>) = G_BITCAST %copy(s32)
    %bitcast1:gpr(s32) = G_BITCAST %bitcast0
    %zext:gpr(s64) = G_ZEXT %bitcast1(s32)
    $x0 = COPY %zext(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_trunc
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0

    ; We should have a ORRWrs here, because isDef32 disallows truncs.
    ; The G_TRUNC selects to a plain sub_32 subregister copy (see the COPY of
    ; %copy.sub_32 below), so an explicit ORRWrs is needed to guarantee the
    ; upper 32 bits are zero.

    ; CHECK-LABEL: name: dont_fold_trunc
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %copy:gpr64sp = COPY $x0
    ; CHECK-NEXT: %trunc:gpr32common = COPY %copy.sub_32
    ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %trunc, 0
    ; CHECK-NEXT: %zext:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
    ; CHECK-NEXT: $x0 = COPY %zext
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %copy:gpr(s64) = COPY $x0
    %trunc:gpr(s32) = G_TRUNC %copy(s64)
    %zext:gpr(s64) = G_ZEXT %trunc(s32)
    $x0 = COPY %zext(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_phi
legalized:       true
regBankSelected: true
tracksRegLiveness: true
# The G_ZEXT source is a G_PHI, which isDef32 rejects, so an explicit ORRWrs
# zero-extend must be emitted.
body:             |
  ; CHECK-LABEL: name: dont_fold_phi
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
  ; CHECK-NEXT:   liveins: $w0, $w1, $w2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %copy1:gpr32all = COPY $w0
  ; CHECK-NEXT:   %copy2:gpr32all = COPY $w1
  ; CHECK-NEXT:   %cond_wide:gpr32 = COPY $w2
  ; CHECK-NEXT:   TBNZW %cond_wide, 0, %bb.1
  ; CHECK-NEXT:   B %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2:
  ; CHECK-NEXT:   %phi:gpr32 = PHI %copy1, %bb.0, %copy2, %bb.1
  ; CHECK-NEXT:   [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %phi, 0
  ; CHECK-NEXT:   [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
  ; CHECK-NEXT:   $x0 = COPY [[SUBREG_TO_REG]]
  ; CHECK-NEXT:   RET_ReallyLR implicit $x0
  ; We should have a ORRWrs here, because isDef32 disallows phis.

  bb.0:
    liveins: $w0, $w1, $w2

    %copy1:gpr(s32) = COPY $w0
    %copy2:gpr(s32) = COPY $w1
    %cond_wide:gpr(s32) = COPY $w2
    G_BRCOND %cond_wide, %bb.1
    G_BR %bb.2

  bb.1:

  bb.2:
    %phi:gpr(s32) = G_PHI %copy1(s32), %bb.0, %copy2(s32), %bb.1
    %5:gpr(s64) = G_ZEXT %phi(s32)
    $x0 = COPY %5(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_look_through_copy
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1

    ; Make sure we don't walk past the copy.
    ; The G_ZEXT source here is the COPY (%3), not the G_ADD; isDef32 rejects
    ; copies, so the explicit ORRWrs zero-extend must be kept.

    ; CHECK-LABEL: name: dont_look_through_copy
    ; CHECK: liveins: $w0, $w1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[COPY1]], [[COPY]]
    ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[ADDWrr]], 0
    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
    ; CHECK-NEXT: $x0 = COPY [[SUBREG_TO_REG]]
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_ADD %1, %0
    %3:gpr(s32) = COPY %2(s32)
    %4:gpr(s64) = G_ZEXT %3(s32)
    $x0 = COPY %4(s64)
    RET_ReallyLR implicit $x0