# llvm/test/CodeGen/AArch64/GlobalISel/combine-mulo-with-2.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64 -debugify-and-strip-all-safe -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="mulo_by_2" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
# REQUIRES: asserts
...
---
name:            smulo_to_saddo
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0

    ; CHECK-LABEL: name: smulo_to_saddo
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %copy:_(s64) = COPY $x0
    ; CHECK-NEXT: %mul:_(s64), %o:_(s1) = G_SADDO %copy, %copy
    ; CHECK-NEXT: %overflow_ext:_(s32) = G_ZEXT %o(s1)
    ; CHECK-NEXT: $w0 = COPY %overflow_ext(s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %copy:_(s64) = COPY $x0
    %two:_(s64) = G_CONSTANT i64 2
    %mul:_(s64), %o:_(s1) = G_SMULO %copy, %two
    %overflow_ext:_(s32) = G_ZEXT %o(s1)
    $w0 = COPY %overflow_ext(s32)
    RET_ReallyLR implicit $w0
...
---
name:            umulo_to_uaddo
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0

    ; CHECK-LABEL: name: umulo_to_uaddo
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %copy:_(s64) = COPY $x0
    ; CHECK-NEXT: %mul:_(s64), %o:_(s1) = G_UADDO %copy, %copy
    ; CHECK-NEXT: %overflow_ext:_(s32) = G_ZEXT %o(s1)
    ; CHECK-NEXT: $w0 = COPY %overflow_ext(s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %copy:_(s64) = COPY $x0
    %two:_(s64) = G_CONSTANT i64 2
    %mul:_(s64), %o:_(s1) = G_UMULO %copy, %two
    %overflow_ext:_(s32) = G_ZEXT %o(s1)
    $w0 = COPY %overflow_ext(s32)
    RET_ReallyLR implicit $w0
...
---
name:            vector
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0, $d1

    ; CHECK-LABEL: name: vector
    ; CHECK: liveins: $d0, $d1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %copy:_(<2 x s32>) = COPY $d0
    ; CHECK-NEXT: %mul:_(<2 x s32>), %o:_(<2 x s1>) = G_SADDO %copy, %copy
    ; CHECK-NEXT: %overflow_ext:_(<2 x s32>) = G_ANYEXT %o(<2 x s1>)
    ; CHECK-NEXT: $d0 = COPY %overflow_ext(<2 x s32>)
    ; CHECK-NEXT: RET_ReallyLR implicit $d0
    %copy:_(<2 x s32>) = COPY $d0
    %two:_(s32) = G_CONSTANT i32 2
    %splat_two:_(<2 x s32>) = G_BUILD_VECTOR %two(s32), %two(s32)
    %mul:_(<2 x s32>), %o:_(<2 x s1>) = G_SMULO %copy, %splat_two
    %overflow_ext:_(<2 x s32>) = G_ANYEXT %o(<2 x s1>)
    $d0 = COPY %overflow_ext(<2 x s32>)
    RET_ReallyLR implicit $d0
...
---
name:            dont_combine_wrong_cst
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0

    ; CHECK-LABEL: name: dont_combine_wrong_cst
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %copy:_(s64) = COPY $x0
    ; CHECK-NEXT: %three:_(s64) = G_CONSTANT i64 3
    ; CHECK-NEXT: %mul:_(s64), %o:_(s1) = G_UMULO %copy, %three
    ; CHECK-NEXT: %overflow_ext:_(s32) = G_ZEXT %o(s1)
    ; CHECK-NEXT: $w0 = COPY %overflow_ext(s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %copy:_(s64) = COPY $x0
    %three:_(s64) = G_CONSTANT i64 3
    %mul:_(s64), %o:_(s1) = G_UMULO %copy, %three
    %overflow_ext:_(s32) = G_ZEXT %o(s1)
    $w0 = COPY %overflow_ext(s32)
    RET_ReallyLR implicit $w0
...
---
name:            dont_combine_not_cst
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: dont_combine_not_cst
    ; CHECK: liveins: $x0, $x1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %copy1:_(s64) = COPY $x0
    ; CHECK-NEXT: %copy2:_(s64) = COPY $x1
    ; CHECK-NEXT: %mul:_(s64), %o:_(s1) = G_UMULO %copy1, %copy2
    ; CHECK-NEXT: %overflow_ext:_(s32) = G_ZEXT %o(s1)
    ; CHECK-NEXT: $w0 = COPY %overflow_ext(s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %copy1:_(s64) = COPY $x0
    %copy2:_(s64) = COPY $x1
    %mul:_(s64), %o:_(s1) = G_UMULO %copy1, %copy2
    %overflow_ext:_(s32) = G_ZEXT %o(s1)
    $w0 = COPY %overflow_ext(s32)
    RET_ReallyLR implicit $w0
...