# llvm/test/CodeGen/AArch64/GlobalISel/legalize-mul.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64 -run-pass=legalizer -global-isel-abort=1  %s -o - | FileCheck %s
---
name:            test_scalar_mul_small
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_scalar_mul_small
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[TRUNC]], [[TRUNC1]]
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[MUL]](s32)
    ; CHECK-NEXT: $x0 = COPY [[ANYEXT]](s64)
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s8) = G_TRUNC %0(s64)
    %3:_(s8) = G_TRUNC %1(s64)
    %4:_(s8) = G_MUL %2, %3
    %5:_(s64) = G_ANYEXT %4(s8)
    $x0 = COPY %5(s64)

...
---
name:            test_smul_overflow
body:             |
  bb.0:
    ; CHECK-LABEL: name: test_smul_overflow
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK-NEXT: [[SMULH:%[0-9]+]]:_(s64) = G_SMULH [[COPY]], [[COPY1]]
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MUL]], [[C]](s64)
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SMULH]](s64), [[ASHR]]
    ; CHECK-NEXT: $x0 = COPY [[MUL]](s64)
    ; CHECK-NEXT: $w0 = COPY [[ICMP]](s32)
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s64), %3:_(s1) = G_SMULO %0, %1
    $x0 = COPY %2(s64)
    %4:_(s32) = G_ANYEXT %3(s1)
    $w0 = COPY %4(s32)

...
---
name:            test_umul_overflow
body:             |
  bb.0:
    ; CHECK-LABEL: name: test_umul_overflow
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[COPY]], [[COPY1]]
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UMULH]](s64), [[C]]
    ; CHECK-NEXT: $x0 = COPY [[MUL]](s64)
    ; CHECK-NEXT: $w0 = COPY [[ICMP]](s32)
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s64), %3:_(s1) = G_UMULO %0, %1
    $x0 = COPY %2(s64)
    %4:_(s32) = G_ANYEXT %3(s1)
    $w0 = COPY %4(s32)

...
---
name:            test_smul_overflow_s32
body:             |
  bb.0:
    ; CHECK-LABEL: name: test_smul_overflow_s32
    ; CHECK: %lhs:_(s32) = COPY $w0
    ; CHECK-NEXT: %rhs:_(s32) = COPY $w1
    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT %lhs(s32)
    ; CHECK-NEXT: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT %rhs(s32)
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[SEXT1]]
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MUL]], [[C]](s64)
    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ASHR]](s64)
    ; CHECK-NEXT: %mul:_(s32) = G_MUL %lhs, %rhs
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
    ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR %mul, [[C1]](s64)
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[TRUNC]](s32), [[ASHR1]]
    ; CHECK-NEXT: $w0 = COPY %mul(s32)
    ; CHECK-NEXT: $w0 = COPY [[ICMP]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %lhs:_(s32) = COPY $w0
    %rhs:_(s32) = COPY $w1
    %mul:_(s32), %overflow:_(s1) = G_SMULO %lhs, %rhs
    $w0 = COPY %mul(s32)
    %ext_overflow:_(s32) = G_ANYEXT %overflow(s1)
    $w0 = COPY %ext_overflow(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_umul_overflow_s32
body:             |
  bb.0:
    ; CHECK-LABEL: name: test_umul_overflow_s32
    ; CHECK: %lhs:_(s32) = COPY $w0
    ; CHECK-NEXT: %rhs:_(s32) = COPY $w1
    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT %lhs(s32)
    ; CHECK-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT %rhs(s32)
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[ZEXT1]]
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C]](s64)
    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK-NEXT: %mul:_(s32) = G_MUL %lhs, %rhs
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[TRUNC]](s32), [[C1]]
    ; CHECK-NEXT: $w0 = COPY %mul(s32)
    ; CHECK-NEXT: $w0 = COPY [[ICMP]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %lhs:_(s32) = COPY $w0
    %rhs:_(s32) = COPY $w1
    %mul:_(s32), %overflow:_(s1) = G_UMULO %lhs, %rhs
    $w0 = COPY %mul(s32)
    %ext_overflow:_(s32) = G_ANYEXT %overflow(s1)
    $w0 = COPY %ext_overflow(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_umul_overflow_s24
body:             |
  bb.0:
    ; CHECK-LABEL: name: test_umul_overflow_s24
    ; CHECK: %lhs_wide:_(s32) = COPY $w0
    ; CHECK-NEXT: %rhs_wide:_(s32) = COPY $w1
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND %lhs_wide, [[C]]
    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND %rhs_wide, [[C]]
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT %lhs_wide(s32)
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16777215
    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT %rhs_wide(s32)
    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ANYEXT1]], [[C1]]
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND2]], [[AND3]]
    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C2]](s64)
    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[TRUNC]](s32), [[C3]]
    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[MUL1]], [[C]]
    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[MUL1]](s32), [[AND4]]
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP]], [[ICMP1]]
    ; CHECK-NEXT: $w0 = COPY [[MUL1]](s32)
    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %lhs_wide:_(s32) = COPY $w0
    %rhs_wide:_(s32) = COPY $w1
    %lhs:_(s24) = G_TRUNC %lhs_wide
    %rhs:_(s24) = G_TRUNC %rhs_wide
    %mul:_(s24), %overflow:_(s1) = G_UMULO %lhs, %rhs
    %ext_mul:_(s32) = G_ANYEXT %mul
    $w0 = COPY %ext_mul(s32)
    %ext_overflow:_(s32) = G_ANYEXT %overflow(s1)
    $w0 = COPY %ext_overflow(s32)
    RET_ReallyLR implicit $w0

...
---
name:            vector_mul_scalarize
liveins:
  - { reg: '$q0' }
  - { reg: '$q1' }
body:             |
  bb.1:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: vector_mul_scalarize
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $q0 = COPY [[MUL]](<2 x s64>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %2:_(<2 x s64>) = G_MUL %0, %1
    $q0 = COPY %2(<2 x s64>)
    RET_ReallyLR implicit $q0
...
---
name:            test_umulo_overflow_no_invalid_mir
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$x0' }
  - { reg: '$x1' }
  - { reg: '$x2' }
frameInfo:
  maxAlignment:    16
stack:
  - { id: 0, size: 8, alignment: 8 }
  - { id: 1, size: 8, alignment: 8 }
  - { id: 2, size: 16, alignment: 16 }
  - { id: 3, size: 16, alignment: 8 }
machineFunctionInfo: {}
body:             |
  bb.1:
    liveins: $x0, $x1, $x2
    ; Check that the overflow result doesn't generate incorrect MIR by using a G_CONSTANT 0
    ; before it's been defined.
    ; CHECK-LABEL: name: test_umulo_overflow_no_invalid_mir
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
    ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
    ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
    ; CHECK-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.3
    ; CHECK-NEXT: G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
    ; CHECK-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64))
    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (s64))
    ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[LOAD]], [[LOAD1]]
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[LOAD]], [[LOAD1]]
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UMULH]](s64), [[C]]
    ; CHECK-NEXT: G_STORE [[C]](s64), [[FRAME_INDEX2]](p0) :: (store (s64), align 1)
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP]](s32)
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
    ; CHECK-NEXT: $x0 = COPY [[MUL]](s64)
    ; CHECK-NEXT: $x1 = COPY [[AND]](s64)
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %0:_(p0) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s64) = COPY $x2
    %25:_(s32) = G_CONSTANT i32 0
    %3:_(p0) = G_FRAME_INDEX %stack.0
    %4:_(p0) = G_FRAME_INDEX %stack.1
    %6:_(p0) = G_FRAME_INDEX %stack.3
    G_STORE %2(s64), %3(p0) :: (store (s64))
    G_STORE %1(s64), %4(p0) :: (store (s64))
    %7:_(s64) = G_LOAD %3(p0) :: (dereferenceable load (s64))
    %8:_(s64) = G_LOAD %4(p0) :: (dereferenceable load (s64))
    %9:_(s64), %10:_(s1) = G_UMULO %7, %8
    %31:_(s64) = G_CONSTANT i64 0
    G_STORE %31(s64), %6(p0) :: (store (s64), align 1)
    %16:_(s64) = G_ZEXT %10(s1)
    $x0 = COPY %9(s64)
    $x1 = COPY %16(s64)
    RET_ReallyLR implicit $x0

...
---
name:            umulh_s32
exposesReturnsTwice: false
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $w0, $w1

    ; CHECK-LABEL: name: umulh_s32
    ; CHECK: liveins: $w0, $w1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s32)
    ; CHECK-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[COPY1]](s32)
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[ZEXT1]]
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C]](s64)
    ; CHECK-NEXT: %mul:_(s32) = G_TRUNC [[LSHR]](s64)
    ; CHECK-NEXT: $w0 = COPY %mul(s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %0:_(s32) = COPY $w0
    %1:_(s32) = COPY $w1
    %mul:_(s32) = G_UMULH %0, %1
    $w0 = COPY %mul(s32)
    RET_ReallyLR implicit $w0

...
---
name:            smulh_s32
exposesReturnsTwice: false
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $w0, $w1

    ; CHECK-LABEL: name: smulh_s32
    ; CHECK: liveins: $w0, $w1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s32)
    ; CHECK-NEXT: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[SEXT1]]
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MUL]], [[C]](s64)
    ; CHECK-NEXT: %mul:_(s32) = G_TRUNC [[ASHR]](s64)
    ; CHECK-NEXT: $w0 = COPY %mul(s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %0:_(s32) = COPY $w0
    %1:_(s32) = COPY $w1
    %mul:_(s32) = G_SMULH %0, %1
    $w0 = COPY %mul(s32)
    RET_ReallyLR implicit $w0

...
---
name:            umulh_v8s16
exposesReturnsTwice: false
tracksRegLiveness: true
liveins:
  - { reg: '$q0', virtual-reg: '' }
body:             |
  bb.1:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: umulh_v8s16
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
    ; CHECK-NEXT: %mul:_(<8 x s16>) = G_UMULH [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $q0 = COPY %mul(<8 x s16>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(<8 x s16>) = COPY $q0
    %1:_(<8 x s16>) = COPY $q1
    %mul:_(<8 x s16>) = G_UMULH %0, %1
    $q0 = COPY %mul(<8 x s16>)
    RET_ReallyLR implicit $q0

...
---
name:            umulh_v16s8
exposesReturnsTwice: false
tracksRegLiveness: true
liveins:
  - { reg: '$q0', virtual-reg: '' }
body:             |
  bb.1:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: umulh_v16s8
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
    ; CHECK-NEXT: %mul:_(<16 x s8>) = G_UMULH [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $q0 = COPY %mul(<16 x s8>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(<16 x s8>) = COPY $q0
    %1:_(<16 x s8>) = COPY $q1
    %mul:_(<16 x s8>) = G_UMULH %0, %1
    $q0 = COPY %mul(<16 x s8>)
    RET_ReallyLR implicit $q0

...
---
name:            umulh_v4s32
exposesReturnsTwice: false
tracksRegLiveness: true
liveins:
  - { reg: '$q0', virtual-reg: '' }
body:             |
  bb.1:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: umulh_v4s32
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
    ; CHECK-NEXT: %mul:_(<4 x s32>) = G_UMULH [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $q0 = COPY %mul(<4 x s32>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(<4 x s32>) = COPY $q1
    %mul:_(<4 x s32>) = G_UMULH %0, %1
    $q0 = COPY %mul(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
name:            smulh_v8s16
exposesReturnsTwice: false
tracksRegLiveness: true
liveins:
  - { reg: '$q0', virtual-reg: '' }
body:             |
  bb.1:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: smulh_v8s16
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
    ; CHECK-NEXT: %mul:_(<8 x s16>) = G_SMULH [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $q0 = COPY %mul(<8 x s16>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(<8 x s16>) = COPY $q0
    %1:_(<8 x s16>) = COPY $q1
    %mul:_(<8 x s16>) = G_SMULH %0, %1
    $q0 = COPY %mul(<8 x s16>)
    RET_ReallyLR implicit $q0

...
---
name:            smulh_v16s8
exposesReturnsTwice: false
tracksRegLiveness: true
liveins:
  - { reg: '$q0', virtual-reg: '' }
body:             |
  bb.1:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: smulh_v16s8
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
    ; CHECK-NEXT: %mul:_(<16 x s8>) = G_SMULH [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $q0 = COPY %mul(<16 x s8>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(<16 x s8>) = COPY $q0
    %1:_(<16 x s8>) = COPY $q1
    %mul:_(<16 x s8>) = G_SMULH %0, %1
    $q0 = COPY %mul(<16 x s8>)
    RET_ReallyLR implicit $q0

...
---
name:            smulh_v4s32
exposesReturnsTwice: false
tracksRegLiveness: true
liveins:
  - { reg: '$q0', virtual-reg: '' }
body:             |
  bb.1:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: smulh_v4s32
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
    ; CHECK-NEXT: %mul:_(<4 x s32>) = G_SMULH [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $q0 = COPY %mul(<4 x s32>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(<4 x s32>) = COPY $q1
    %mul:_(<4 x s32>) = G_SMULH %0, %1
    $q0 = COPY %mul(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
name:            test_vector_mul_v16s16
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_vector_mul_v16s16
    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<8 x s16>) = G_MUL [[COPY]], [[COPY]]
    ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(<8 x s16>) = G_MUL [[COPY1]], [[COPY1]]
    ; CHECK-NEXT: $q0 = COPY [[MUL]](<8 x s16>)
    ; CHECK-NEXT: $q1 = COPY [[MUL1]](<8 x s16>)
    %1:_(<8 x s16>) = COPY $q0
    %2:_(<8 x s16>) = COPY $q1
    %0:_(<16 x s16>) = G_CONCAT_VECTORS %1(<8 x s16>), %2(<8 x s16>)
    %3:_(<16 x s16>) = G_MUL %0, %0
    %4:_(<8 x s16>), %5:_(<8 x s16>) = G_UNMERGE_VALUES %3(<16 x s16>)
    $q0 = COPY %4(<8 x s16>)
    $q1 = COPY %5(<8 x s16>)

...
---
name:            test_vector_mul_v32s8
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_vector_mul_v32s8
    ; CHECK: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<16 x s8>) = G_MUL [[COPY]], [[COPY]]
    ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(<16 x s8>) = G_MUL [[COPY1]], [[COPY1]]
    ; CHECK-NEXT: $q0 = COPY [[MUL]](<16 x s8>)
    ; CHECK-NEXT: $q1 = COPY [[MUL1]](<16 x s8>)
    %0:_(<16 x s8>) = COPY $q0
    %1:_(<16 x s8>) = COPY $q1
    %2:_(<32 x s8>) = G_CONCAT_VECTORS %0, %1
    %3:_(<32 x s8>) = G_MUL %2, %2
    %7:_(<16 x s8>), %8:_(<16 x s8>) = G_UNMERGE_VALUES %3(<32 x s8>)
    $q0 = COPY %7(<16 x s8>)
    $q1 = COPY %8(<16 x s8>)

...
---
name:            mul_v2s1
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $d0, $d1, $d2, $d3

    ; CHECK-LABEL: name: mul_v2s1
    ; CHECK: liveins: $d0, $d1, $d2, $d3
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY $d2
    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<2 x s32>) = COPY $d3
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<2 x s32>) = G_ICMP intpred(eq), [[COPY]](<2 x s32>), [[COPY1]]
    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(<2 x s32>) = G_ICMP intpred(eq), [[COPY2]](<2 x s32>), [[COPY3]]
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<2 x s32>) = G_MUL [[ICMP]], [[ICMP1]]
    ; CHECK-NEXT: $d0 = COPY [[MUL]](<2 x s32>)
    ; CHECK-NEXT: RET_ReallyLR implicit $d0
    %0:_(<2 x s32>) = COPY $d0
    %1:_(<2 x s32>) = COPY $d1
    %2:_(<2 x s32>) = COPY $d2
    %3:_(<2 x s32>) = COPY $d3
    %4:_(<2 x s1>) = G_ICMP intpred(eq), %0(<2 x s32>), %1
    %5:_(<2 x s1>) = G_ICMP intpred(eq), %2(<2 x s32>), %3
    %6:_(<2 x s1>) = G_MUL %4, %5
    %7:_(<2 x s32>) = G_ANYEXT %6
    $d0 = COPY %7:_(<2 x s32>)
    RET_ReallyLR implicit $d0
...
---
name:            mul_v3s1
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $b0, $b1, $b2

    ; CHECK-LABEL: name: mul_v3s1
    ; CHECK: liveins: $b0, $b1, $b2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s8) = COPY $b0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s8) = COPY $b1
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s8) = COPY $b2
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY]](s8)
    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY1]](s8)
    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY2]](s8)
    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[ANYEXT]](s16), [[ANYEXT1]](s16), [[ANYEXT2]](s16), [[DEF]](s16)
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<4 x s16>) = G_MUL [[BUILD_VECTOR]], [[BUILD_VECTOR]]
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[MUL]](<4 x s16>)
    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV]](s16)
    ; CHECK-NEXT: $b0 = COPY [[TRUNC]](s8)
    ; CHECK-NEXT: RET_ReallyLR implicit $b0
    %1:_(s8) = COPY $b0
    %2:_(s8) = COPY $b1
    %3:_(s8) = COPY $b2
    %4:_(<3 x s8>) = G_BUILD_VECTOR %1(s8), %2(s8), %3(s8)
    %0:_(<3 x s1>) = G_TRUNC %4(<3 x s8>)
    %5:_(<3 x s1>) = G_MUL %0, %0
    %7:_(<3 x s8>) = G_ANYEXT %5(<3 x s1>)
    %8:_(s8), %9:_(s8), %10:_(s8) = G_UNMERGE_VALUES %7(<3 x s8>)
    $b0 = COPY %8:_(s8)
    RET_ReallyLR implicit $b0
...
---
name:            mul_v4s1
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $d0, $d1, $d2, $d3

    ; CHECK-LABEL: name: mul_v4s1
    ; CHECK: liveins: $d0, $d1, $d2, $d3
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $d1
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<4 x s16>) = COPY $d2
    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<4 x s16>) = COPY $d3
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<4 x s16>) = G_ICMP intpred(eq), [[COPY]](<4 x s16>), [[COPY1]]
    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(<4 x s16>) = G_ICMP intpred(eq), [[COPY2]](<4 x s16>), [[COPY3]]
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<4 x s16>) = G_MUL [[ICMP]], [[ICMP1]]
    ; CHECK-NEXT: $d0 = COPY [[MUL]](<4 x s16>)
    ; CHECK-NEXT: RET_ReallyLR implicit $d0
    %0:_(<4 x s16>) = COPY $d0
    %1:_(<4 x s16>) = COPY $d1
    %2:_(<4 x s16>) = COPY $d2
    %3:_(<4 x s16>) = COPY $d3
    %4:_(<4 x s1>) = G_ICMP intpred(eq), %0(<4 x s16>), %1
    %5:_(<4 x s1>) = G_ICMP intpred(eq), %2(<4 x s16>), %3
    %6:_(<4 x s1>) = G_MUL %4, %5
    %7:_(<4 x s16>) = G_ANYEXT %6
    $d0 = COPY %7:_(<4 x s16>)
    RET_ReallyLR implicit $d0
...
---
name:            mul_v8s1
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $d0, $d1, $d2, $d3

    ; CHECK-LABEL: name: mul_v8s1
    ; CHECK: liveins: $d0, $d1, $d2, $d3
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<8 x s8>) = COPY $d1
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<8 x s8>) = COPY $d2
    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<8 x s8>) = COPY $d3
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<8 x s8>) = G_ICMP intpred(eq), [[COPY]](<8 x s8>), [[COPY1]]
    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(<8 x s8>) = G_ICMP intpred(eq), [[COPY2]](<8 x s8>), [[COPY3]]
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<8 x s8>) = G_MUL [[ICMP]], [[ICMP1]]
    ; CHECK-NEXT: $d0 = COPY [[MUL]](<8 x s8>)
    ; CHECK-NEXT: RET_ReallyLR implicit $d0
    %0:_(<8 x s8>) = COPY $d0
    %1:_(<8 x s8>) = COPY $d1
    %2:_(<8 x s8>) = COPY $d2
    %3:_(<8 x s8>) = COPY $d3
    %4:_(<8 x s1>) = G_ICMP intpred(eq), %0(<8 x s8>), %1
    %5:_(<8 x s1>) = G_ICMP intpred(eq), %2(<8 x s8>), %3
    %6:_(<8 x s1>) = G_MUL %4, %5
    %7:_(<8 x s8>) = G_ANYEXT %6
    $d0 = COPY %7:_(<8 x s8>)
    RET_ReallyLR implicit $d0
...
---
name:            mul_v16s1
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $q0, $q1, $q2, $q3

    ; CHECK-LABEL: name: mul_v16s1
    ; CHECK: liveins: $q0, $q1, $q2, $q3
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<16 x s8>) = COPY $q2
    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<16 x s8>) = COPY $q3
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<16 x s8>) = G_ICMP intpred(eq), [[COPY]](<16 x s8>), [[COPY1]]
    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(<16 x s8>) = G_ICMP intpred(eq), [[COPY2]](<16 x s8>), [[COPY3]]
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<16 x s8>) = G_MUL [[ICMP]], [[ICMP1]]
    ; CHECK-NEXT: $q0 = COPY [[MUL]](<16 x s8>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(<16 x s8>) = COPY $q0
    %1:_(<16 x s8>) = COPY $q1
    %2:_(<16 x s8>) = COPY $q2
    %3:_(<16 x s8>) = COPY $q3
    %4:_(<16 x s1>) = G_ICMP intpred(eq), %0(<16 x s8>), %1
    %5:_(<16 x s1>) = G_ICMP intpred(eq), %2(<16 x s8>), %3
    %6:_(<16 x s1>) = G_MUL %4, %5
    %7:_(<16 x s8>) = G_ANYEXT %6
    $q0 = COPY %7:_(<16 x s8>)
    RET_ReallyLR implicit $q0
...