llvm/test/CodeGen/AArch64/GlobalISel/combine-sdiv.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -o - -mtriple=aarch64-unknown-unknown -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs %s | FileCheck %s
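# These tests cover the combine that rewrites an 'exact' G_SDIV by a constant
# into an exact G_ASHR by the divisor's number of trailing zeros, followed by a
# G_MUL with the multiplicative inverse (modulo 2^32) of the remaining odd
# factor. The CHECK lines below also show that the G_SDIV is left alone when
# the 'exact' flag is missing or the function is marked minsize.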
--- |
  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"

  define void @sdiv_exact() { ret void }
  define void @sdiv_noexact() { ret void }
  define void @sdiv_exact_minsize() #0 { ret void }
  define void @div_v4s32() { ret void }
  define void @div_v4s32_splat() { ret void }

  attributes #0 = { minsize }

...
---
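# Divisor 104 = 2^3 * 13: expect an exact G_ASHR by 3 followed by a G_MUL with
# -991146299 (0xC4EC4EC5), the multiplicative inverse of 13 modulo 2^32.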
name:            sdiv_exact
body:             |
  bb.1:
    liveins: $w0

    ; CHECK-LABEL: name: sdiv_exact
    ; CHECK: liveins: $w0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -991146299
    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = exact G_ASHR [[COPY]], [[C]](s32)
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[ASHR]], [[C1]]
    ; CHECK-NEXT: $w0 = COPY [[MUL]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %0:_(s32) = COPY $w0
    %1:_(s32) = G_CONSTANT i32 104
    %2:_(s32) = exact G_SDIV %0, %1
    $w0 = COPY %2(s32)
    RET_ReallyLR implicit $w0

...
---
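# The combine requires the 'exact' flag; a plain G_SDIV by 104 is left
# unchanged.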
name:            sdiv_noexact
body:             |
  bb.1:
    liveins: $w0

    ; CHECK-LABEL: name: sdiv_noexact
    ; CHECK: liveins: $w0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 104
    ; CHECK-NEXT: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[COPY]], [[C]]
    ; CHECK-NEXT: $w0 = COPY [[SDIV]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %0:_(s32) = COPY $w0
    %1:_(s32) = G_CONSTANT i32 104
    %2:_(s32) = G_SDIV %0, %1
    $w0 = COPY %2(s32)
    RET_ReallyLR implicit $w0

...
---
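# With the minsize attribute the shift+multiply expansion is not applied and
# the exact G_SDIV is kept.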
name:            sdiv_exact_minsize
body:             |
  bb.1:
    liveins: $w0

    ; CHECK-LABEL: name: sdiv_exact_minsize
    ; CHECK: liveins: $w0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 104
    ; CHECK-NEXT: [[SDIV:%[0-9]+]]:_(s32) = exact G_SDIV [[COPY]], [[C]]
    ; CHECK-NEXT: $w0 = COPY [[SDIV]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %0:_(s32) = COPY $w0
    %1:_(s32) = G_CONSTANT i32 104
    %2:_(s32) = exact G_SDIV %0, %1
    $w0 = COPY %2(s32)
    RET_ReallyLR implicit $w0

...
---
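# Non-splat vector divisor <104, 72, 104, 72>: both values have three trailing
# zeros (104 = 2^3 * 13, 72 = 2^3 * 9), so a single exact G_ASHR by the splat
# <3, 3, 3, 3> is followed by a G_MUL with the per-lane inverses
# -991146299 (1/13 mod 2^32) and 954437177 (1/9 mod 2^32).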
name:            div_v4s32
body:             |
  bb.1:
    liveins: $q0

    ; CHECK-LABEL: name: div_v4s32
    ; CHECK: liveins: $q0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -991146299
    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 954437177
    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32), [[C1]](s32), [[C2]](s32)
    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(<4 x s32>) = exact G_ASHR [[COPY]], [[BUILD_VECTOR]](<4 x s32>)
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<4 x s32>) = G_MUL [[ASHR]], [[BUILD_VECTOR1]]
    ; CHECK-NEXT: $q0 = COPY [[MUL]](<4 x s32>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %c1:_(s32) = G_CONSTANT i32 104
    %c2:_(s32) = G_CONSTANT i32 72
    %1:_(<4 x s32>) = G_BUILD_VECTOR %c1(s32), %c2(s32), %c1(s32), %c2(s32)
    %3:_(<4 x s32>) = exact G_SDIV %0, %1
    $q0 = COPY %3(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
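# Splat vector divisor <104, 104, 104, 104>: the same expansion as the scalar
# sdiv_exact case, applied lane-wise with splat shift and multiplier vectors.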
name:            div_v4s32_splat
body:             |
  bb.1:
    liveins: $q0

    ; CHECK-LABEL: name: div_v4s32_splat
    ; CHECK: liveins: $q0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -991146299
    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(<4 x s32>) = exact G_ASHR [[COPY]], [[BUILD_VECTOR]](<4 x s32>)
    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<4 x s32>) = G_MUL [[ASHR]], [[BUILD_VECTOR1]]
    ; CHECK-NEXT: $q0 = COPY [[MUL]](<4 x s32>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %c1:_(s32) = G_CONSTANT i32 104
    %1:_(<4 x s32>) = G_BUILD_VECTOR %c1(s32), %c1(s32), %c1(s32), %c1(s32)
    %3:_(<4 x s32>) = exact G_SDIV %0, %1
    $q0 = COPY %3(<4 x s32>)
    RET_ReallyLR implicit $q0

...