# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple=arm64-unknown-unknown -global-isel -run-pass=legalizer -verify-machineinstrs -global-isel-abort=1 %s -o - | FileCheck %s
---
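# G_ROTR with s32 value and s32 amount: the legalizer keeps the G_ROTR but
# widens the rotate amount to s64 with a G_ZEXT.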
name:            rotr_s32
alignment:       4
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1
    ; CHECK-LABEL: name: rotr_s32
    ; CHECK: liveins: $w0, $w1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY1]](s32)
    ; CHECK-NEXT: %rot:_(s32) = G_ROTR [[COPY]], [[ZEXT]](s64)
    ; CHECK-NEXT: $w0 = COPY %rot(s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %0:_(s32) = COPY $w0
    %1:_(s32) = COPY $w1
    %rot:_(s32) = G_ROTR %0(s32), %1(s32)
    $w0 = COPY %rot(s32)
    RET_ReallyLR implicit $w0

...
---
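# G_ROTR with matching s64 value and amount types is already legal and is
# left unchanged.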
name:            rotr_s64
alignment:       4
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0, $x1
    ; CHECK-LABEL: name: rotr_s64
    ; CHECK: liveins: $x0, $x1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK-NEXT: %rot:_(s64) = G_ROTR [[COPY]], [[COPY1]](s64)
    ; CHECK-NEXT: $x0 = COPY %rot(s64)
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %rot:_(s64) = G_ROTR %0(s64), %1(s64)
    $x0 = COPY %rot(s64)
    RET_ReallyLR implicit $x0

...
---
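# G_ROTL is converted to a G_ROTR by a negated (0 - amt) rotate amount; the
# s32 amount is then zero-extended to s64 as in the rotr_s32 case above.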
name:            rotl_s32
alignment:       4
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1
    ; CHECK-LABEL: name: rotl_s32
    ; CHECK: liveins: $w0, $w1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[SUB]](s32)
    ; CHECK-NEXT: %rot:_(s32) = G_ROTR [[COPY]], [[ZEXT]](s64)
    ; CHECK-NEXT: $w0 = COPY %rot(s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %0:_(s32) = COPY $w0
    %1:_(s32) = COPY $w1
    %rot:_(s32) = G_ROTL %0(s32), %1(s32)
    $w0 = COPY %rot(s32)
    RET_ReallyLR implicit $w0

...
---
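# As above, G_ROTL becomes G_ROTR of the negated amount; with s64 types no
# extension of the amount is needed.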
name:            rotl_s64
alignment:       4
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0, $x1
    ; CHECK-LABEL: name: rotl_s64
    ; CHECK: liveins: $x0, $x1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
    ; CHECK-NEXT: %rot:_(s64) = G_ROTR [[COPY]], [[SUB]](s64)
    ; CHECK-NEXT: $x0 = COPY %rot(s64)
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %rot:_(s64) = G_ROTL %0(s64), %1(s64)
    $x0 = COPY %rot(s64)
    RET_ReallyLR implicit $x0

...
---
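# Vector rotates are not legal, so a <4 x s32> G_ROTL is lowered to shifts:
# (x << (amt & 31)) | (x >> ((0 - amt) & 31)), with the constants splatted
# via G_BUILD_VECTOR.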
name:            test_rotl_v4s32
alignment:       4
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: test_rotl_v4s32
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[BUILD_VECTOR]], [[COPY1]]
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY1]], [[BUILD_VECTOR1]]
    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(<4 x s32>) = G_SHL [[COPY]], [[AND]](<4 x s32>)
    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[SUB]], [[BUILD_VECTOR1]]
    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[AND1]](<4 x s32>)
    ; CHECK-NEXT: %rot:_(<4 x s32>) = G_OR [[SHL]], [[LSHR]]
    ; CHECK-NEXT: $q0 = COPY %rot(<4 x s32>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(<4 x s32>) = COPY $q1
    %rot:_(<4 x s32>) = G_ROTL %0(<4 x s32>), %1(<4 x s32>)
    $q0 = COPY %rot(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
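# The mirror of the vector rotl lowering: a <4 x s32> G_ROTR becomes
# (x >> (amt & 31)) | (x << ((0 - amt) & 31)).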
name:            test_rotr_v4s32
alignment:       4
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: test_rotr_v4s32
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[BUILD_VECTOR]], [[COPY1]]
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY1]], [[BUILD_VECTOR1]]
    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[AND]](<4 x s32>)
    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[SUB]], [[BUILD_VECTOR1]]
    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(<4 x s32>) = G_SHL [[COPY]], [[AND1]](<4 x s32>)
    ; CHECK-NEXT: %rot:_(<4 x s32>) = G_OR [[LSHR]], [[SHL]]
    ; CHECK-NEXT: $q0 = COPY %rot(<4 x s32>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(<4 x s32>) = COPY $q1
    %rot:_(<4 x s32>) = G_ROTR %0(<4 x s32>), %1(<4 x s32>)
    $q0 = COPY %rot(<4 x s32>)
    RET_ReallyLR implicit $q0

...