llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s
#
# Verify register banks for intrinsics with known constraints. (E.g. all
# operands must be FPRs.)
#
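#
# For reference, a rough LLVM IR equivalent of the first case below (a sketch,
# not taken from the original test source; the mangled intrinsic name is
# assumed to follow the usual overload scheme):
#
#   declare i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8>)
#
#   define i32 @uaddlv_fpr(<16 x i8> %v) {
#     %r = call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %v)
#     ret i32 %r
#   }
#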

...
---
name:            uaddlv_fpr
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $q0

    ; CHECK-LABEL: name: uaddlv_fpr
    ; CHECK: liveins: $q0
    ; CHECK: %copy:fpr(<16 x s8>) = COPY $q0
    ; CHECK: %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<16 x s8>)
    ; CHECK: $w0 = COPY %intrin(s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:_(<16 x s8>) = COPY $q0
    %intrin:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<16 x s8>)
    $w0 = COPY %intrin(s32)
    RET_ReallyLR implicit $w0

...
---
name:            uaddlv_fpr_load
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: uaddlv_fpr_load
    ; CHECK: liveins: $x0
    ; CHECK: %ptr:gpr(p0) = COPY $x0
    ; CHECK: %load:fpr(<2 x s32>) = G_LOAD %ptr(p0) :: (load (<2 x s32>))
    ; CHECK: %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %load(<2 x s32>)
    ; CHECK: $w0 = COPY %intrin(s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %ptr:_(p0) = COPY $x0
    %load:_(<2 x s32>) = G_LOAD %ptr :: (load (<2 x s32>))
    %intrin:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %load(<2 x s32>)
    $w0 = COPY %intrin(s32)
    RET_ReallyLR implicit $w0

...
---
name:            uaddlv_fpr_store
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0, $x1
    ; CHECK-LABEL: name: uaddlv_fpr_store
    ; CHECK: liveins: $x0, $x1
    ; CHECK: %copy:gpr(<2 x s32>) = COPY $x0
    ; CHECK: %ptr:gpr(p0) = COPY $x0
    ; CHECK: [[COPY:%[0-9]+]]:fpr(<2 x s32>) = COPY %copy(<2 x s32>)
    ; CHECK: %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), [[COPY]](<2 x s32>)
    ; CHECK: G_STORE %intrin(s32), %ptr(p0) :: (store (s32))
    ; CHECK: RET_ReallyLR
    %copy:_(<2 x s32>) = COPY $x0
    %ptr:_(p0) = COPY $x0
    %intrin:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<2 x s32>)
    G_STORE %intrin, %ptr :: (store (s32))
    RET_ReallyLR

...