llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc %s -mtriple=riscv64 -mattr=v -riscv-v-vector-bits-min=128 -run-pass=finalize-isel -o - | FileCheck %s
# RUN: llc %s -mtriple=riscv64 -mattr=v -riscv-v-vector-bits-min=128 -passes=finalize-isel -o - | FileCheck %s

# This test makes sure we peek through the COPY instruction between the
# IMPLICIT_DEF and PseudoVLE64_V_M8_MASK in order to select the tail agnostic
# policy. The test passes if the policy operand of PseudoVLE64_V_M8_MASK is 1
# (tail agnostic, mask undisturbed).
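#
# For reference: with the policy immediate encoding the RISC-V backend uses on
# masked vector pseudos (bit 0 = tail agnostic, bit 1 = mask agnostic, assuming
# the encoding current when this output was generated), a value of 1 matches
# the "ta, mu" annotation in the CHECK lines below.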

--- |
  ; ModuleID = 'test.ll'
  source_filename = "test.ll"
  target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
  target triple = "riscv64"

  ; Function Attrs: nounwind
  define <vscale x 8 x i64> @masked_load_nxv8i64(ptr %a, <vscale x 8 x i1> %mask) #0 {
    %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
    ret <vscale x 8 x i64> %load
  }
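
  ; The undef passthru of the masked load above is what lowers to the
  ; IMPLICIT_DEF in the MIR body below; seeing that IMPLICIT_DEF through the
  ; COPY is what allows the tail agnostic policy to be chosen.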

  ; Function Attrs: argmemonly nofree nosync nounwind readonly willreturn
  declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i64>) #1

  attributes #0 = { nounwind "target-features"="+v" }
  attributes #1 = { argmemonly nofree nosync nounwind readonly willreturn "target-features"="+v" }

...
---
name:            masked_load_nxv8i64
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vr }
  - { id: 2, class: vrm8nov0 }
  - { id: 3, class: vrm8 }
  - { id: 4, class: vrm8nov0 }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$v0', virtual-reg: '%1' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0 (%ir-block.0):
    liveins: $x10, $v0

    ; CHECK-LABEL: name: masked_load_nxv8i64
    ; CHECK: liveins: $x10, $v0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
    ; CHECK-NEXT: $v0 = COPY [[COPY]]
    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]]
    ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 6 /* e64 */, 1 /* ta, mu */ :: (load (s512) from %ir.a, align 8)
    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %1:vr = COPY $v0
    %0:gpr = COPY $x10
    $v0 = COPY %1
    %3:vrm8 = IMPLICIT_DEF
    %4:vrm8nov0 = COPY %3
    %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, -1, 6, 1 :: (load (s512) from %ir.a, align 8)
    $v8m8 = COPY %2
    PseudoRET implicit $v8m8

...