
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -verify-machineinstrs -force-streaming < %s | FileCheck %s

target triple = "aarch64-linux"
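
; This file checks that the SME2p1 multi-vector ZERO intrinsics lower to the
; ZERO instruction and that an in-range constant slice offset is folded into
; the instruction's immediate. The vg1 forms take a single-slice offset
; (0-7); the vg2 and vg4 forms take the first slice of a consecutive range.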

define void @test_svzero_za64_vg1x2(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg1x2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    zero za.d[w8, 0, vgx2]
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.aarch64.sme.zero.za64.vg1x2(i32 %slice)
  ret void
}

define void @test_svzero_za64_vg1x2_offset(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg1x2_offset:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    zero za.d[w8, 7, vgx2]
; CHECK-NEXT:    ret
entry:
  %slice.max = add i32 %slice, 7
  tail call void @llvm.aarch64.sme.zero.za64.vg1x2(i32 %slice.max)
  ret void
}

define void @test_svzero_za64_vg1x4(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg1x4:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    zero za.d[w8, 0, vgx4]
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.aarch64.sme.zero.za64.vg1x4(i32 %slice)
  ret void
}

define void @test_svzero_za64_vg1x4_offset(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg1x4_offset:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    zero za.d[w8, 1, vgx4]
; CHECK-NEXT:    ret
entry:
  %slice.min = add i32 %slice, 1
  tail call void @llvm.aarch64.sme.zero.za64.vg1x4(i32 %slice.min)
  ret void
}
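
; The vg2 forms zero a pair of consecutive ZA.D slices, printed as an
; inclusive slice range such as 0:1.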

define void @test_svzero_za64_vg2x1(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg2x1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    zero za.d[w8, 0:1]
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.aarch64.sme.zero.za64.vg2x1(i32 %slice)
  ret void
}

define void @test_svzero_za64_vg2x1_offset(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg2x1_offset:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    zero za.d[w8, 6:7]
; CHECK-NEXT:    ret
entry:
  %slice.max = add i32 %slice, 6
  tail call void @llvm.aarch64.sme.zero.za64.vg2x1(i32 %slice.max)
  ret void
}

define void @test_svzero_za64_vg2x2(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg2x2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    zero za.d[w8, 0:1, vgx2]
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.aarch64.sme.zero.za64.vg2x2(i32 %slice)
  ret void
}

define void @test_svzero_za64_vg2x2_offset(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg2x2_offset:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    zero za.d[w8, 2:3, vgx2]
; CHECK-NEXT:    ret
entry:
  %slice.max = add i32 %slice, 2
  tail call void @llvm.aarch64.sme.zero.za64.vg2x2(i32 %slice.max)
  ret void
}

define void @test_svzero_za64_vg2x4(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg2x4:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    zero za.d[w8, 0:1, vgx4]
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.aarch64.sme.zero.za64.vg2x4(i32 %slice)
  ret void
}
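
; An offset of 1 cannot be encoded in the double-vector vgx4 form, so it is
; materialized with a separate add instead of being folded.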

define void @test_svzero_za64_vg2x4_offset(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg2x4_offset:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add w8, w0, #1
; CHECK-NEXT:    zero za.d[w8, 0:1, vgx4]
; CHECK-NEXT:    ret
entry:
  %slice.min = add i32 %slice, 1
  tail call void @llvm.aarch64.sme.zero.za64.vg2x4(i32 %slice.min)
  ret void
}
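
; The vg4 forms zero four consecutive ZA.D slices, such as 0:3.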

define void @test_svzero_za64_vg4x1(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg4x1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    zero za.d[w8, 0:3]
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.aarch64.sme.zero.za64.vg4x1(i32 %slice)
  ret void
}

define void @test_svzero_za64_vg4x1_offset(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg4x1_offset:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    zero za.d[w8, 4:7]
; CHECK-NEXT:    ret
entry:
  %slice.max = add i32 %slice, 4
  tail call void @llvm.aarch64.sme.zero.za64.vg4x1(i32 %slice.max)
  ret void
}

define void @test_svzero_za64_vg4x2(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg4x2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    zero za.d[w8, 0:3, vgx2]
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.aarch64.sme.zero.za64.vg4x2(i32 %slice)
  ret void
}
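
; An offset of 4 is encodable as the 4:7 slice range for the quad-vector
; vgx2 form, so it folds into the instruction.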

define void @test_svzero_za64_vg4x2_offset(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg4x2_offset:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    zero za.d[w8, 4:7, vgx2]
; CHECK-NEXT:    ret
entry:
  %slice.max = add i32 %slice, 4
  tail call void @llvm.aarch64.sme.zero.za64.vg4x2(i32 %slice.max)
  ret void
}

define void @test_svzero_za64_vg4x4(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg4x4:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    zero za.d[w8, 0:3, vgx4]
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.aarch64.sme.zero.za64.vg4x4(i32 %slice)
  ret void
}
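
; Likewise, an offset of 1 cannot be encoded in the quad-vector vgx4 form,
; so the add remains.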

define void @test_svzero_za64_vg4x4_offset(i32 %slice) #0 {
; CHECK-LABEL: test_svzero_za64_vg4x4_offset:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add w8, w0, #1
; CHECK-NEXT:    zero za.d[w8, 0:3, vgx4]
; CHECK-NEXT:    ret
entry:
  %slice.min = add i32 %slice, 1
  tail call void @llvm.aarch64.sme.zero.za64.vg4x4(i32 %slice.min)
  ret void
}

attributes #0 = { nounwind "target-features"="+sme2p1" }