llvm/test/CodeGen/AArch64/sme2-intrinsics-mlals.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -mattr=+bf16 -force-streaming -verify-machineinstrs < %s | FileCheck %s
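
; This file tests the SME2 widening multiply-accumulate (long) intrinsics:
; BFMLAL/FMLAL/SMLAL/UMLAL and their MLSL (multiply-subtract) counterparts.
; Each instruction multiplies 16-bit elements (bf16/f16/i16) and accumulates
; into, or subtracts from, pairs of consecutive 32-bit ZA slices. Every test
; invokes the intrinsic twice: once with a slice offset of 0 and once with
; the largest offset the slice immediate accepts, to cover both ends of the
; range. The MLSL sections mirror the MLAL ones, subtracting the products
; instead of adding them.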

;
; BF/F/S/UMLAL x1 (SINGLE)
;
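; Each x1 test writes one pair of consecutive ZA.S slices; offset 0
; (slices 0:1) and offset 14 (slices 14:15) are the two extremes of the
; slice immediate for the single-vector form.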

define void @multi_vector_add_single_vg2x1_bf16(i32 %slice, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: multi_vector_add_single_vg2x1_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    bfmlal za.s[w8, 0:1], z0.h, z1.h
; CHECK-NEXT:    bfmlal za.s[w8, 14:15], z0.h, z1.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlal.single.vg2x1.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.fmlal.single.vg2x1.nxv8bf16(i32 %slice.14, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm)
  ret void
}

define void @multi_vector_add_single_vg2x1_f16(i32 %slice, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm) {
; CHECK-LABEL: multi_vector_add_single_vg2x1_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    fmlal za.s[w8, 0:1], z0.h, z1.h
; CHECK-NEXT:    fmlal za.s[w8, 14:15], z0.h, z1.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlal.single.vg2x1.nxv8f16(i32 %slice, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.fmlal.single.vg2x1.nxv8f16(i32 %slice.14, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm)
  ret void
}

define void @multi_vector_add_single_vg2x1_s16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_add_single_vg2x1_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    smlal za.s[w8, 0:1], z0.h, z1.h
; CHECK-NEXT:    smlal za.s[w8, 14:15], z0.h, z1.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smlal.single.vg2x1.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.smlal.single.vg2x1.nxv8i16(i32 %slice.14, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
  ret void
}

define void @multi_vector_add_single_vg2x1_u16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_add_single_vg2x1_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    umlal za.s[w8, 0:1], z0.h, z1.h
; CHECK-NEXT:    umlal za.s[w8, 14:15], z0.h, z1.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umlal.single.vg2x1.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.umlal.single.vg2x1.nxv8i16(i32 %slice.14, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
  ret void
}

;
; BF/F/S/UMLSL x1 (SINGLE)
;

define void @multi_vector_sub_single_vg2x1_bf16(i32 %slice, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: multi_vector_sub_single_vg2x1_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    bfmlsl za.s[w8, 0:1], z0.h, z1.h
; CHECK-NEXT:    bfmlsl za.s[w8, 14:15], z0.h, z1.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlsl.single.vg2x1.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.fmlsl.single.vg2x1.nxv8bf16(i32 %slice.14, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm)
  ret void
}

define void @multi_vector_sub_single_vg2x1_f16(i32 %slice, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm) {
; CHECK-LABEL: multi_vector_sub_single_vg2x1_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    fmlsl za.s[w8, 0:1], z0.h, z1.h
; CHECK-NEXT:    fmlsl za.s[w8, 14:15], z0.h, z1.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlsl.single.vg2x1.nxv8f16(i32 %slice, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.fmlsl.single.vg2x1.nxv8f16(i32 %slice.14, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm)
  ret void
}

define void @multi_vector_sub_single_vg2x1_s16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_sub_single_vg2x1_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    smlsl za.s[w8, 0:1], z0.h, z1.h
; CHECK-NEXT:    smlsl za.s[w8, 14:15], z0.h, z1.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smlsl.single.vg2x1.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.smlsl.single.vg2x1.nxv8i16(i32 %slice.14, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
  ret void
}

define void @multi_vector_sub_single_vg2x1_u16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_sub_single_vg2x1_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    umlsl za.s[w8, 0:1], z0.h, z1.h
; CHECK-NEXT:    umlsl za.s[w8, 14:15], z0.h, z1.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umlsl.single.vg2x1.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.umlsl.single.vg2x1.nxv8i16(i32 %slice.14, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
  ret void
}

;
; BF/F/S/UMLAL x2 (SINGLE)
;
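; The vgx2 forms multiply a consecutive register pair { z0.h, z1.h } by a
; single z2.h operand. The "kill" comments are printed for the KILL
; pseudo-instructions that tie the incoming argument registers into the
; Z0_Z1 tuple the instruction requires. The multi-vector forms have a
; smaller slice immediate, so the maximum offset drops to 6 (slices 6:7).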

define void @multi_vector_add_single_vg2x2_bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: multi_vector_add_single_vg2x2_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    bfmlal za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    bfmlal za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlal.single.vg2x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlal.single.vg2x2.nxv8bf16(i32 %slice.6, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zm)
  ret void
}

define void @multi_vector_add_single_vg2x2_f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zm) {
; CHECK-LABEL: multi_vector_add_single_vg2x2_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmlal za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    fmlal za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlal.single.vg2x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlal.single.vg2x2.nxv8f16(i32 %slice.6, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zm)
  ret void
}

define void @multi_vector_add_single_vg2x2_s16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_add_single_vg2x2_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    smlal za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    smlal za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smlal.single.vg2x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.smlal.single.vg2x2.nxv8i16(i32 %slice.6, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm)
  ret void
}

define void @multi_vector_add_single_vg2x2_u16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_add_single_vg2x2_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    umlal za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    umlal za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umlal.single.vg2x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.umlal.single.vg2x2.nxv8i16(i32 %slice.6, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm)
  ret void
}

;
; BF/F/S/UMLSL x2 (SINGLE)
;

define void @multi_vector_sub_single_vg2x2_bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: multi_vector_sub_single_vg2x2_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    bfmlsl za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    bfmlsl za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlsl.single.vg2x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlsl.single.vg2x2.nxv8bf16(i32 %slice.6, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zm)
  ret void
}

define void @multi_vector_sub_single_vg2x2_f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zm) {
; CHECK-LABEL: multi_vector_sub_single_vg2x2_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmlsl za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    fmlsl za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlsl.single.vg2x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlsl.single.vg2x2.nxv8f16(i32 %slice.6, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zm)
  ret void
}

define void @multi_vector_sub_single_vg2x2_s16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_sub_single_vg2x2_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    smlsl za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    smlsl za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smlsl.single.vg2x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.smlsl.single.vg2x2.nxv8i16(i32 %slice.6, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm)
  ret void
}

define void @multi_vector_sub_single_vg2x2_u16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_sub_single_vg2x2_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    umlsl za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    umlsl za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umlsl.single.vg2x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.umlsl.single.vg2x2.nxv8i16(i32 %slice.6, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm)
  ret void
}

;
; BF/F/S/UMLAL x4 (SINGLE)
;
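; Same as the vgx2 tests above, but with a consecutive quad { z0.h - z3.h }
; multiplied by a single z4.h operand.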

define void @multi_vector_add_single_vg2x4_bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: multi_vector_add_single_vg2x4_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    bfmlal za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    bfmlal za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlal.single.vg2x4.nxv8bf16(i32 %slice,
                                                            <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                            <vscale x 8 x bfloat> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlal.single.vg2x4.nxv8bf16(i32 %slice.6,
                                                            <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                            <vscale x 8 x bfloat> %zm)
  ret void
}

define void @multi_vector_add_single_vg2x4_f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zm) {
; CHECK-LABEL: multi_vector_add_single_vg2x4_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmlal za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    fmlal za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlal.single.vg2x4.nxv8f16(i32 %slice,
                                                         <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                         <vscale x 8 x half> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlal.single.vg2x4.nxv8f16(i32 %slice.6,
                                                         <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                         <vscale x 8 x half> %zm)
  ret void
}

define void @multi_vector_add_single_vg2x4_s16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_add_single_vg2x4_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    smlal za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    smlal za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smlal.single.vg2x4.nxv8i16(i32 %slice,
                                                         <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                         <vscale x 8 x i16> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.smlal.single.vg2x4.nxv8i16(i32 %slice.6,
                                                         <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                         <vscale x 8 x i16> %zm)
  ret void
}

define void @multi_vector_add_single_vg2x4_u16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_add_single_vg2x4_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    umlal za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    umlal za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umlal.single.vg2x4.nxv8i16(i32 %slice,
                                                         <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                         <vscale x 8 x i16> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.umlal.single.vg2x4.nxv8i16(i32 %slice.6,
                                                         <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                         <vscale x 8 x i16> %zm)
  ret void
}

;
; BF/F/S/UMLSL x4 (SINGLE)
;

define void @multi_vector_sub_single_vg2x4_bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: multi_vector_sub_single_vg2x4_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    bfmlsl za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    bfmlsl za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlsl.single.vg2x4.nxv8bf16(i32 %slice,
                                                           <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                           <vscale x 8 x bfloat> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlsl.single.vg2x4.nxv8bf16(i32 %slice.6,
                                                           <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                           <vscale x 8 x bfloat> %zm)
  ret void
}

define void @multi_vector_sub_single_vg2x4_f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zm) {
; CHECK-LABEL: multi_vector_sub_single_vg2x4_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmlsl za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    fmlsl za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlsl.single.vg2x4.nxv8f16(i32 %slice,
                                                         <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                         <vscale x 8 x half> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlsl.single.vg2x4.nxv8f16(i32 %slice.6,
                                                         <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                         <vscale x 8 x half> %zm)
  ret void
}

define void @multi_vector_sub_single_vg2x4_s16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_sub_single_vg2x4_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    smlsl za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    smlsl za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smlsl.single.vg2x4.nxv8i16(i32 %slice,
                                                         <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                         <vscale x 8 x i16> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.smlsl.single.vg2x4.nxv8i16(i32 %slice.6,
                                                         <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                         <vscale x 8 x i16> %zm)
  ret void
}

define void @multi_vector_sub_single_vg2x4_u16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_sub_single_vg2x4_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    umlsl za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    umlsl za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umlsl.single.vg2x4.nxv8i16(i32 %slice,
                                                         <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                         <vscale x 8 x i16> %zm)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.umlsl.single.vg2x4.nxv8i16(i32 %slice.6,
                                                         <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                         <vscale x 8 x i16> %zm)
  ret void
}

;
; BF/F/S/UMLAL x2 (MULTI)
;
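; The MULTI forms multiply two register tuples element-wise:
; { z0.h, z1.h } by { z2.h, z3.h } here, and { z0.h - z3.h } by
; { z4.h - z7.h } in the vgx4 tests further down.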

define void @multi_vector_add_multi_vg2x2_bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zm0, <vscale x 8 x bfloat> %zm1) {
; CHECK-LABEL: multi_vector_add_multi_vg2x2_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    bfmlal za.s[w8, 0:1, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    bfmlal za.s[w8, 6:7, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlal.vg2x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
                                                               <vscale x 8 x bfloat> %zm0, <vscale x 8 x bfloat> %zm1)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlal.vg2x2.nxv8bf16(i32 %slice.6, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
                                                                 <vscale x 8 x bfloat> %zm0, <vscale x 8 x bfloat> %zm1)
  ret void
}

define void @multi_vector_add_multi_vg2x2_f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zm0, <vscale x 8 x half> %zm1) {
; CHECK-LABEL: multi_vector_add_multi_vg2x2_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmlal za.s[w8, 0:1, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    fmlal za.s[w8, 6:7, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlal.vg2x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
                                                              <vscale x 8 x half> %zm0, <vscale x 8 x half> %zm1)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlal.vg2x2.nxv8f16(i32 %slice.6, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
                                                                <vscale x 8 x half> %zm0, <vscale x 8 x half> %zm1)
  ret void
}

define void @multi_vector_add_multi_vg2x2_s16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1) {
; CHECK-LABEL: multi_vector_add_multi_vg2x2_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    smlal za.s[w8, 0:1, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    smlal za.s[w8, 6:7, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smlal.vg2x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1,
                                                              <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.smlal.vg2x2.nxv8i16(i32 %slice.6, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1,
                                                                <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1)
  ret void
}

define void @multi_vector_add_multi_vg2x2_u16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1) {
; CHECK-LABEL: multi_vector_add_multi_vg2x2_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    umlal za.s[w8, 0:1, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    umlal za.s[w8, 6:7, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umlal.vg2x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1,
                                                              <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.umlal.vg2x2.nxv8i16(i32 %slice.6, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1,
                                                                <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1)
  ret void
}

;
; BF/F/S/UMLSL x2 (MULTI)
;

define void @multi_vector_sub_multi_vg2x2_bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zm0, <vscale x 8 x bfloat> %zm1) {
; CHECK-LABEL: multi_vector_sub_multi_vg2x2_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    bfmlsl za.s[w8, 0:1, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    bfmlsl za.s[w8, 6:7, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlsl.vg2x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
                                                               <vscale x 8 x bfloat> %zm0, <vscale x 8 x bfloat> %zm1)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlsl.vg2x2.nxv8bf16(i32 %slice.6, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
                                                                 <vscale x 8 x bfloat> %zm0, <vscale x 8 x bfloat> %zm1)
  ret void
}

define void @multi_vector_sub_multi_vg2x2_f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zm0, <vscale x 8 x half> %zm1) {
; CHECK-LABEL: multi_vector_sub_multi_vg2x2_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmlsl za.s[w8, 0:1, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    fmlsl za.s[w8, 6:7, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlsl.vg2x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
                                                              <vscale x 8 x half> %zm0, <vscale x 8 x half> %zm1)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlsl.vg2x2.nxv8f16(i32 %slice.6, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
                                                                <vscale x 8 x half> %zm0, <vscale x 8 x half> %zm1)
  ret void
}

define void @multi_vector_sub_multi_vg2x2_s16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1) {
; CHECK-LABEL: multi_vector_sub_multi_vg2x2_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    smlsl za.s[w8, 0:1, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    smlsl za.s[w8, 6:7, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smlsl.vg2x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1,
                                                              <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.smlsl.vg2x2.nxv8i16(i32 %slice.6, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1,
                                                                <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1)
  ret void
}

define void @multi_vector_sub_multi_vg2x2_u16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1) {
; CHECK-LABEL: multi_vector_sub_multi_vg2x2_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    umlsl za.s[w8, 0:1, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    umlsl za.s[w8, 6:7, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umlsl.vg2x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1,
                                                              <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.umlsl.vg2x2.nxv8i16(i32 %slice.6, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1,
                                                                <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1)
  ret void
}

;
; BF/F/S/UMLAL x4 (MULTI)
;

define void @multi_vector_add_multi_vg2x4_bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
; CHECK-LABEL: multi_vector_add_multi_vg2x4_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    bfmlal za.s[w8, 0:1, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    bfmlal za.s[w8, 6:7, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    ret
                                               <vscale x 8 x bfloat> %zm0, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2, <vscale x 8 x bfloat> %zm3) {
  call void @llvm.aarch64.sme.fmlal.vg2x4.nxv8bf16(i32 %slice,
                                                   <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                   <vscale x 8 x bfloat> %zm0, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2, <vscale x 8 x bfloat> %zm3)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlal.vg2x4.nxv8bf16(i32 %slice.6,
                                                   <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                   <vscale x 8 x bfloat> %zm0, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2, <vscale x 8 x bfloat> %zm3)
  ret void
}

define void @multi_vector_add_multi_vg2x4_f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
; CHECK-LABEL: multi_vector_add_multi_vg2x4_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmlal za.s[w8, 0:1, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    fmlal za.s[w8, 6:7, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    ret
                                              <vscale x 8 x half> %zm0, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2, <vscale x 8 x half> %zm3) {
  call void @llvm.aarch64.sme.fmlal.vg2x4.nxv8f16(i32 %slice,
                                                  <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                  <vscale x 8 x half> %zm0, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2, <vscale x 8 x half> %zm3)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlal.vg2x4.nxv8f16(i32 %slice.6,
                                                  <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                  <vscale x 8 x half> %zm0, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2, <vscale x 8 x half> %zm3)
  ret void
}

define void @multi_vector_add_multi_vg2x4_s16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
; CHECK-LABEL: multi_vector_add_multi_vg2x4_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    smlal za.s[w8, 0:1, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    smlal za.s[w8, 6:7, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    ret
                                              <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3) {
  call void @llvm.aarch64.sme.smlal.vg2x4.nxv8i16(i32 %slice,
                                                  <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                  <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.smlal.vg2x4.nxv8i16(i32 %slice.6,
                                                  <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                  <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3)
  ret void
}

define void @multi_vector_add_multi_vg2x4_u16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
; CHECK-LABEL: multi_vector_add_multi_vg2x4_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    umlal za.s[w8, 0:1, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    umlal za.s[w8, 6:7, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    ret
                                              <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3) {
  call void @llvm.aarch64.sme.umlal.vg2x4.nxv8i16(i32 %slice,
                                                  <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                  <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.umlal.vg2x4.nxv8i16(i32 %slice.6,
                                                  <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                  <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3)
  ret void
}

;
; BF/F/S/UMLSL x4 (MULTI)
;

define void @multi_vector_sub_multi_vg2x4_bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
; CHECK-LABEL: multi_vector_sub_multi_vg2x4_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    bfmlsl za.s[w8, 0:1, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    bfmlsl za.s[w8, 6:7, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    ret
                                               <vscale x 8 x bfloat> %zm0, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2, <vscale x 8 x bfloat> %zm3) {
  call void @llvm.aarch64.sme.fmlsl.vg2x4.nxv8bf16(i32 %slice,
                                                   <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                   <vscale x 8 x bfloat> %zm0, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2, <vscale x 8 x bfloat> %zm3)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlsl.vg2x4.nxv8bf16(i32 %slice.6,
                                                   <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                   <vscale x 8 x bfloat> %zm0, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2, <vscale x 8 x bfloat> %zm3)
  ret void
}

define void @multi_vector_sub_multi_vg2x4_f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
; CHECK-LABEL: multi_vector_sub_multi_vg2x4_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmlsl za.s[w8, 0:1, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    fmlsl za.s[w8, 6:7, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    ret
                                              <vscale x 8 x half> %zm0, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2, <vscale x 8 x half> %zm3) {
  call void @llvm.aarch64.sme.fmlsl.vg2x4.nxv8f16(i32 %slice,
                                                  <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                  <vscale x 8 x half> %zm0, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2, <vscale x 8 x half> %zm3)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlsl.vg2x4.nxv8f16(i32 %slice.6,
                                                  <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                  <vscale x 8 x half> %zm0, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2, <vscale x 8 x half> %zm3)
  ret void
}

define void @multi_vector_sub_multi_vg2x4_s16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
; CHECK-LABEL: multi_vector_sub_multi_vg2x4_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    smlsl za.s[w8, 0:1, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    smlsl za.s[w8, 6:7, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    ret
                                              <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3) {
  call void @llvm.aarch64.sme.smlsl.vg2x4.nxv8i16(i32 %slice,
                                                  <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                  <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.smlsl.vg2x4.nxv8i16(i32 %slice.6,
                                                  <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                  <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3)
  ret void
}

define void @multi_vector_sub_multi_vg2x4_u16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
; CHECK-LABEL: multi_vector_sub_multi_vg2x4_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    umlsl za.s[w8, 0:1, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    umlsl za.s[w8, 6:7, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    ret
                                              <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3) {
  call void @llvm.aarch64.sme.umlsl.vg2x4.nxv8i16(i32 %slice,
                                                  <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                  <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.umlsl.vg2x4.nxv8i16(i32 %slice.6,
                                                  <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                  <vscale x 8 x i16> %zm0, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3)
  ret void
}

;
; BF/F/S/UMLAL x1 (INDEXED)
;
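; The INDEXED forms multiply by a single lane of the zm operand; lane 0 and
; lane 7 are the minimum and maximum indices for 16-bit elements.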

define void @multi_vector_add_lane_vg2x1_f16(i32 %slice, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg2x1_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    fmlal za.s[w8, 0:1], z0.h, z1.h[0]
; CHECK-NEXT:    fmlal za.s[w8, 14:15], z0.h, z1.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlal.lane.vg2x1.nxv8f16(i32 %slice, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm, i32 0)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.fmlal.lane.vg2x1.nxv8f16(i32 %slice.14, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm, i32 7)
  ret void
}

define void @multi_vector_add_lane_vg2x1_bf16(i32 %slice, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg2x1_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    bfmlal za.s[w8, 0:1], z0.h, z1.h[0]
; CHECK-NEXT:    bfmlal za.s[w8, 14:15], z0.h, z1.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlal.lane.vg2x1.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm, i32 0)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.fmlal.lane.vg2x1.nxv8bf16(i32 %slice.14, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm, i32 7)
  ret void
}

define void @multi_vector_add_lane_vg2x1_s16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg2x1_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    smlal za.s[w8, 0:1], z0.h, z1.h[0]
; CHECK-NEXT:    smlal za.s[w8, 14:15], z0.h, z1.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smlal.lane.vg2x1.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm, i32 0)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.smlal.lane.vg2x1.nxv8i16(i32 %slice.14, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm, i32 7)
  ret void
}

define void @multi_vector_add_lane_vg2x1_u16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg2x1_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    umlal za.s[w8, 0:1], z0.h, z1.h[0]
; CHECK-NEXT:    umlal za.s[w8, 14:15], z0.h, z1.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umlal.lane.vg2x1.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm, i32 0)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.umlal.lane.vg2x1.nxv8i16(i32 %slice.14, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm, i32 7)
  ret void
}

;
; BF/F/S/UMLSL x1 (INDEXED)
;

define void @multi_vector_sub_lane_vg2x1_f16(i32 %slice, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm) {
; CHECK-LABEL: multi_vector_sub_lane_vg2x1_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    fmlsl za.s[w8, 0:1], z0.h, z1.h[0]
; CHECK-NEXT:    fmlsl za.s[w8, 14:15], z0.h, z1.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlsl.lane.vg2x1.nxv8f16(i32 %slice, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm, i32 0)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.fmlsl.lane.vg2x1.nxv8f16(i32 %slice.14, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm, i32 7)
  ret void
}

define void @multi_vector_sub_lane_vg2x1_bf16(i32 %slice, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: multi_vector_sub_lane_vg2x1_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    bfmlsl za.s[w8, 0:1], z0.h, z1.h[0]
; CHECK-NEXT:    bfmlsl za.s[w8, 14:15], z0.h, z1.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlsl.lane.vg2x1.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm, i32 0)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.fmlsl.lane.vg2x1.nxv8bf16(i32 %slice.14, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm, i32 7)
  ret void
}

define void @multi_vector_sub_lane_vg2x1_s16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_sub_lane_vg2x1_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    smlsl za.s[w8, 0:1], z0.h, z1.h[0]
; CHECK-NEXT:    smlsl za.s[w8, 14:15], z0.h, z1.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smlsl.lane.vg2x1.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm, i32 0)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.smlsl.lane.vg2x1.nxv8i16(i32 %slice.14, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm, i32 7)
  ret void
}

define void @multi_vector_sub_lane_vg2x1_u16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_sub_lane_vg2x1_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    umlsl za.s[w8, 0:1], z0.h, z1.h[0]
; CHECK-NEXT:    umlsl za.s[w8, 14:15], z0.h, z1.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umlsl.lane.vg2x1.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm, i32 0)
  %slice.14 = add i32 %slice, 14
  call void @llvm.aarch64.sme.umlsl.lane.vg2x1.nxv8i16(i32 %slice.14, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm, i32 7)
  ret void
}

;
; BF/F/S/UMLAL x2 (INDEXED)
;

define void @multi_vector_add_lane_vg2x2_f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg2x2_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmlal za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h[0]
; CHECK-NEXT:    fmlal za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlal.lane.vg2x2.nxv8f16(i32 %slice,
                                                       <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlal.lane.vg2x2.nxv8f16(i32 %slice.6,
                                                       <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zm, i32 7)
  ret void
}

define void @multi_vector_add_lane_vg2x2_bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg2x2_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    bfmlal za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h[0]
; CHECK-NEXT:    bfmlal za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlal.lane.vg2x2.nxv8bf16(i32 %slice,
                                                         <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlal.lane.vg2x2.nxv8bf16(i32 %slice.6,
                                                         <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zm, i32 7)
  ret void
}

define void @multi_vector_add_lane_vg2x2_s16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg2x2_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    smlal za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h[0]
; CHECK-NEXT:    smlal za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smlal.lane.vg2x2.nxv8i16(i32 %slice,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.smlal.lane.vg2x2.nxv8i16(i32 %slice.6,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm, i32 7)
  ret void
}

define void @multi_vector_add_lane_vg2x2_u16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg2x2_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    umlal za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h[0]
; CHECK-NEXT:    umlal za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umlal.lane.vg2x2.nxv8i16(i32 %slice,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.umlal.lane.vg2x2.nxv8i16(i32 %slice.6,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm, i32 7)
  ret void
}

;
; BF/F/S/UMLSL x2 (INDEXED)
;
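; Same shape as the MLAL tests above, except the widened products are
; subtracted from the ZA.s accumulator (BFMLSL/FMLSL/SMLSL/UMLSL).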

define void @multi_vector_sub_lane_vg2x2_f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zm) {
; CHECK-LABEL: multi_vector_sub_lane_vg2x2_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmlsl za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h[0]
; CHECK-NEXT:    fmlsl za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlsl.lane.vg2x2.nxv8f16(i32 %slice,
                                                       <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlsl.lane.vg2x2.nxv8f16(i32 %slice.6,
                                                       <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zm, i32 7)
  ret void
}

define void @multi_vector_sub_lane_vg2x2_bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: multi_vector_sub_lane_vg2x2_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    bfmlsl za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h[0]
; CHECK-NEXT:    bfmlsl za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlsl.lane.vg2x2.nxv8bf16(i32 %slice,
                                                         <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlsl.lane.vg2x2.nxv8bf16(i32 %slice.6,
                                                         <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zm, i32 7)
  ret void
}

define void @multi_vector_sub_lane_vg2x2_s16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_sub_lane_vg2x2_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    smlsl za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h[0]
; CHECK-NEXT:    smlsl za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smlsl.lane.vg2x2.nxv8i16(i32 %slice,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.smlsl.lane.vg2x2.nxv8i16(i32 %slice.6,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm, i32 7)
  ret void
}

define void @multi_vector_sub_lane_vg2x2_u16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_sub_lane_vg2x2_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    umlsl za.s[w8, 0:1, vgx2], { z0.h, z1.h }, z2.h[0]
; CHECK-NEXT:    umlsl za.s[w8, 6:7, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umlsl.lane.vg2x2.nxv8i16(i32 %slice,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.umlsl.lane.vg2x2.nxv8i16(i32 %slice.6,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zm, i32 7)
  ret void
}

;
; BF/F/S/UMLAL x4 (INDEXED)
;
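; The vgx4 forms consume four consecutive Zn vectors (the Z0_Z1_Z2_Z3 tuple,
; hence the extra "kill" comments) against one indexed Zm element; the slice
; and lane immediates cover the same 0:1/6:7 and 0/7 extremes as the vgx2
; tests.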

define void @multi_vector_add_lane_vg2x4_f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg2x4_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmlal za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    fmlal za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlal.lane.vg2x4.nxv8f16(i32 %slice,
                                                       <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                       <vscale x 8 x half> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlal.lane.vg2x4.nxv8f16(i32 %slice.6,
                                                       <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                       <vscale x 8 x half> %zm, i32 7)
  ret void
}

define void @multi_vector_add_lane_vg2x4_bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg2x4_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    bfmlal za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    bfmlal za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlal.lane.vg2x4.nxv8bf16(i32 %slice,
                                                         <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                         <vscale x 8 x bfloat> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlal.lane.vg2x4.nxv8bf16(i32 %slice.6,
                                                         <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                         <vscale x 8 x bfloat> %zm, i32 7)
  ret void
}

define void @multi_vector_add_lane_vg2x4_s16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg2x4_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    smlal za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    smlal za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smlal.lane.vg2x4.nxv8i16(i32 %slice,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                       <vscale x 8 x i16> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.smlal.lane.vg2x4.nxv8i16(i32 %slice.6,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                       <vscale x 8 x i16> %zm, i32 7)
  ret void
}

define void @multi_vector_add_lane_vg2x4_u16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg2x4_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    umlal za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    umlal za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umlal.lane.vg2x4.nxv8i16(i32 %slice,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                       <vscale x 8 x i16> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.umlal.lane.vg2x4.nxv8i16(i32 %slice.6,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                       <vscale x 8 x i16> %zm, i32 7)
  ret void
}

;
; BF/F/S/UMLSL x4 (INDEXED)
;
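; The subtracting counterparts of the vgx4 MLAL tests above.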

define void @multi_vector_sub_lane_vg2x4_f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zm) {
; CHECK-LABEL: multi_vector_sub_lane_vg2x4_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmlsl za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    fmlsl za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlsl.lane.vg2x4.nxv8f16(i32 %slice,
                                                       <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                       <vscale x 8 x half> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlsl.lane.vg2x4.nxv8f16(i32 %slice.6,
                                                       <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                       <vscale x 8 x half> %zm, i32 7)
  ret void
}

define void @multi_vector_sub_lane_vg2x4_bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: multi_vector_sub_lane_vg2x4_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    bfmlsl za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    bfmlsl za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmlsl.lane.vg2x4.nxv8bf16(i32 %slice,
                                                         <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                         <vscale x 8 x bfloat> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.fmlsl.lane.vg2x4.nxv8bf16(i32 %slice.6,
                                                         <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                         <vscale x 8 x bfloat> %zm, i32 7)
  ret void
}

define void @multi_vector_sub_lane_vg2x4_s16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_sub_lane_vg2x4_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    smlsl za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    smlsl za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smlsl.lane.vg2x4.nxv8i16(i32 %slice,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                       <vscale x 8 x i16> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.smlsl.lane.vg2x4.nxv8i16(i32 %slice.6,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                       <vscale x 8 x i16> %zm, i32 7)
  ret void
}

define void @multi_vector_sub_lane_vg2x4_u16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: multi_vector_sub_lane_vg2x4_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    umlsl za.s[w8, 0:1, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    umlsl za.s[w8, 6:7, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umlsl.lane.vg2x4.nxv8i16(i32 %slice,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                       <vscale x 8 x i16> %zm, i32 0)
  %slice.6 = add i32 %slice, 6
  call void @llvm.aarch64.sme.umlsl.lane.vg2x4.nxv8i16(i32 %slice.6,
                                                       <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
                                                       <vscale x 8 x i16> %zm, i32 7)
  ret void
}
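
; Declarations for every intrinsic form exercised in this file.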

declare void @llvm.aarch64.sme.fmlal.single.vg2x1.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmlal.single.vg2x1.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.smlal.single.vg2x1.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.umlal.single.vg2x1.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>)

declare void @llvm.aarch64.sme.fmlsl.single.vg2x1.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmlsl.single.vg2x1.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.smlsl.single.vg2x1.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.umlsl.single.vg2x1.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>)

declare void @llvm.aarch64.sme.fmlal.single.vg2x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmlal.single.vg2x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.smlal.single.vg2x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.umlal.single.vg2x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)

declare void @llvm.aarch64.sme.fmlsl.single.vg2x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmlsl.single.vg2x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.smlsl.single.vg2x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.umlsl.single.vg2x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)

declare void @llvm.aarch64.sme.fmlal.single.vg2x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>,
                                                                 <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmlal.single.vg2x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>,
                                                               <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.smlal.single.vg2x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>,
                                                               <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.umlal.single.vg2x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>,
                                                               <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)

declare void @llvm.aarch64.sme.fmlsl.single.vg2x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>,
                                                                 <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmlsl.single.vg2x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>,
                                                               <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.smlsl.single.vg2x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>,
                                                               <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.umlsl.single.vg2x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>,
                                                               <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)

declare void @llvm.aarch64.sme.fmlal.vg2x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmlal.vg2x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.smlal.vg2x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.umlal.vg2x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)

declare void @llvm.aarch64.sme.fmlsl.vg2x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmlsl.vg2x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.smlsl.vg2x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.umlsl.vg2x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)

declare void @llvm.aarch64.sme.fmlal.vg2x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>,
                                                         <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmlal.vg2x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>,
                                                        <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.smlal.vg2x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>,
                                                        <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.umlal.vg2x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>,
                                                        <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)

declare void @llvm.aarch64.sme.fmlsl.vg2x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>,
                                                         <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmlsl.vg2x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>,
                                                        <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.smlsl.vg2x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>,
                                                        <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.umlsl.vg2x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>,
                                                        <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)

declare void @llvm.aarch64.sme.fmlal.lane.vg2x1.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
declare void @llvm.aarch64.sme.fmlal.lane.vg2x1.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, i32)
declare void @llvm.aarch64.sme.smlal.lane.vg2x1.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare void @llvm.aarch64.sme.umlal.lane.vg2x1.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)

declare void @llvm.aarch64.sme.fmlsl.lane.vg2x1.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
declare void @llvm.aarch64.sme.fmlsl.lane.vg2x1.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, i32)
declare void @llvm.aarch64.sme.smlsl.lane.vg2x1.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare void @llvm.aarch64.sme.umlsl.lane.vg2x1.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)

declare void @llvm.aarch64.sme.fmlal.lane.vg2x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
declare void @llvm.aarch64.sme.fmlal.lane.vg2x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
declare void @llvm.aarch64.sme.smlal.lane.vg2x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare void @llvm.aarch64.sme.umlal.lane.vg2x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)

declare void @llvm.aarch64.sme.fmlsl.lane.vg2x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
declare void @llvm.aarch64.sme.fmlsl.lane.vg2x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
declare void @llvm.aarch64.sme.smlsl.lane.vg2x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare void @llvm.aarch64.sme.umlsl.lane.vg2x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)

declare void @llvm.aarch64.sme.fmlal.lane.vg2x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
declare void @llvm.aarch64.sme.fmlal.lane.vg2x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
declare void @llvm.aarch64.sme.smlal.lane.vg2x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare void @llvm.aarch64.sme.umlal.lane.vg2x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)

declare void @llvm.aarch64.sme.fmlsl.lane.vg2x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
declare void @llvm.aarch64.sme.fmlsl.lane.vg2x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
declare void @llvm.aarch64.sme.smlsl.lane.vg2x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare void @llvm.aarch64.sme.umlsl.lane.vg2x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)