; llvm/test/CodeGen/AArch64/sme2-intrinsics-fp-dots.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -force-streaming -verify-machineinstrs < %s | FileCheck %s

target triple="aarch64-linux-gnu"
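
; All functions below are compiled as streaming functions (-force-streaming);
; the dot-product instructions themselves are enabled by the "+sme2" target
; feature in attributes #0 at the bottom of the file.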


; == Multi, multi (16-bit float) ==
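;
; FDOT (multiple vectors) accumulates widened dot products of 16-bit element
; pairs into the 32-bit elements of a ZA single-vector group. The slice is a
; base register (w8-w11) plus an immediate offset in the range 0-7, so the
; 'add i32 %slice, 7' below folds into the offset rather than needing extra
; arithmetic. Both multi-vector operands must occupy consecutive Z registers
; starting at a multiple of the group size, which is why register copies
; precede each fdot.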

define void @fdot_multi_za32_f16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3) #0 {
; CHECK-LABEL: fdot_multi_za32_f16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    fdot za.s[w8, 0, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    fdot za.s[w8, 7, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3)
  ret void
}

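; With vgx4, %unused plus eight data vectors exceed the z0-z7 argument
; registers, so the final operand is passed indirectly and reloaded by the
; ld1h from [x1] in the expected output.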
define void @fdot_multi_za32_f16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
; CHECK-LABEL: fdot_multi_za32_f16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z31.d, z4.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    mov z30.d, z3.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov z29.d, z2.d
; CHECK-NEXT:    ld1h { z27.h }, p0/z, [x1]
; CHECK-NEXT:    mov z28.d, z1.d
; CHECK-NEXT:    fdot za.s[w8, 0, vgx4], { z28.h - z31.h }, { z24.h - z27.h }
; CHECK-NEXT:    fdot za.s[w8, 7, vgx4], { z28.h - z31.h }, { z24.h - z27.h }
; CHECK-NEXT:    ret
                                        <vscale x 8 x half> %zn4, <vscale x 8 x half> %zn5, <vscale x 8 x half> %zn6, <vscale x 8 x half> %zn7) #0 {
  call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                      <vscale x 8 x half> %zn4, <vscale x 8 x half> %zn5, <vscale x 8 x half> %zn6, <vscale x 8 x half> %zn7)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                      <vscale x 8 x half> %zn4, <vscale x 8 x half> %zn5, <vscale x 8 x half> %zn6, <vscale x 8 x half> %zn7)
  ret void
}


; == Multi, multi (16-bit bfloat) ==
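;
; The same checks with bfloat16 sources: the nxv8bf16 variants of the same
; fdot intrinsics select BFDOT, with identical operand constraints.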

define void @bfdot_multi_za32_bf16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3) #0 {
; CHECK-LABEL: bfdot_multi_za32_bf16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    bfdot za.s[w8, 0, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    bfdot za.s[w8, 7, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3)
  ret void
}

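; As in the f16 vgx4 test, the final operand is passed indirectly and
; reloaded with ld1h.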
define void @bfdot_multi_za32_bf16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
; CHECK-LABEL: bfdot_multi_za32_bf16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z31.d, z4.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    mov z30.d, z3.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov z29.d, z2.d
; CHECK-NEXT:    ld1h { z27.h }, p0/z, [x1]
; CHECK-NEXT:    mov z28.d, z1.d
; CHECK-NEXT:    bfdot za.s[w8, 0, vgx4], { z28.h - z31.h }, { z24.h - z27.h }
; CHECK-NEXT:    bfdot za.s[w8, 7, vgx4], { z28.h - z31.h }, { z24.h - z27.h }
; CHECK-NEXT:    ret
                                        <vscale x 8 x bfloat> %zn4, <vscale x 8 x bfloat> %zn5, <vscale x 8 x bfloat> %zn6, <vscale x 8 x bfloat> %zn7) #0 {
  call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                       <vscale x 8 x bfloat> %zn4, <vscale x 8 x bfloat> %zn5, <vscale x 8 x bfloat> %zn6, <vscale x 8 x bfloat> %zn7)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                       <vscale x 8 x bfloat> %zn4, <vscale x 8 x bfloat> %zn5, <vscale x 8 x bfloat> %zn6, <vscale x 8 x bfloat> %zn7)
  ret void
}


; == Multi, single (16-bit float) ==
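;
; FDOT (multiple and single vector): the second source is a single Z register
; shared by every vector of the ZN group. Here the ZN tuple only needs to be
; consecutive, not aligned, so { z1.h, z2.h } is used in place; the 'kill'
; comments merely mark the tuple's register units and no copies are required.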

define void @fdot_single_za32_f16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2) #0 {
; CHECK-LABEL: fdot_single_za32_f16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2 def $z1_z2
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2 def $z1_z2
; CHECK-NEXT:    fdot za.s[w8, 0, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    fdot za.s[w8, 7, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2)
  ret void
}

define void @fdot_single_za32_f16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4) #0 {
; CHECK-LABEL: fdot_single_za32_f16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
; CHECK-NEXT:    fdot za.s[w8, 0, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    fdot za.s[w8, 7, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4)
  ret void
}


; == Multi, single (16-bit bfloat) ==
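;
; The bfloat16 counterparts of the multi-single tests, selecting BFDOT.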

define void @bfdot_single_za32_bf16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2) #0 {
; CHECK-LABEL: bfdot_single_za32_bf16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2 def $z1_z2
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2 def $z1_z2
; CHECK-NEXT:    bfdot za.s[w8, 0, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    bfdot za.s[w8, 7, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2)
  ret void
}

define void @bfdot_single_za32_bf16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4) #0 {
; CHECK-LABEL: bfdot_single_za32_bf16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
; CHECK-NEXT:    bfdot za.s[w8, 0, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    bfdot za.s[w8, 7, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
  ret void
}


; == Multi, indexed (16-bit float) ==
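;
; FDOT (multiple and indexed vector): the second source is a single vector
; with an immediate lane index. For 16-bit sources accumulating into za.s the
; index selects one of four element pairs per 128-bit segment, so its valid
; range is 0-3. The ZN tuple must again be aligned to the group size, hence
; the copies into z4/z5 and z24-z27.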

define void @fdot_lane_za32_f16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2) #0 {
; CHECK-LABEL: fdot_lane_za32_f16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    fdot za.s[w8, 0, vgx2], { z4.h, z5.h }, z3.h[3]
; CHECK-NEXT:    fdot za.s[w8, 7, vgx2], { z4.h, z5.h }, z3.h[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, i32 3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, i32 3)
  ret void
}

define void @fdot_lane_za32_f16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4) #0 {
; CHECK-LABEL: fdot_lane_za32_f16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z27.d, z4.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z26.d, z3.d
; CHECK-NEXT:    mov z25.d, z2.d
; CHECK-NEXT:    mov z24.d, z1.d
; CHECK-NEXT:    fdot za.s[w8, 0, vgx4], { z24.h - z27.h }, z5.h[3]
; CHECK-NEXT:    fdot za.s[w8, 7, vgx4], { z24.h - z27.h }, z5.h[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                           <vscale x 8 x half> %zn4, i32 3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
                                                           <vscale x 8 x half> %zn4, i32 3)
  ret void
}


; == Multi, indexed (16-bit bfloat) ==
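;
; The bfloat16 counterparts of the indexed tests, selecting BFDOT.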

define void @bfdot_lane_za32_bf16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2) #0 {
; CHECK-LABEL: bfdot_lane_za32_bf16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    bfdot za.s[w8, 0, vgx2], { z4.h, z5.h }, z3.h[3]
; CHECK-NEXT:    bfdot za.s[w8, 7, vgx2], { z4.h, z5.h }, z3.h[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, i32 3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, i32 3)
  ret void
}

define void @bfdot_lane_za32_bf16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4) #0 {
; CHECK-LABEL: bfdot_lane_za32_bf16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z27.d, z4.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z26.d, z3.d
; CHECK-NEXT:    mov z25.d, z2.d
; CHECK-NEXT:    mov z24.d, z1.d
; CHECK-NEXT:    bfdot za.s[w8, 0, vgx4], { z24.h - z27.h }, z5.h[3]
; CHECK-NEXT:    bfdot za.s[w8, 7, vgx4], { z24.h - z27.h }, z5.h[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                            <vscale x 8 x bfloat> %zn4, i32 3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
                                                            <vscale x 8 x bfloat> %zn4, i32 3)
  ret void
}


attributes #0 = { nounwind "target-features"="+sme2" }


; == Multi, multi (16-bit float) ==

declare void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>,
                                                       <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)

; == Multi, multi (16-bit bfloat) ==

declare void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>,
                                                        <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)

; == Multi, single (16-bit float) ==

declare void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)

; == Multi, single (16-bit bfloat) ==

declare void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)

; == Multi, indexed (16-bit float) ==

declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)

; == Multi, indexed (16-bit bfloat) ==

declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)