llvm/test/CodeGen/AArch64/sme2-intrinsics-cvt.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -mattr=+sme-f16f16 -force-streaming -verify-machineinstrs < %s | FileCheck %s

;
; FCVT
;

define <vscale x 8 x half> @multi_vector_cvt_x2_f16(<vscale x 4 x float> %unused, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2) {
; CHECK-LABEL: multi_vector_cvt_x2_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    fcvt z0.h, { z2.s, z3.s }
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.x2.nxv4f32(<vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2)
  ret <vscale x 8 x half> %res
}

;
; BFCVT
;

define <vscale x 8 x bfloat> @multi_vector_cvt_x2_bf16(<vscale x 4 x float> %unused, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2) {
; CHECK-LABEL: multi_vector_cvt_x2_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    bfcvt z0.h, { z2.s, z3.s }
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.bfcvt.x2(<vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2)
  ret <vscale x 8 x bfloat> %res
}

;
; FCVTZS
;
define {<vscale x 4 x i32>, <vscale x 4 x i32>}  @multi_vector_cvt_x2_s32_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1) {
; CHECK-LABEL: multi_vector_cvt_x2_s32_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    fcvtzs { z0.s, z1.s }, { z2.s, z3.s }
; CHECK-NEXT:    ret
  %res = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sve.fcvtzs.x2.nxv4i32.nxv4f32(<vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1)
  ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %res
}

define {<vscale x 4 x i32>, <vscale x 4 x i32>,<vscale x 4 x i32>, <vscale x 4 x i32>}  @multi_vector_cvt_x4_s32_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3) {
; CHECK-LABEL: multi_vector_cvt_x4_s32_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    fcvtzs { z0.s - z3.s }, { z4.s - z7.s }
; CHECK-NEXT:    ret
  %res = call {<vscale x 4 x i32>, <vscale x 4 x i32>,<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sve.fcvtzs.x4.nxv4i32.nxv4f32(<vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3)
  ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %res
}

;
; FCVTZU
;
define {<vscale x 4 x i32>, <vscale x 4 x i32>}  @multi_vector_cvt_x2_u32_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1) {
; CHECK-LABEL: multi_vector_cvt_x2_u32_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    fcvtzu { z0.s, z1.s }, { z2.s, z3.s }
; CHECK-NEXT:    ret
  %res = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sve.fcvtzu.x2.nxv4i32.nxv4f32(<vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1)
  ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %res
}

define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>}  @multi_vector_cvt_x4_u32_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3) {
; CHECK-LABEL: multi_vector_cvt_x4_u32_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    fcvtzu { z0.s - z3.s }, { z4.s - z7.s }
; CHECK-NEXT:    ret
  %res = call {<vscale x 4 x i32>, <vscale x 4 x i32>,<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sve.fcvtzu.x4.nxv4i32.nxv4f32(<vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3)
  ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %res
}

;
; SCVTF
;
define {<vscale x 4 x float>, <vscale x 4 x float>}  @multi_vector_cvt_x2_f32_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1) {
; CHECK-LABEL: multi_vector_cvt_x2_f32_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    scvtf { z0.s, z1.s }, { z2.s, z3.s }
; CHECK-NEXT:    ret
  %res = call {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sve.scvtf.x2.nxv4f32.nxv4i32(<vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1)
  ret {<vscale x 4 x float>, <vscale x 4 x float>} %res
}

define {<vscale x 4 x float>, <vscale x 4 x float>,<vscale x 4 x float>, <vscale x 4 x float>}  @multi_vector_cvt_x4_f32_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3) {
; CHECK-LABEL: multi_vector_cvt_x4_f32_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    scvtf { z0.s - z3.s }, { z4.s - z7.s }
; CHECK-NEXT:    ret
  %res = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sve.scvtf.x4.nxv4f32.nxv4i32(<vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3)
  ret {<vscale x 4 x float>, <vscale x 4 x float>,<vscale x 4 x float>, <vscale x 4 x float>} %res
}

;
; UCVTF
;
define {<vscale x 4 x float>, <vscale x 4 x float>}  @multi_vector_cvt_x2_f32_u32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1) {
; CHECK-LABEL: multi_vector_cvt_x2_f32_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    ucvtf { z0.s, z1.s }, { z2.s, z3.s }
; CHECK-NEXT:    ret
  %res = call {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sve.ucvtf.x2.nxv4f32.nxv4i32(<vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1)
  ret {<vscale x 4 x float>, <vscale x 4 x float>} %res
}

define {<vscale x 4 x float>, <vscale x 4 x float>,<vscale x 4 x float>, <vscale x 4 x float>}  @multi_vector_cvt_x4_f32_u32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3) {
; CHECK-LABEL: multi_vector_cvt_x4_f32_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    ucvtf { z0.s - z3.s }, { z4.s - z7.s }
; CHECK-NEXT:    ret
  %res = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sve.ucvtf.x4.nxv4f32.nxv4i32(<vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3)
  ret {<vscale x 4 x float>, <vscale x 4 x float>,<vscale x 4 x float>, <vscale x 4 x float>} %res
}

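;
; FCVT (widening)
;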
define {<vscale x 4 x float>, <vscale x 4 x float>}  @multi_vector_cvt_widen_x2_f16(<vscale x 8 x half> %zn0) {
; CHECK-LABEL: multi_vector_cvt_widen_x2_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt { z0.s, z1.s }, z0.h
; CHECK-NEXT:    ret
  %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fcvt.widen.x2.nxv4f32(<vscale x 8 x half> %zn0)
  ret {<vscale x 4 x float>, <vscale x 4 x float>} %res
}

declare <vscale x 8 x half> @llvm.aarch64.sve.fcvt.x2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.bfcvt.x2(<vscale x 4 x float>, <vscale x 4 x float>)
declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sve.fcvtzs.x2.nxv4i32.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>)
declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sve.fcvtzu.x2.nxv4i32.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>)
declare {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sve.scvtf.x2.nxv4f32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>)
declare {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sve.ucvtf.x2.nxv4f32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>)
declare {<vscale x 4 x i32>, <vscale x 4 x i32>,<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sve.fcvtzs.x4.nxv4i32.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>)
declare {<vscale x 4 x i32>, <vscale x 4 x i32>,<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sve.fcvtzu.x4.nxv4i32.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>)
declare {<vscale x 4 x float>, <vscale x 4 x float>,<vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sve.scvtf.x4.nxv4f32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>)
declare {<vscale x 4 x float>, <vscale x 4 x float>,<vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sve.ucvtf.x4.nxv4f32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>)
declare {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sve.fcvt.widen.x2.nxv4f32(<vscale x 8 x half>)