; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
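; Checks that the llvm.loongarch.lsx.vslt[i].{b,h,w,d}[u] intrinsics select the
; corresponding vslt/vslti LSX compare instructions for every element width.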

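; Signed register-register compares.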
declare <16 x i8> @llvm.loongarch.lsx.vslt.b(<16 x i8>, <16 x i8>)

define <16 x i8> @lsx_vslt_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
; CHECK-LABEL: lsx_vslt_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslt.b $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i8> @llvm.loongarch.lsx.vslt.b(<16 x i8> %va, <16 x i8> %vb)
  ret <16 x i8> %res
}

declare <8 x i16> @llvm.loongarch.lsx.vslt.h(<8 x i16>, <8 x i16>)

define <8 x i16> @lsx_vslt_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
; CHECK-LABEL: lsx_vslt_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslt.h $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vslt.h(<8 x i16> %va, <8 x i16> %vb)
  ret <8 x i16> %res
}

declare <4 x i32> @llvm.loongarch.lsx.vslt.w(<4 x i32>, <4 x i32>)

define <4 x i32> @lsx_vslt_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
; CHECK-LABEL: lsx_vslt_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslt.w $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vslt.w(<4 x i32> %va, <4 x i32> %vb)
  ret <4 x i32> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vslt.d(<2 x i64>, <2 x i64>)

define <2 x i64> @lsx_vslt_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
; CHECK-LABEL: lsx_vslt_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslt.d $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vslt.d(<2 x i64> %va, <2 x i64> %vb)
  ret <2 x i64> %res
}

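; Signed compares against a 5-bit signed immediate (si5).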
declare <16 x i8> @llvm.loongarch.lsx.vslti.b(<16 x i8>, i32)

define <16 x i8> @lsx_vslti_b(<16 x i8> %va) nounwind {
; CHECK-LABEL: lsx_vslti_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslti.b $vr0, $vr0, 15
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i8> @llvm.loongarch.lsx.vslti.b(<16 x i8> %va, i32 15)
  ret <16 x i8> %res
}

declare <8 x i16> @llvm.loongarch.lsx.vslti.h(<8 x i16>, i32)

define <8 x i16> @lsx_vslti_h(<8 x i16> %va) nounwind {
; CHECK-LABEL: lsx_vslti_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslti.h $vr0, $vr0, 15
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vslti.h(<8 x i16> %va, i32 15)
  ret <8 x i16> %res
}

declare <4 x i32> @llvm.loongarch.lsx.vslti.w(<4 x i32>, i32)

define <4 x i32> @lsx_vslti_w(<4 x i32> %va) nounwind {
; CHECK-LABEL: lsx_vslti_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslti.w $vr0, $vr0, -16
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vslti.w(<4 x i32> %va, i32 -16)
  ret <4 x i32> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vslti.d(<2 x i64>, i32)

define <2 x i64> @lsx_vslti_d(<2 x i64> %va) nounwind {
; CHECK-LABEL: lsx_vslti_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslti.d $vr0, $vr0, -16
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vslti.d(<2 x i64> %va, i32 -16)
  ret <2 x i64> %res
}

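; Unsigned register-register compares.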
declare <16 x i8> @llvm.loongarch.lsx.vslt.bu(<16 x i8>, <16 x i8>)

define <16 x i8> @lsx_vslt_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
; CHECK-LABEL: lsx_vslt_bu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslt.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i8> @llvm.loongarch.lsx.vslt.bu(<16 x i8> %va, <16 x i8> %vb)
  ret <16 x i8> %res
}

declare <8 x i16> @llvm.loongarch.lsx.vslt.hu(<8 x i16>, <8 x i16>)

define <8 x i16> @lsx_vslt_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
; CHECK-LABEL: lsx_vslt_hu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslt.hu $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vslt.hu(<8 x i16> %va, <8 x i16> %vb)
  ret <8 x i16> %res
}

declare <4 x i32> @llvm.loongarch.lsx.vslt.wu(<4 x i32>, <4 x i32>)

define <4 x i32> @lsx_vslt_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
; CHECK-LABEL: lsx_vslt_wu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslt.wu $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vslt.wu(<4 x i32> %va, <4 x i32> %vb)
  ret <4 x i32> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vslt.du(<2 x i64>, <2 x i64>)

define <2 x i64> @lsx_vslt_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
; CHECK-LABEL: lsx_vslt_du:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslt.du $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vslt.du(<2 x i64> %va, <2 x i64> %vb)
  ret <2 x i64> %res
}

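; Unsigned compares against a 5-bit unsigned immediate (ui5).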
declare <16 x i8> @llvm.loongarch.lsx.vslti.bu(<16 x i8>, i32)

define <16 x i8> @lsx_vslti_bu(<16 x i8> %va) nounwind {
; CHECK-LABEL: lsx_vslti_bu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslti.bu $vr0, $vr0, 1
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i8> @llvm.loongarch.lsx.vslti.bu(<16 x i8> %va, i32 1)
  ret <16 x i8> %res
}

declare <8 x i16> @llvm.loongarch.lsx.vslti.hu(<8 x i16>, i32)

define <8 x i16> @lsx_vslti_hu(<8 x i16> %va) nounwind {
; CHECK-LABEL: lsx_vslti_hu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslti.hu $vr0, $vr0, 1
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vslti.hu(<8 x i16> %va, i32 1)
  ret <8 x i16> %res
}

declare <4 x i32> @llvm.loongarch.lsx.vslti.wu(<4 x i32>, i32)

define <4 x i32> @lsx_vslti_wu(<4 x i32> %va) nounwind {
; CHECK-LABEL: lsx_vslti_wu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslti.wu $vr0, $vr0, 31
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vslti.wu(<4 x i32> %va, i32 31)
  ret <4 x i32> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vslti.du(<2 x i64>, i32)

define <2 x i64> @lsx_vslti_du(<2 x i64> %va) nounwind {
; CHECK-LABEL: lsx_vslti_du:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vslti.du $vr0, $vr0, 31
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vslti.du(<2 x i64> %va, i32 31)
  ret <2 x i64> %res
}