; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
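
;; Brief note on intent (inferred from the checks below): these tests verify that
;; the @llvm.loongarch.lsx.vseq.{b/h/w/d} and @llvm.loongarch.lsx.vseqi.{b/h/w/d}
;; intrinsics are lowered to the corresponding vseq/vseqi LSX instructions. The
;; vseqi cases use 15 and -16, the boundary values of the signed 5-bit immediate.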
declare <16 x i8> @llvm.loongarch.lsx.vseq.b(<16 x i8>, <16 x i8>)

define <16 x i8> @lsx_vseq_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
; CHECK-LABEL: lsx_vseq_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vseq.b $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i8> @llvm.loongarch.lsx.vseq.b(<16 x i8> %va, <16 x i8> %vb)
  ret <16 x i8> %res
}

declare <8 x i16> @llvm.loongarch.lsx.vseq.h(<8 x i16>, <8 x i16>)

define <8 x i16> @lsx_vseq_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
; CHECK-LABEL: lsx_vseq_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vseq.h $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vseq.h(<8 x i16> %va, <8 x i16> %vb)
  ret <8 x i16> %res
}

declare <4 x i32> @llvm.loongarch.lsx.vseq.w(<4 x i32>, <4 x i32>)

define <4 x i32> @lsx_vseq_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
; CHECK-LABEL: lsx_vseq_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vseq.w $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vseq.w(<4 x i32> %va, <4 x i32> %vb)
  ret <4 x i32> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vseq.d(<2 x i64>, <2 x i64>)

define <2 x i64> @lsx_vseq_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
; CHECK-LABEL: lsx_vseq_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vseq.d $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vseq.d(<2 x i64> %va, <2 x i64> %vb)
  ret <2 x i64> %res
}

declare <16 x i8> @llvm.loongarch.lsx.vseqi.b(<16 x i8>, i32)

define <16 x i8> @lsx_vseqi_b(<16 x i8> %va) nounwind {
; CHECK-LABEL: lsx_vseqi_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vseqi.b $vr0, $vr0, 15
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i8> @llvm.loongarch.lsx.vseqi.b(<16 x i8> %va, i32 15)
  ret <16 x i8> %res
}

declare <8 x i16> @llvm.loongarch.lsx.vseqi.h(<8 x i16>, i32)

define <8 x i16> @lsx_vseqi_h(<8 x i16> %va) nounwind {
; CHECK-LABEL: lsx_vseqi_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vseqi.h $vr0, $vr0, 15
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vseqi.h(<8 x i16> %va, i32 15)
  ret <8 x i16> %res
}

declare <4 x i32> @llvm.loongarch.lsx.vseqi.w(<4 x i32>, i32)

define <4 x i32> @lsx_vseqi_w(<4 x i32> %va) nounwind {
; CHECK-LABEL: lsx_vseqi_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vseqi.w $vr0, $vr0, -16
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vseqi.w(<4 x i32> %va, i32 -16)
  ret <4 x i32> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vseqi.d(<2 x i64>, i32)

define <2 x i64> @lsx_vseqi_d(<2 x i64> %va) nounwind {
; CHECK-LABEL: lsx_vseqi_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vseqi.d $vr0, $vr0, -16
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vseqi.d(<2 x i64> %va, i32 -16)
  ret <2 x i64> %res
}