; llvm/llvm/test/CodeGen/LoongArch/lsx/intrinsic-exth.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s

declare <8 x i16> @llvm.loongarch.lsx.vexth.h.b(<16 x i8>)

; Check that the llvm.loongarch.lsx.vexth.h.b intrinsic (<16 x i8> -> <8 x i16>)
; lowers to a single in-place vexth.h.b on $vr0 followed by ret.
define <8 x i16> @lsx_vexth_h_b(<16 x i8> %va) nounwind {
; CHECK-LABEL: lsx_vexth_h_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vexth.h.b $vr0, $vr0
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vexth.h.b(<16 x i8> %va)
  ret <8 x i16> %res
}

declare <4 x i32> @llvm.loongarch.lsx.vexth.w.h(<8 x i16>)

; Check that the llvm.loongarch.lsx.vexth.w.h intrinsic (<8 x i16> -> <4 x i32>)
; lowers to a single in-place vexth.w.h on $vr0 followed by ret.
define <4 x i32> @lsx_vexth_w_h(<8 x i16> %va) nounwind {
; CHECK-LABEL: lsx_vexth_w_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vexth.w.h $vr0, $vr0
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vexth.w.h(<8 x i16> %va)
  ret <4 x i32> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vexth.d.w(<4 x i32>)

; Check that the llvm.loongarch.lsx.vexth.d.w intrinsic (<4 x i32> -> <2 x i64>)
; lowers to a single in-place vexth.d.w on $vr0 followed by ret.
define <2 x i64> @lsx_vexth_d_w(<4 x i32> %va) nounwind {
; CHECK-LABEL: lsx_vexth_d_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vexth.d.w $vr0, $vr0
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vexth.d.w(<4 x i32> %va)
  ret <2 x i64> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vexth.q.d(<2 x i64>)

; Check that the llvm.loongarch.lsx.vexth.q.d intrinsic (<2 x i64> -> <2 x i64>,
; the q-sized result is still modeled as a <2 x i64> IR vector) lowers to a
; single in-place vexth.q.d on $vr0 followed by ret.
define <2 x i64> @lsx_vexth_q_d(<2 x i64> %va) nounwind {
; CHECK-LABEL: lsx_vexth_q_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vexth.q.d $vr0, $vr0
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vexth.q.d(<2 x i64> %va)
  ret <2 x i64> %res
}

declare <8 x i16> @llvm.loongarch.lsx.vexth.hu.bu(<16 x i8>)

; Unsigned variant: check that the llvm.loongarch.lsx.vexth.hu.bu intrinsic
; (<16 x i8> -> <8 x i16>) lowers to a single in-place vexth.hu.bu on $vr0.
define <8 x i16> @lsx_vexth_hu_bu(<16 x i8> %va) nounwind {
; CHECK-LABEL: lsx_vexth_hu_bu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vexth.hu.bu $vr0, $vr0
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vexth.hu.bu(<16 x i8> %va)
  ret <8 x i16> %res
}

declare <4 x i32> @llvm.loongarch.lsx.vexth.wu.hu(<8 x i16>)

; Unsigned variant: check that the llvm.loongarch.lsx.vexth.wu.hu intrinsic
; (<8 x i16> -> <4 x i32>) lowers to a single in-place vexth.wu.hu on $vr0.
define <4 x i32> @lsx_vexth_wu_hu(<8 x i16> %va) nounwind {
; CHECK-LABEL: lsx_vexth_wu_hu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vexth.wu.hu $vr0, $vr0
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vexth.wu.hu(<8 x i16> %va)
  ret <4 x i32> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vexth.du.wu(<4 x i32>)

; Unsigned variant: check that the llvm.loongarch.lsx.vexth.du.wu intrinsic
; (<4 x i32> -> <2 x i64>) lowers to a single in-place vexth.du.wu on $vr0.
define <2 x i64> @lsx_vexth_du_wu(<4 x i32> %va) nounwind {
; CHECK-LABEL: lsx_vexth_du_wu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vexth.du.wu $vr0, $vr0
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vexth.du.wu(<4 x i32> %va)
  ret <2 x i64> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vexth.qu.du(<2 x i64>)

; Unsigned variant: check that the llvm.loongarch.lsx.vexth.qu.du intrinsic
; (<2 x i64> -> <2 x i64>, q-sized result modeled as <2 x i64>) lowers to a
; single in-place vexth.qu.du on $vr0.
define <2 x i64> @lsx_vexth_qu_du(<2 x i64> %va) nounwind {
; CHECK-LABEL: lsx_vexth_qu_du:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vexth.qu.du $vr0, $vr0
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vexth.qu.du(<2 x i64> %va)
  ret <2 x i64> %res
}