llvm/test/CodeGen/LoongArch/lsx/ir-instruction/xor.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
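; Test lowering of vector xor for the 128-bit LSX types: xor of two loaded
; vectors should select vxor.v, and xor with a splat constant should select
; either vxori.b or a vrepli.{h,w,d} splat plus vxor.v, depending on the
; element type (see the xor_u_* cases below).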

define void @xor_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: xor_v16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <16 x i8>, ptr %a0
  %v1 = load <16 x i8>, ptr %a1
  %v2 = xor <16 x i8> %v0, %v1
  store <16 x i8> %v2, ptr %res
  ret void
}

define void @xor_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: xor_v8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <8 x i16>, ptr %a0
  %v1 = load <8 x i16>, ptr %a1
  %v2 = xor <8 x i16> %v0, %v1
  store <8 x i16> %v2, ptr %res
  ret void
}

define void @xor_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: xor_v4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <4 x i32>, ptr %a0
  %v1 = load <4 x i32>, ptr %a1
  %v2 = xor <4 x i32> %v0, %v1
  store <4 x i32> %v2, ptr %res
  ret void
}

define void @xor_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: xor_v2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <2 x i64>, ptr %a0
  %v1 = load <2 x i64>, ptr %a1
  %v2 = xor <2 x i64> %v0, %v1
  store <2 x i64> %v2, ptr %res
  ret void
}
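
; The following cases xor each element with the immediate 31. For v16i8 the
; constant fits the uimm8 operand of vxori.b, so it is folded directly into
; the instruction; for the wider element types the splat is first materialized
; with vrepli.{h,w,d} and then combined with vxor.v.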

define void @xor_u_v16i8(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: xor_u_v16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vxori.b $vr0, $vr0, 31
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <16 x i8>, ptr %a0
  %v1 = xor <16 x i8> %v0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
  store <16 x i8> %v1, ptr %res
  ret void
}

define void @xor_u_v8i16(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: xor_u_v8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vrepli.h $vr1, 31
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <8 x i16>, ptr %a0
  %v1 = xor <8 x i16> %v0, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
  store <8 x i16> %v1, ptr %res
  ret void
}

define void @xor_u_v4i32(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: xor_u_v4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vrepli.w $vr1, 31
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <4 x i32>, ptr %a0
  %v1 = xor <4 x i32> %v0, <i32 31, i32 31, i32 31, i32 31>
  store <4 x i32> %v1, ptr %res
  ret void
}

define void @xor_u_v2i64(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: xor_u_v2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vrepli.d $vr1, 31
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <2 x i64>, ptr %a0
  %v1 = xor <2 x i64> %v0, <i64 31, i64 31>
  store <2 x i64> %v1, ptr %res
  ret void
}