; llvm/test/CodeGen/RISCV/rvv/trunc-select-to-max-usat.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+zve64x,+zvl128b | FileCheck %s

; Pattern under test: select (x u< 256), x, sext(x s> 0), then trunc to i8.
;   x in [0,255]  -> x passes through
;   x >= 256      -> sext(true) = all-ones, which truncates to 0xff
;   x < 0 (huge unsigned) -> sext(false) = 0
; i.e. an unsigned saturating truncate; expected lowering is vmax.vx with zero
; (clamp negatives) followed by a single vnclipu.wi (saturating narrow i16->i8).
define <4 x i8> @test_v4i16_v4i8(<4 x i16> %x) {
; CHECK-LABEL: test_v4i16_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <4 x i16> %x, zeroinitializer
  %b = sext <4 x i1> %a to <4 x i16>
  %c = icmp ult <4 x i16> %x, splat (i16 256)
  %d = select <4 x i1> %c, <4 x i16> %x, <4 x i16> %b
  %e = trunc <4 x i16> %d to <4 x i8>
  ret <4 x i8> %e
}

; Same usat-truncate pattern, i32 -> i8: narrowing by two element-size steps,
; so the lowering uses vmax.vx plus two cascaded vnclipu.wi instructions
; (e32 -> e16 -> e8).
define <4 x i8> @test_v4i32_v4i8(<4 x i32> %x) {
; CHECK-LABEL: test_v4i32_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <4 x i32> %x, zeroinitializer
  %b = sext <4 x i1> %a to <4 x i32>
  %c = icmp ult <4 x i32> %x, splat (i32 256)
  %d = select <4 x i1> %c, <4 x i32> %x, <4 x i32> %b
  %e = trunc <4 x i32> %d to <4 x i8>
  ret <4 x i8> %e
}

; Same usat-truncate pattern, i64 -> i8: three narrowing steps
; (e64 -> e32 -> e16 -> e8), so three cascaded vnclipu.wi after the clamp.
define <4 x i8> @test_v4i64_v4i8(<4 x i64> %x) {
; CHECK-LABEL: test_v4i64_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnclipu.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <4 x i64> %x, zeroinitializer
  %b = sext <4 x i1> %a to <4 x i64>
  %c = icmp ult <4 x i64> %x, splat (i64 256)
  %d = select <4 x i1> %c, <4 x i64> %x, <4 x i64> %b
  %e = trunc <4 x i64> %d to <4 x i8>
  ret <4 x i8> %e
}

; Usat-truncate i32 -> i16: threshold 65536 = 2^16, single narrowing step,
; so one vmax.vx + one vnclipu.wi.
define <4 x i16> @test_v4i32_v4i16(<4 x i32> %x) {
; CHECK-LABEL: test_v4i32_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <4 x i32> %x, zeroinitializer
  %b = sext <4 x i1> %a to <4 x i32>
  %c = icmp ult <4 x i32> %x, splat (i32 65536)
  %d = select <4 x i1> %c, <4 x i32> %x, <4 x i32> %b
  %e = trunc <4 x i32> %d to <4 x i16>
  ret <4 x i16> %e
}

; Usat-truncate i64 -> i16: threshold 2^16, two narrowing steps
; (e64 -> e32 -> e16), so two cascaded vnclipu.wi after the clamp.
define <4 x i16> @test_v4i64_v4i16(<4 x i64> %x) {
; CHECK-LABEL: test_v4i64_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnclipu.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v10, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <4 x i64> %x, zeroinitializer
  %b = sext <4 x i1> %a to <4 x i64>
  %c = icmp ult <4 x i64> %x, splat (i64 65536)
  %d = select <4 x i1> %c, <4 x i64> %x, <4 x i64> %b
  %e = trunc <4 x i64> %d to <4 x i16>
  ret <4 x i16> %e
}

; Usat-truncate i64 -> i32: threshold 4294967296 = 2^32, single narrowing step,
; so one vmax.vx + one vnclipu.wi.
define <4 x i32> @test_v4i64_v4i32(<4 x i64> %x) {
; CHECK-LABEL: test_v4i64_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vmax.vx v10, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v10, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <4 x i64> %x, zeroinitializer
  %b = sext <4 x i1> %a to <4 x i64>
  %c = icmp ult <4 x i64> %x, splat (i64 4294967296)
  %d = select <4 x i1> %c, <4 x i64> %x, <4 x i64> %b
  %e = trunc <4 x i64> %d to <4 x i32>
  ret <4 x i32> %e
}

; Scalable-vector variant of the i16 -> i8 usat-truncate pattern; uses
; vsetvli with a0 (VLMAX) instead of an immediate VL, otherwise the same
; vmax.vx + vnclipu.wi lowering is expected.
define <vscale x 4 x i8> @test_nxv4i16_nxv4i8(<vscale x 4 x i16> %x) {
; CHECK-LABEL: test_nxv4i16_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <vscale x 4 x i16> %x, zeroinitializer
  %b = sext <vscale x 4 x i1> %a to <vscale x 4 x i16>
  %c = icmp ult <vscale x 4 x i16> %x, splat (i16 256)
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i16> %x, <vscale x 4 x i16> %b
  %e = trunc <vscale x 4 x i16> %d to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %e
}

; Scalable-vector usat-truncate i32 -> i8: two narrowing steps, so
; vmax.vx + two cascaded vnclipu.wi (e32 -> e16 -> e8).
define <vscale x 4 x i8> @test_nxv4i32_nxv4i8(<vscale x 4 x i32> %x) {
; CHECK-LABEL: test_nxv4i32_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnclipu.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v10, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <vscale x 4 x i32> %x, zeroinitializer
  %b = sext <vscale x 4 x i1> %a to <vscale x 4 x i32>
  %c = icmp ult <vscale x 4 x i32> %x, splat (i32 256)
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %x, <vscale x 4 x i32> %b
  %e = trunc <vscale x 4 x i32> %d to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %e
}

; Scalable-vector usat-truncate i64 -> i8: three narrowing steps, so
; vmax.vx + three cascaded vnclipu.wi (e64 -> e32 -> e16 -> e8).
define <vscale x 4 x i8> @test_nxv4i64_nxv4i8(<vscale x 4 x i64> %x) {
; CHECK-LABEL: test_nxv4i64_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnclipu.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <vscale x 4 x i64> %x, zeroinitializer
  %b = sext <vscale x 4 x i1> %a to <vscale x 4 x i64>
  %c = icmp ult <vscale x 4 x i64> %x, splat (i64 256)
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i64> %x, <vscale x 4 x i64> %b
  %e = trunc <vscale x 4 x i64> %d to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %e
}

; Scalable-vector usat-truncate i32 -> i16: threshold 2^16, single narrowing
; step, so one vmax.vx + one vnclipu.wi.
define <vscale x 4 x i16> @test_nxv4i32_nxv4i16(<vscale x 4 x i32> %x) {
; CHECK-LABEL: test_nxv4i32_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmax.vx v10, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v10, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <vscale x 4 x i32> %x, zeroinitializer
  %b = sext <vscale x 4 x i1> %a to <vscale x 4 x i32>
  %c = icmp ult <vscale x 4 x i32> %x, splat (i32 65536)
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %x, <vscale x 4 x i32> %b
  %e = trunc <vscale x 4 x i32> %d to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %e
}

; Scalable-vector usat-truncate i64 -> i16: threshold 2^16, two narrowing
; steps, so vmax.vx + two cascaded vnclipu.wi (e64 -> e32 -> e16).
define <vscale x 4 x i16> @test_nxv4i64_nxv4i16(<vscale x 4 x i64> %x) {
; CHECK-LABEL: test_nxv4i64_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnclipu.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v12, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <vscale x 4 x i64> %x, zeroinitializer
  %b = sext <vscale x 4 x i1> %a to <vscale x 4 x i64>
  %c = icmp ult <vscale x 4 x i64> %x, splat (i64 65536)
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i64> %x, <vscale x 4 x i64> %b
  %e = trunc <vscale x 4 x i64> %d to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %e
}

; Scalable-vector usat-truncate i64 -> i32: threshold 2^32, single narrowing
; step, so one vmax.vx + one vnclipu.wi.
define <vscale x 4 x i32> @test_nxv4i64_nxv4i32(<vscale x 4 x i64> %x) {
; CHECK-LABEL: test_nxv4i64_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmax.vx v12, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v12, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <vscale x 4 x i64> %x, zeroinitializer
  %b = sext <vscale x 4 x i1> %a to <vscale x 4 x i64>
  %c = icmp ult <vscale x 4 x i64> %x, splat (i64 4294967296)
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i64> %x, <vscale x 4 x i64> %b
  %e = trunc <vscale x 4 x i64> %d to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %e
}