llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64

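; These tests check the lowering of @llvm.masked.compressstore.* for
; fixed-length FP vectors: the value vector is compressed with vcompress.vm,
; vcpop.m counts the active mask bits, and that count is used as the VL for a
; unit-stride vse store.

; Half-precision (f16) tests: SEW=16, LMUL from mf4 up to m1.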
declare void @llvm.masked.compressstore.v1f16(<1 x half>, ptr, <1 x i1>)
define void @compressstore_v1f16(ptr %base, <1 x half> %v, <1 x i1> %mask) {
; RV32-LABEL: compressstore_v1f16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; RV32-NEXT:    vse16.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v1f16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; RV64-NEXT:    vse16.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v1f16(<1 x half> %v, ptr align 2 %base, <1 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v2f16(<2 x half>, ptr, <2 x i1>)
define void @compressstore_v2f16(ptr %base, <2 x half> %v, <2 x i1> %mask) {
; RV32-LABEL: compressstore_v2f16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; RV32-NEXT:    vse16.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v2f16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; RV64-NEXT:    vse16.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v2f16(<2 x half> %v, ptr align 2 %base, <2 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v4f16(<4 x half>, ptr, <4 x i1>)
define void @compressstore_v4f16(ptr %base, <4 x half> %v, <4 x i1> %mask) {
; RV32-LABEL: compressstore_v4f16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; RV32-NEXT:    vse16.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v4f16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; RV64-NEXT:    vse16.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v4f16(<4 x half> %v, ptr align 2 %base, <4 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v8f16(<8 x half>, ptr, <8 x i1>)
define void @compressstore_v8f16(ptr %base, <8 x half> %v, <8 x i1> %mask) {
; RV32-LABEL: compressstore_v8f16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; RV32-NEXT:    vse16.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v8f16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; RV64-NEXT:    vse16.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v8f16(<8 x half> %v, ptr align 2 %base, <8 x i1> %mask)
  ret void
}

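; Single-precision (f32) tests: SEW=32, LMUL from mf2 up to m2.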
declare void @llvm.masked.compressstore.v1f32(<1 x float>, ptr, <1 x i1>)
define void @compressstore_v1f32(ptr %base, <1 x float> %v, <1 x i1> %mask) {
; RV32-LABEL: compressstore_v1f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; RV32-NEXT:    vse32.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v1f32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; RV64-NEXT:    vse32.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v1f32(<1 x float> %v, ptr align 4 %base, <1 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v2f32(<2 x float>, ptr, <2 x i1>)
define void @compressstore_v2f32(ptr %base, <2 x float> %v, <2 x i1> %mask) {
; RV32-LABEL: compressstore_v2f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; RV32-NEXT:    vse32.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v2f32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; RV64-NEXT:    vse32.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v2f32(<2 x float> %v, ptr align 4 %base, <2 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v4f32(<4 x float>, ptr, <4 x i1>)
define void @compressstore_v4f32(ptr %base, <4 x float> %v, <4 x i1> %mask) {
; RV32-LABEL: compressstore_v4f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; RV32-NEXT:    vse32.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v4f32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; RV64-NEXT:    vse32.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v4f32(<4 x float> %v, ptr align 4 %base, <4 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v8f32(<8 x float>, ptr, <8 x i1>)
define void @compressstore_v8f32(ptr %base, <8 x float> %v, <8 x i1> %mask) {
; RV32-LABEL: compressstore_v8f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vcompress.vm v10, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT:    vse32.v v10, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v8f32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV64-NEXT:    vcompress.vm v10, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT:    vse32.v v10, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v8f32(<8 x float> %v, ptr align 4 %base, <8 x i1> %mask)
  ret void
}

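; Double-precision (f64) tests: SEW=64, LMUL from m1 up to m4.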
declare void @llvm.masked.compressstore.v1f64(<1 x double>, ptr, <1 x i1>)
define void @compressstore_v1f64(ptr %base, <1 x double> %v, <1 x i1> %mask) {
; RV32-LABEL: compressstore_v1f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV32-NEXT:    vse64.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v1f64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vse64.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v1f64(<1 x double> %v, ptr align 8 %base, <1 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v2f64(<2 x double>, ptr, <2 x i1>)
define void @compressstore_v2f64(ptr %base, <2 x double> %v, <2 x i1> %mask) {
; RV32-LABEL: compressstore_v2f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV32-NEXT:    vse64.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v2f64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vse64.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v2f64(<2 x double> %v, ptr align 8 %base, <2 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v4f64(<4 x double>, ptr, <4 x i1>)
define void @compressstore_v4f64(ptr %base, <4 x double> %v, <4 x i1> %mask) {
; RV32-LABEL: compressstore_v4f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT:    vcompress.vm v10, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV32-NEXT:    vse64.v v10, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v4f64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT:    vcompress.vm v10, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vse64.v v10, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v4f64(<4 x double> %v, ptr align 8 %base, <4 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v8f64(<8 x double>, ptr, <8 x i1>)
define void @compressstore_v8f64(ptr %base, <8 x double> %v, <8 x i1> %mask) {
; RV32-LABEL: compressstore_v8f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT:    vcompress.vm v12, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT:    vse64.v v12, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v8f64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT:    vcompress.vm v12, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vse64.v v12, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v8f64(<8 x double> %v, ptr align 8 %base, <8 x i1> %mask)
  ret void
}