; llvm/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-non-power-of-2.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK

; <3 x i8>: non-power-of-2 fixed vector load/add/store should select VL=3, e8, mf4.
define void @vls3i8(ptr align 8 %array) {
; CHECK-LABEL: vls3i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 3, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <3 x i8>, ptr %array, i64 0
  %1 = load <3 x i8>, ptr %array, align 1
  %2 = add <3 x i8> %1, %1
  store <3 x i8> %2, ptr %array, align 1
  ret void
}

; <3 x i32>: non-power-of-2 fixed vector load/add/store should select VL=3, e32, m1.
define void @vls3(ptr align 8 %array) {
; CHECK-LABEL: vls3:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <3 x i32>, ptr %array, i64 0
  %1 = load <3 x i32>, ptr %array, align 4
  %2 = add <3 x i32> %1, %1
  store <3 x i32> %2, ptr %array, align 4
  ret void
}

; <5 x i32>: non-power-of-2 fixed vector load/add/store should select VL=5, e32, m2.
define void @vls5(ptr align 8 %array) {
; CHECK-LABEL: vls5:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 5, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <5 x i32>, ptr %array, i64 0
  %1 = load <5 x i32>, ptr %array, align 4
  %2 = add <5 x i32> %1, %1
  store <5 x i32> %2, ptr %array, align 4
  ret void
}

; <6 x i32>: non-power-of-2 fixed vector load/add/store should select VL=6, e32, m2.
define void @vls6(ptr align 8 %array) {
; CHECK-LABEL: vls6:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 6, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <6 x i32>, ptr %array, i64 0
  %1 = load <6 x i32>, ptr %array, align 4
  %2 = add <6 x i32> %1, %1
  store <6 x i32> %2, ptr %array, align 4
  ret void
}

; <7 x i32>: non-power-of-2 fixed vector load/add/store should select VL=7, e32, m2.
define void @vls7(ptr align 8 %array) {
; CHECK-LABEL: vls7:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 7, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <7 x i32>, ptr %array, i64 0
  %1 = load <7 x i32>, ptr %array, align 4
  %2 = add <7 x i32> %1, %1
  store <7 x i32> %2, ptr %array, align 4
  ret void
}


; <9 x i32>: non-power-of-2 fixed vector load/add/store should select VL=9, e32, m4.
define void @vls9(ptr align 8 %array) {
; CHECK-LABEL: vls9:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 9, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <9 x i32>, ptr %array, i64 0
  %1 = load <9 x i32>, ptr %array, align 4
  %2 = add <9 x i32> %1, %1
  store <9 x i32> %2, ptr %array, align 4
  ret void
}


; <10 x i32>: non-power-of-2 fixed vector load/add/store should select VL=10, e32, m4.
define void @vls10(ptr align 8 %array) {
; CHECK-LABEL: vls10:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 10, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <10 x i32>, ptr %array, i64 0
  %1 = load <10 x i32>, ptr %array, align 4
  %2 = add <10 x i32> %1, %1
  store <10 x i32> %2, ptr %array, align 4
  ret void
}

; <11 x i32>: non-power-of-2 fixed vector load/add/store should select VL=11, e32, m4.
define void @vls11(ptr align 8 %array) {
; CHECK-LABEL: vls11:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 11, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <11 x i32>, ptr %array, i64 0
  %1 = load <11 x i32>, ptr %array, align 4
  %2 = add <11 x i32> %1, %1
  store <11 x i32> %2, ptr %array, align 4
  ret void
}