; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s
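
; This file checks how the guaranteed minimum VLEN, set via the vscale_range
; attributes at the bottom, changes the lowering of a fixed-length
; <512 x i8> add. On RISC-V, vscale = VLEN / 64, so each function below is
; compiled as if for the Zvl*b extension named in its suffix.

; With only VLEN >= 128 guaranteed, a <512 x i8> value needs four LMUL=8
; register groups. In this output the first 256 bytes of %a arrive in
; v8m8/v16m8, the remainder of %a and all of %b are loaded through incoming
; pointers (a1 and a3), the result is stored through a0, and the four partial
; vadd.vv ops force vlenb-scaled spill slots on the stack.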
define <512 x i8> @vadd_v512i8_zvl128(<512 x i8> %a, <512 x i8> %b) #0 {
; CHECK-LABEL: vadd_v512i8_zvl128:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: li a4, 48
; CHECK-NEXT: mul a2, a2, a4
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 5
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: li a4, 40
; CHECK-NEXT: mul a2, a2, a4
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: addi a4, a3, 128
; CHECK-NEXT: addi a5, a3, 384
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vle8.v v8, (a5)
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: li a5, 24
; CHECK-NEXT: mul a2, a2, a5
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: addi a2, a1, 128
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: addi a1, a3, 256
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vle8.v v8, (a2)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vle8.v v24, (a4)
; CHECK-NEXT: vle8.v v0, (a3)
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a2, 24
; CHECK-NEXT: mul a1, a1, a2
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vadd.vv v16, v16, v8
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vadd.vv v24, v8, v24
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a2, 40
; CHECK-NEXT: mul a1, a1, a2
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vadd.vv v0, v8, v0
; CHECK-NEXT: vse8.v v0, (a0)
; CHECK-NEXT: addi a1, a0, 384
; CHECK-NEXT: vse8.v v16, (a1)
; CHECK-NEXT: addi a1, a0, 256
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 4
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vse8.v v24, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 48
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%c = add <512 x i8> %a, %b
ret <512 x i8> %c
}
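
; With VLEN >= 256, each 512-byte vector fits in two m8 groups: %a arrives in
; v8m8/v16m8, %b is loaded from the pointer in a0, and two vadd.vv ops at
; VL=256 finish the job with no spills.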
define <512 x i8> @vadd_v512i8_zvl256(<512 x i8> %a, <512 x i8> %b) #1 {
; CHECK-LABEL: vadd_v512i8_zvl256:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, 256
; CHECK-NEXT: li a2, 256
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vle8.v v0, (a1)
; CHECK-NEXT: vadd.vv v8, v8, v24
; CHECK-NEXT: vadd.vv v16, v16, v0
; CHECK-NEXT: ret
%c = add <512 x i8> %a, %b
ret <512 x i8> %c
}
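
; With VLEN >= 512, a single m8 group holds all 512 bytes, so both operands
; stay in registers and one vadd.vv at VL=512 suffices.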
define <512 x i8> @vadd_v512i8_zvl512(<512 x i8> %a, <512 x i8> %b) #2 {
; CHECK-LABEL: vadd_v512i8_zvl512:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 512
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: ret
%c = add <512 x i8> %a, %b
ret <512 x i8> %c
}
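
; With VLEN >= 1024, LMUL=4 is enough (operands in v8m4 and v12m4).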
define <512 x i8> @vadd_v512i8_zvl1024(<512 x i8> %a, <512 x i8> %b) #3 {
; CHECK-LABEL: vadd_v512i8_zvl1024:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 512
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: ret
%c = add <512 x i8> %a, %b
ret <512 x i8> %c
}
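
; With VLEN >= 2048, LMUL=2 is enough (operands in v8m2 and v10m2).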
define <512 x i8> @vadd_v512i8_zvl2048(<512 x i8> %a, <512 x i8> %b) #4 {
; CHECK-LABEL: vadd_v512i8_zvl2048:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 512
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: ret
%c = add <512 x i8> %a, %b
ret <512 x i8> %c
}
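
; With VLEN >= 4096, each operand fits in a single vector register (v8 and v9).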
define <512 x i8> @vadd_v512i8_zvl4096(<512 x i8> %a, <512 x i8> %b) #5 {
; CHECK-LABEL: vadd_v512i8_zvl4096:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 512
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
%c = add <512 x i8> %a, %b
ret <512 x i8> %c
}
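
; vscale_range minimums of 2, 4, 8, 16, 32, and 64 correspond to guaranteed
; VLENs of at least 128, 256, 512, 1024, 2048, and 4096 bits respectively.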
attributes #0 = { vscale_range(2,1024) }
attributes #1 = { vscale_range(4,1024) }
attributes #2 = { vscale_range(8,1024) }
attributes #3 = { vscale_range(16,1024) }
attributes #4 = { vscale_range(32,1024) }
attributes #5 = { vscale_range(64,1024) }