; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+v < %s \
; RUN:   | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs -mattr=+v < %s \
; RUN:   | FileCheck %s -check-prefix=RV64I
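
; In the scalar case, (lshr i32 %y, 20) leaves at most the low 12 bits set,
; so only the low 12 bits of (add i32 %x, 4095) survive the and, and 4095 is
; congruent to -1 modulo 1 << 12. The add can therefore be emitted as a
; single addi/addiw with immediate -1 instead of materializing 4095 with a
; lui+addi pair.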
define i32 @and_add_lsr(i32 %x, i32 %y) {
; RV32I-LABEL: and_add_lsr:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a0, a0, -1
; RV32I-NEXT:    srli a1, a1, 20
; RV32I-NEXT:    and a0, a1, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: and_add_lsr:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addiw a0, a0, -1
; RV64I-NEXT:    srliw a1, a1, 20
; RV64I-NEXT:    and a0, a1, a0
; RV64I-NEXT:    ret
  %1 = add i32 %x, 4095
  %2 = lshr i32 %y, 20
  %r = and i32 %2, %1
  ret i32 %r
}

; Make sure we don't crash on fixed-length vectors
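; The immediate fold above is not applied to the vector cases: 4095 is
; materialized with lui+addi and added with vadd.vx, and the shift and mask
; remain as vsrl.vi/vand.vv.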
define <2 x i32> @and_add_lsr_vec(<2 x i32> %x, <2 x i32> %y) {
; RV32I-LABEL: and_add_lsr_vec:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a0, 1
; RV32I-NEXT:    addi a0, a0, -1
; RV32I-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32I-NEXT:    vadd.vx v8, v8, a0
; RV32I-NEXT:    vsrl.vi v9, v9, 20
; RV32I-NEXT:    vand.vv v8, v9, v8
; RV32I-NEXT:    ret
;
; RV64I-LABEL: and_add_lsr_vec:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a0, 1
; RV64I-NEXT:    addi a0, a0, -1
; RV64I-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV64I-NEXT:    vadd.vx v8, v8, a0
; RV64I-NEXT:    vsrl.vi v9, v9, 20
; RV64I-NEXT:    vand.vv v8, v9, v8
; RV64I-NEXT:    ret
  %1 = add <2 x i32> %x, splat (i32 4095)
  %2 = lshr <2 x i32> %y, splat (i32 20)
  %r = and <2 x i32> %2, %1
  ret <2 x i32> %r
}

; Make sure we don't crash on scalable vectors
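; Codegen matches the fixed-length case except for the vector configuration:
; vsetvli with VLMAX at LMUL=1 (m1) instead of vsetivli with a fixed VL of 2
; at LMUL=1/2 (mf2).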
define <vscale x 2 x i32> @and_add_lsr_vec2(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
; RV32I-LABEL: and_add_lsr_vec2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a0, 1
; RV32I-NEXT:    addi a0, a0, -1
; RV32I-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV32I-NEXT:    vadd.vx v8, v8, a0
; RV32I-NEXT:    vsrl.vi v9, v9, 20
; RV32I-NEXT:    vand.vv v8, v9, v8
; RV32I-NEXT:    ret
;
; RV64I-LABEL: and_add_lsr_vec2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a0, 1
; RV64I-NEXT:    addi a0, a0, -1
; RV64I-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV64I-NEXT:    vadd.vx v8, v8, a0
; RV64I-NEXT:    vsrl.vi v9, v9, 20
; RV64I-NEXT:    vand.vv v8, v9, v8
; RV64I-NEXT:    ret
  %1 = add <vscale x 2 x i32> %x, splat (i32 4095)
  %2 = lshr <vscale x 2 x i32> %y, splat (i32 20)
  %r = and <vscale x 2 x i32> %2, %1
  ret <vscale x 2 x i32> %r
}