llvm/test/CodeGen/RISCV/rv32zbb.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32I
; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32ZBB

declare i32 @llvm.ctlz.i32(i32, i1)

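; Without Zbb, ctlz is expanded: shift-or smearing (by 1, 2, 4, 8, 16)
; propagates the most significant set bit into every lower position, the
; result is inverted, and the count falls out as a SWAR popcount using the
; masks 0x55555555, 0x33333333 and 0x0f0f0f0f.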
define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I-LABEL: ctlz_i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    beqz a0, .LBB0_2
; RV32I-NEXT:  # %bb.1: # %cond.false
; RV32I-NEXT:    srli a1, a0, 1
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    srli a1, a0, 2
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    srli a1, a0, 4
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    srli a1, a0, 8
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    srli a1, a0, 16
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    not a0, a0
; RV32I-NEXT:    srli a1, a0, 1
; RV32I-NEXT:    lui a2, 349525
; RV32I-NEXT:    addi a2, a2, 1365
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    lui a1, 209715
; RV32I-NEXT:    addi a1, a1, 819
; RV32I-NEXT:    and a2, a0, a1
; RV32I-NEXT:    srli a0, a0, 2
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    add a0, a2, a0
; RV32I-NEXT:    srli a1, a0, 4
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    lui a1, 61681
; RV32I-NEXT:    addi a1, a1, -241
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    slli a1, a0, 8
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    slli a1, a0, 16
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    srli a0, a0, 24
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB0_2:
; RV32I-NEXT:    li a0, 32
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctlz_i32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    clz a0, a0
; RV32ZBB-NEXT:    ret
  %1 = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
  ret i32 %1
}

declare i64 @llvm.ctlz.i64(i64, i1)

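; On RV32 the 64-bit clz is split across the register pair:
; clz64(x) = hi != 0 ? clz(hi) : 32 + clz(lo).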
define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-LABEL: ctlz_i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a2, 349525
; RV32I-NEXT:    addi a4, a2, 1365
; RV32I-NEXT:    lui a2, 209715
; RV32I-NEXT:    addi a3, a2, 819
; RV32I-NEXT:    lui a2, 61681
; RV32I-NEXT:    addi a2, a2, -241
; RV32I-NEXT:    bnez a1, .LBB1_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    srli a1, a0, 1
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    srli a1, a0, 2
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    srli a1, a0, 4
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    srli a1, a0, 8
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    srli a1, a0, 16
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    not a0, a0
; RV32I-NEXT:    srli a1, a0, 1
; RV32I-NEXT:    and a1, a1, a4
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    and a1, a0, a3
; RV32I-NEXT:    srli a0, a0, 2
; RV32I-NEXT:    and a0, a0, a3
; RV32I-NEXT:    add a0, a1, a0
; RV32I-NEXT:    srli a1, a0, 4
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    and a0, a0, a2
; RV32I-NEXT:    slli a1, a0, 8
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    slli a1, a0, 16
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    srli a0, a0, 24
; RV32I-NEXT:    addi a0, a0, 32
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB1_2:
; RV32I-NEXT:    srli a0, a1, 1
; RV32I-NEXT:    or a0, a1, a0
; RV32I-NEXT:    srli a1, a0, 2
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    srli a1, a0, 4
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    srli a1, a0, 8
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    srli a1, a0, 16
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    not a0, a0
; RV32I-NEXT:    srli a1, a0, 1
; RV32I-NEXT:    and a1, a1, a4
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    and a1, a0, a3
; RV32I-NEXT:    srli a0, a0, 2
; RV32I-NEXT:    and a0, a0, a3
; RV32I-NEXT:    add a0, a1, a0
; RV32I-NEXT:    srli a1, a0, 4
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    and a0, a0, a2
; RV32I-NEXT:    slli a1, a0, 8
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    slli a1, a0, 16
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    srli a0, a0, 24
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctlz_i64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    bnez a1, .LBB1_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    clz a0, a0
; RV32ZBB-NEXT:    addi a0, a0, 32
; RV32ZBB-NEXT:    li a1, 0
; RV32ZBB-NEXT:    ret
; RV32ZBB-NEXT:  .LBB1_2:
; RV32ZBB-NEXT:    clz a0, a1
; RV32ZBB-NEXT:    li a1, 0
; RV32ZBB-NEXT:    ret
  %1 = call i64 @llvm.ctlz.i64(i64 %a, i1 false)
  ret i64 %1
}

declare i32 @llvm.cttz.i32(i32, i1)

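; Without Zbb, cttz isolates the lowest set bit with a & -a, multiplies by the
; de Bruijn constant 0x077cb531 (via a __mulsi3 libcall, since plain RV32I has
; no mul), and uses the top five bits of the product to index a 32-entry
; lookup table (.LCPI2_0).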
define i32 @cttz_i32(i32 %a) nounwind {
; RV32I-LABEL: cttz_i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    beqz a0, .LBB2_2
; RV32I-NEXT:  # %bb.1: # %cond.false
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    neg a1, a0
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    lui a1, 30667
; RV32I-NEXT:    addi a1, a1, 1329
; RV32I-NEXT:    call __mulsi3
; RV32I-NEXT:    srli a0, a0, 27
; RV32I-NEXT:    lui a1, %hi(.LCPI2_0)
; RV32I-NEXT:    addi a1, a1, %lo(.LCPI2_0)
; RV32I-NEXT:    add a0, a1, a0
; RV32I-NEXT:    lbu a0, 0(a0)
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB2_2:
; RV32I-NEXT:    li a0, 32
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: cttz_i32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    ctz a0, a0
; RV32ZBB-NEXT:    ret
  %1 = call i32 @llvm.cttz.i32(i32 %a, i1 false)
  ret i32 %1
}

declare i64 @llvm.cttz.i64(i64, i1)

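; The 64-bit cttz picks the half that contains the lowest set bit:
; ctz64(x) = lo != 0 ? ctz(lo) : 32 + ctz(hi).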
define i64 @cttz_i64(i64 %a) nounwind {
; RV32I-LABEL: cttz_i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s2, a1
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    neg a0, a0
; RV32I-NEXT:    and a0, s0, a0
; RV32I-NEXT:    lui a1, 30667
; RV32I-NEXT:    addi s3, a1, 1329
; RV32I-NEXT:    mv a1, s3
; RV32I-NEXT:    call __mulsi3
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    lui s4, %hi(.LCPI3_0)
; RV32I-NEXT:    addi s4, s4, %lo(.LCPI3_0)
; RV32I-NEXT:    neg a0, s2
; RV32I-NEXT:    and a0, s2, a0
; RV32I-NEXT:    mv a1, s3
; RV32I-NEXT:    call __mulsi3
; RV32I-NEXT:    bnez s2, .LBB3_3
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    li a0, 32
; RV32I-NEXT:    beqz s0, .LBB3_4
; RV32I-NEXT:  .LBB3_2:
; RV32I-NEXT:    srli s1, s1, 27
; RV32I-NEXT:    add s1, s4, s1
; RV32I-NEXT:    lbu a0, 0(s1)
; RV32I-NEXT:    j .LBB3_5
; RV32I-NEXT:  .LBB3_3:
; RV32I-NEXT:    srli a0, a0, 27
; RV32I-NEXT:    add a0, s4, a0
; RV32I-NEXT:    lbu a0, 0(a0)
; RV32I-NEXT:    bnez s0, .LBB3_2
; RV32I-NEXT:  .LBB3_4:
; RV32I-NEXT:    addi a0, a0, 32
; RV32I-NEXT:  .LBB3_5:
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: cttz_i64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    bnez a0, .LBB3_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    ctz a0, a1
; RV32ZBB-NEXT:    addi a0, a0, 32
; RV32ZBB-NEXT:    li a1, 0
; RV32ZBB-NEXT:    ret
; RV32ZBB-NEXT:  .LBB3_2:
; RV32ZBB-NEXT:    ctz a0, a0
; RV32ZBB-NEXT:    li a1, 0
; RV32ZBB-NEXT:    ret
  %1 = call i64 @llvm.cttz.i64(i64 %a, i1 false)
  ret i64 %1
}

declare i32 @llvm.ctpop.i32(i32)

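; Without Zbb, ctpop uses the classic SWAR reduction: x - ((x >> 1) &
; 0x55555555) yields 2-bit counts, masking with 0x33333333 and adding yields
; 4-bit counts, 0x0f0f0f0f yields byte counts, and a shift-and-add byte sum
; (a multiply by 0x01010101 done with slli/add) leaves the result in the top
; byte, extracted by the final shift right of 24.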
define i32 @ctpop_i32(i32 %a) nounwind {
; RV32I-LABEL: ctpop_i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srli a1, a0, 1
; RV32I-NEXT:    lui a2, 349525
; RV32I-NEXT:    addi a2, a2, 1365
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    lui a1, 209715
; RV32I-NEXT:    addi a1, a1, 819
; RV32I-NEXT:    and a2, a0, a1
; RV32I-NEXT:    srli a0, a0, 2
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    add a0, a2, a0
; RV32I-NEXT:    srli a1, a0, 4
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    lui a1, 61681
; RV32I-NEXT:    addi a1, a1, -241
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    slli a1, a0, 8
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    slli a1, a0, 16
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    srli a0, a0, 24
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_i32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    ret
  %1 = call i32 @llvm.ctpop.i32(i32 %a)
  ret i32 %1
}

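; popcount(a) < 2 iff clearing the lowest set bit leaves zero, so the RV32I
; lowering avoids a popcount entirely: (a & (a - 1)) == 0.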
define i1 @ctpop_i32_ult_two(i32 signext %a) nounwind {
; RV32I-LABEL: ctpop_i32_ult_two:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a1, a0, -1
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_i32_ult_two:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    sltiu a0, a0, 2
; RV32ZBB-NEXT:    ret
  %1 = call i32 @llvm.ctpop.i32(i32 %a)
  %2 = icmp ult i32 %1, 2
  ret i1 %2
}

define i1 @ctpop_i32_ugt_one(i32 signext %a) nounwind {
; RV32I-LABEL: ctpop_i32_ugt_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a1, a0, -1
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_i32_ugt_one:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    sltiu a0, a0, 2
; RV32ZBB-NEXT:    xori a0, a0, 1
; RV32ZBB-NEXT:    ret
  %1 = call i32 @llvm.ctpop.i32(i32 %a)
  %2 = icmp ugt i32 %1, 1
  ret i1 %2
}

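; popcount(a) == 1 is a power-of-two test: when exactly one bit is set,
; a ^ (a - 1) covers that bit and everything below it, so it exceeds a - 1.
; The RV32I lowering computes sltu(a - 1, a ^ (a - 1)).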
define i1 @ctpop_i32_eq_one(i32 signext %a) nounwind {
; RV32I-LABEL: ctpop_i32_eq_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a1, a0, -1
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sltu a0, a1, a0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_i32_eq_one:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    addi a0, a0, -1
; RV32ZBB-NEXT:    seqz a0, a0
; RV32ZBB-NEXT:    ret
  %1 = call i32 @llvm.ctpop.i32(i32 %a)
  %2 = icmp eq i32 %1, 1
  ret i1 %2
}

define i1 @ctpop_i32_ne_one(i32 signext %a) nounwind {
; RV32I-LABEL: ctpop_i32_ne_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a1, a0, -1
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sltu a0, a1, a0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_i32_ne_one:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    addi a0, a0, -1
; RV32ZBB-NEXT:    snez a0, a0
; RV32ZBB-NEXT:    ret
  %1 = call i32 @llvm.ctpop.i32(i32 %a)
  %2 = icmp ne i32 %1, 1
  ret i1 %2
}

declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)

define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
; RV32I-LABEL: ctpop_v2i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srli a2, a0, 1
; RV32I-NEXT:    lui a3, 349525
; RV32I-NEXT:    addi a3, a3, 1365
; RV32I-NEXT:    and a2, a2, a3
; RV32I-NEXT:    sub a0, a0, a2
; RV32I-NEXT:    lui a2, 209715
; RV32I-NEXT:    addi a2, a2, 819
; RV32I-NEXT:    and a4, a0, a2
; RV32I-NEXT:    srli a0, a0, 2
; RV32I-NEXT:    and a0, a0, a2
; RV32I-NEXT:    add a0, a4, a0
; RV32I-NEXT:    srli a4, a0, 4
; RV32I-NEXT:    add a0, a0, a4
; RV32I-NEXT:    lui a4, 61681
; RV32I-NEXT:    addi a4, a4, -241
; RV32I-NEXT:    and a0, a0, a4
; RV32I-NEXT:    slli a5, a0, 8
; RV32I-NEXT:    add a0, a0, a5
; RV32I-NEXT:    slli a5, a0, 16
; RV32I-NEXT:    add a0, a0, a5
; RV32I-NEXT:    srli a0, a0, 24
; RV32I-NEXT:    srli a5, a1, 1
; RV32I-NEXT:    and a3, a5, a3
; RV32I-NEXT:    sub a1, a1, a3
; RV32I-NEXT:    and a3, a1, a2
; RV32I-NEXT:    srli a1, a1, 2
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    add a1, a3, a1
; RV32I-NEXT:    srli a2, a1, 4
; RV32I-NEXT:    add a1, a1, a2
; RV32I-NEXT:    and a1, a1, a4
; RV32I-NEXT:    slli a2, a1, 8
; RV32I-NEXT:    add a1, a1, a2
; RV32I-NEXT:    slli a2, a1, 16
; RV32I-NEXT:    add a1, a1, a2
; RV32I-NEXT:    srli a1, a1, 24
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_v2i32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    ret
  %1 = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %a)
  ret <2 x i32> %1
}

define <2 x i1> @ctpop_v2i32_ult_two(<2 x i32> %a) nounwind {
; RV32I-LABEL: ctpop_v2i32_ult_two:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a2, a0, -1
; RV32I-NEXT:    and a0, a0, a2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    addi a2, a1, -1
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    seqz a1, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_v2i32_ult_two:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    sltiu a0, a0, 2
; RV32ZBB-NEXT:    sltiu a1, a1, 2
; RV32ZBB-NEXT:    ret
  %1 = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %a)
  %2 = icmp ult <2 x i32> %1, <i32 2, i32 2>
  ret <2 x i1> %2
}

define <2 x i1> @ctpop_v2i32_ugt_one(<2 x i32> %a) nounwind {
; RV32I-LABEL: ctpop_v2i32_ugt_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a2, a0, -1
; RV32I-NEXT:    and a0, a0, a2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    addi a2, a1, -1
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    snez a1, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_v2i32_ugt_one:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    sltiu a0, a0, 2
; RV32ZBB-NEXT:    xori a0, a0, 1
; RV32ZBB-NEXT:    sltiu a1, a1, 2
; RV32ZBB-NEXT:    xori a1, a1, 1
; RV32ZBB-NEXT:    ret
  %1 = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %a)
  %2 = icmp ugt <2 x i32> %1, <i32 1, i32 1>
  ret <2 x i1> %2
}

define <2 x i1> @ctpop_v2i32_eq_one(<2 x i32> %a) nounwind {
; RV32I-LABEL: ctpop_v2i32_eq_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a2, a0, -1
; RV32I-NEXT:    xor a0, a0, a2
; RV32I-NEXT:    sltu a0, a2, a0
; RV32I-NEXT:    addi a2, a1, -1
; RV32I-NEXT:    xor a1, a1, a2
; RV32I-NEXT:    sltu a1, a2, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_v2i32_eq_one:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    addi a0, a0, -1
; RV32ZBB-NEXT:    seqz a0, a0
; RV32ZBB-NEXT:    addi a1, a1, -1
; RV32ZBB-NEXT:    seqz a1, a1
; RV32ZBB-NEXT:    ret
  %1 = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %a)
  %2 = icmp eq <2 x i32> %1, <i32 1, i32 1>
  ret <2 x i1> %2
}

define <2 x i1> @ctpop_v2i32_ne_one(<2 x i32> %a) nounwind {
; RV32I-LABEL: ctpop_v2i32_ne_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a2, a0, -1
; RV32I-NEXT:    xor a0, a0, a2
; RV32I-NEXT:    sltu a0, a2, a0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    addi a2, a1, -1
; RV32I-NEXT:    xor a1, a1, a2
; RV32I-NEXT:    sltu a1, a2, a1
; RV32I-NEXT:    xori a1, a1, 1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_v2i32_ne_one:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    addi a0, a0, -1
; RV32ZBB-NEXT:    snez a0, a0
; RV32ZBB-NEXT:    addi a1, a1, -1
; RV32ZBB-NEXT:    snez a1, a1
; RV32ZBB-NEXT:    ret
  %1 = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %a)
  %2 = icmp ne <2 x i32> %1, <i32 1, i32 1>
  ret <2 x i1> %2
}

declare i64 @llvm.ctpop.i64(i64)

define i64 @ctpop_i64(i64 %a) nounwind {
; RV32I-LABEL: ctpop_i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srli a2, a1, 1
; RV32I-NEXT:    lui a3, 349525
; RV32I-NEXT:    addi a3, a3, 1365
; RV32I-NEXT:    and a2, a2, a3
; RV32I-NEXT:    sub a1, a1, a2
; RV32I-NEXT:    lui a2, 209715
; RV32I-NEXT:    addi a2, a2, 819
; RV32I-NEXT:    and a4, a1, a2
; RV32I-NEXT:    srli a1, a1, 2
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    add a1, a4, a1
; RV32I-NEXT:    srli a4, a1, 4
; RV32I-NEXT:    add a1, a1, a4
; RV32I-NEXT:    lui a4, 61681
; RV32I-NEXT:    addi a4, a4, -241
; RV32I-NEXT:    and a1, a1, a4
; RV32I-NEXT:    slli a5, a1, 8
; RV32I-NEXT:    add a1, a1, a5
; RV32I-NEXT:    slli a5, a1, 16
; RV32I-NEXT:    add a1, a1, a5
; RV32I-NEXT:    srli a1, a1, 24
; RV32I-NEXT:    srli a5, a0, 1
; RV32I-NEXT:    and a3, a5, a3
; RV32I-NEXT:    sub a0, a0, a3
; RV32I-NEXT:    and a3, a0, a2
; RV32I-NEXT:    srli a0, a0, 2
; RV32I-NEXT:    and a0, a0, a2
; RV32I-NEXT:    add a0, a3, a0
; RV32I-NEXT:    srli a2, a0, 4
; RV32I-NEXT:    add a0, a0, a2
; RV32I-NEXT:    and a0, a0, a4
; RV32I-NEXT:    slli a2, a0, 8
; RV32I-NEXT:    add a0, a0, a2
; RV32I-NEXT:    slli a2, a0, 16
; RV32I-NEXT:    add a0, a0, a2
; RV32I-NEXT:    srli a0, a0, 24
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_i64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    add a0, a0, a1
; RV32ZBB-NEXT:    li a1, 0
; RV32ZBB-NEXT:    ret
  %1 = call i64 @llvm.ctpop.i64(i64 %a)
  ret i64 %1
}

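; The 64-bit clear-lowest-bit trick needs a borrow: the low half uses
; lo & (lo - 1), and the high half uses hi & (hi - (lo == 0)) so the decrement
; borrows into the upper word only when the low word is zero.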
define i1 @ctpop_i64_ult_two(i64 %a) nounwind {
; RV32I-LABEL: ctpop_i64_ult_two:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a2, a0, -1
; RV32I-NEXT:    and a2, a0, a2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    sub a0, a1, a0
; RV32I-NEXT:    and a0, a1, a0
; RV32I-NEXT:    or a0, a2, a0
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_i64_ult_two:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    add a0, a0, a1
; RV32ZBB-NEXT:    sltiu a0, a0, 2
; RV32ZBB-NEXT:    ret
  %1 = call i64 @llvm.ctpop.i64(i64 %a)
  %2 = icmp ult i64 %1, 2
  ret i1 %2
}

define i1 @ctpop_i64_ugt_one(i64 %a) nounwind {
; RV32I-LABEL: ctpop_i64_ugt_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a2, a0, -1
; RV32I-NEXT:    and a2, a0, a2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    sub a0, a1, a0
; RV32I-NEXT:    and a0, a1, a0
; RV32I-NEXT:    or a0, a2, a0
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_i64_ugt_one:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    add a0, a0, a1
; RV32ZBB-NEXT:    sltiu a0, a0, 2
; RV32ZBB-NEXT:    xori a0, a0, 1
; RV32ZBB-NEXT:    ret
  %1 = call i64 @llvm.ctpop.i64(i64 %a)
  %2 = icmp ugt i64 %1, 1
  ret i1 %2
}

define i1 @ctpop_i64_eq_one(i64 %a) nounwind {
; RV32I-LABEL: ctpop_i64_eq_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    beqz a1, .LBB17_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    sub a0, a1, a0
; RV32I-NEXT:    xor a1, a1, a0
; RV32I-NEXT:    sltu a0, a0, a1
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB17_2:
; RV32I-NEXT:    addi a1, a0, -1
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sltu a0, a1, a0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_i64_eq_one:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    add a0, a0, a1
; RV32ZBB-NEXT:    addi a0, a0, -1
; RV32ZBB-NEXT:    seqz a0, a0
; RV32ZBB-NEXT:    ret
  %1 = call i64 @llvm.ctpop.i64(i64 %a)
  %2 = icmp eq i64 %1, 1
  ret i1 %2
}

define i1 @ctpop_i64_ne_one(i64 %a) nounwind {
; RV32I-LABEL: ctpop_i64_ne_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    beqz a1, .LBB18_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    sub a0, a1, a0
; RV32I-NEXT:    xor a1, a1, a0
; RV32I-NEXT:    sltu a0, a0, a1
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB18_2:
; RV32I-NEXT:    addi a1, a0, -1
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sltu a0, a1, a0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_i64_ne_one:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    add a0, a0, a1
; RV32ZBB-NEXT:    addi a0, a0, -1
; RV32ZBB-NEXT:    snez a0, a0
; RV32ZBB-NEXT:    ret
  %1 = call i64 @llvm.ctpop.i64(i64 %a)
  %2 = icmp ne i64 %1, 1
  ret i1 %2
}

declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)

define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
; RV32I-LABEL: ctpop_v2i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lw a3, 4(a1)
; RV32I-NEXT:    lw a4, 0(a1)
; RV32I-NEXT:    lw a2, 8(a1)
; RV32I-NEXT:    lw a1, 12(a1)
; RV32I-NEXT:    srli a5, a3, 1
; RV32I-NEXT:    lui a6, 349525
; RV32I-NEXT:    addi a6, a6, 1365
; RV32I-NEXT:    and a5, a5, a6
; RV32I-NEXT:    sub a3, a3, a5
; RV32I-NEXT:    lui a5, 209715
; RV32I-NEXT:    addi a5, a5, 819
; RV32I-NEXT:    and a7, a3, a5
; RV32I-NEXT:    srli a3, a3, 2
; RV32I-NEXT:    and a3, a3, a5
; RV32I-NEXT:    add a3, a7, a3
; RV32I-NEXT:    srli a7, a3, 4
; RV32I-NEXT:    add a3, a3, a7
; RV32I-NEXT:    lui a7, 61681
; RV32I-NEXT:    addi a7, a7, -241
; RV32I-NEXT:    and a3, a3, a7
; RV32I-NEXT:    slli t0, a3, 8
; RV32I-NEXT:    add a3, a3, t0
; RV32I-NEXT:    slli t0, a3, 16
; RV32I-NEXT:    add a3, a3, t0
; RV32I-NEXT:    srli a3, a3, 24
; RV32I-NEXT:    srli t0, a4, 1
; RV32I-NEXT:    and t0, t0, a6
; RV32I-NEXT:    sub a4, a4, t0
; RV32I-NEXT:    and t0, a4, a5
; RV32I-NEXT:    srli a4, a4, 2
; RV32I-NEXT:    and a4, a4, a5
; RV32I-NEXT:    add a4, t0, a4
; RV32I-NEXT:    srli t0, a4, 4
; RV32I-NEXT:    add a4, a4, t0
; RV32I-NEXT:    and a4, a4, a7
; RV32I-NEXT:    slli t0, a4, 8
; RV32I-NEXT:    add a4, a4, t0
; RV32I-NEXT:    slli t0, a4, 16
; RV32I-NEXT:    add a4, a4, t0
; RV32I-NEXT:    srli a4, a4, 24
; RV32I-NEXT:    add a3, a4, a3
; RV32I-NEXT:    srli a4, a1, 1
; RV32I-NEXT:    and a4, a4, a6
; RV32I-NEXT:    sub a1, a1, a4
; RV32I-NEXT:    and a4, a1, a5
; RV32I-NEXT:    srli a1, a1, 2
; RV32I-NEXT:    and a1, a1, a5
; RV32I-NEXT:    add a1, a4, a1
; RV32I-NEXT:    srli a4, a1, 4
; RV32I-NEXT:    add a1, a1, a4
; RV32I-NEXT:    and a1, a1, a7
; RV32I-NEXT:    slli a4, a1, 8
; RV32I-NEXT:    add a1, a1, a4
; RV32I-NEXT:    slli a4, a1, 16
; RV32I-NEXT:    add a1, a1, a4
; RV32I-NEXT:    srli a1, a1, 24
; RV32I-NEXT:    srli a4, a2, 1
; RV32I-NEXT:    and a4, a4, a6
; RV32I-NEXT:    sub a2, a2, a4
; RV32I-NEXT:    and a4, a2, a5
; RV32I-NEXT:    srli a2, a2, 2
; RV32I-NEXT:    and a2, a2, a5
; RV32I-NEXT:    add a2, a4, a2
; RV32I-NEXT:    srli a4, a2, 4
; RV32I-NEXT:    add a2, a2, a4
; RV32I-NEXT:    and a2, a2, a7
; RV32I-NEXT:    slli a4, a2, 8
; RV32I-NEXT:    add a2, a2, a4
; RV32I-NEXT:    slli a4, a2, 16
; RV32I-NEXT:    add a2, a2, a4
; RV32I-NEXT:    srli a2, a2, 24
; RV32I-NEXT:    add a1, a2, a1
; RV32I-NEXT:    sw zero, 12(a0)
; RV32I-NEXT:    sw zero, 4(a0)
; RV32I-NEXT:    sw a1, 8(a0)
; RV32I-NEXT:    sw a3, 0(a0)
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_v2i64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    lw a2, 4(a1)
; RV32ZBB-NEXT:    lw a3, 0(a1)
; RV32ZBB-NEXT:    lw a4, 8(a1)
; RV32ZBB-NEXT:    lw a1, 12(a1)
; RV32ZBB-NEXT:    cpop a2, a2
; RV32ZBB-NEXT:    cpop a3, a3
; RV32ZBB-NEXT:    add a2, a3, a2
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    cpop a3, a4
; RV32ZBB-NEXT:    add a1, a3, a1
; RV32ZBB-NEXT:    sw zero, 12(a0)
; RV32ZBB-NEXT:    sw zero, 4(a0)
; RV32ZBB-NEXT:    sw a1, 8(a0)
; RV32ZBB-NEXT:    sw a2, 0(a0)
; RV32ZBB-NEXT:    ret
  %1 = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
  ret <2 x i64> %1
}

define <2 x i1> @ctpop_v2i64_ult_two(<2 x i64> %a) nounwind {
; RV32I-LABEL: ctpop_v2i64_ult_two:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lw a1, 0(a0)
; RV32I-NEXT:    lw a2, 4(a0)
; RV32I-NEXT:    lw a3, 8(a0)
; RV32I-NEXT:    lw a4, 12(a0)
; RV32I-NEXT:    addi a0, a1, -1
; RV32I-NEXT:    and a0, a1, a0
; RV32I-NEXT:    seqz a1, a1
; RV32I-NEXT:    sub a1, a2, a1
; RV32I-NEXT:    and a1, a2, a1
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    addi a1, a3, -1
; RV32I-NEXT:    and a1, a3, a1
; RV32I-NEXT:    seqz a2, a3
; RV32I-NEXT:    sub a2, a4, a2
; RV32I-NEXT:    and a2, a4, a2
; RV32I-NEXT:    or a1, a1, a2
; RV32I-NEXT:    seqz a1, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_v2i64_ult_two:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    lw a1, 12(a0)
; RV32ZBB-NEXT:    lw a2, 8(a0)
; RV32ZBB-NEXT:    lw a3, 0(a0)
; RV32ZBB-NEXT:    lw a0, 4(a0)
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    cpop a2, a2
; RV32ZBB-NEXT:    add a1, a2, a1
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    cpop a2, a3
; RV32ZBB-NEXT:    add a0, a2, a0
; RV32ZBB-NEXT:    sltiu a0, a0, 2
; RV32ZBB-NEXT:    sltiu a1, a1, 2
; RV32ZBB-NEXT:    ret
  %1 = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
  %2 = icmp ult <2 x i64> %1, <i64 2, i64 2>
  ret <2 x i1> %2
}

define <2 x i1> @ctpop_v2i64_ugt_one(<2 x i64> %a) nounwind {
; RV32I-LABEL: ctpop_v2i64_ugt_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lw a1, 0(a0)
; RV32I-NEXT:    lw a2, 4(a0)
; RV32I-NEXT:    lw a3, 8(a0)
; RV32I-NEXT:    lw a4, 12(a0)
; RV32I-NEXT:    addi a0, a1, -1
; RV32I-NEXT:    and a0, a1, a0
; RV32I-NEXT:    seqz a1, a1
; RV32I-NEXT:    sub a1, a2, a1
; RV32I-NEXT:    and a1, a2, a1
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    addi a1, a3, -1
; RV32I-NEXT:    and a1, a3, a1
; RV32I-NEXT:    seqz a2, a3
; RV32I-NEXT:    sub a2, a4, a2
; RV32I-NEXT:    and a2, a4, a2
; RV32I-NEXT:    or a1, a1, a2
; RV32I-NEXT:    snez a1, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_v2i64_ugt_one:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    lw a1, 12(a0)
; RV32ZBB-NEXT:    lw a2, 8(a0)
; RV32ZBB-NEXT:    lw a3, 0(a0)
; RV32ZBB-NEXT:    lw a0, 4(a0)
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    cpop a2, a2
; RV32ZBB-NEXT:    add a1, a2, a1
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    cpop a2, a3
; RV32ZBB-NEXT:    add a0, a2, a0
; RV32ZBB-NEXT:    sltiu a0, a0, 2
; RV32ZBB-NEXT:    xori a0, a0, 1
; RV32ZBB-NEXT:    sltiu a1, a1, 2
; RV32ZBB-NEXT:    xori a1, a1, 1
; RV32ZBB-NEXT:    ret
  %1 = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
  %2 = icmp ugt <2 x i64> %1, <i64 1, i64 1>
  ret <2 x i1> %2
}

define <2 x i1> @ctpop_v2i64_eq_one(<2 x i64> %a) nounwind {
; RV32I-LABEL: ctpop_v2i64_eq_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    lw a3, 4(a1)
; RV32I-NEXT:    lw a2, 12(a1)
; RV32I-NEXT:    beqz a3, .LBB22_3
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    sub a0, a3, a0
; RV32I-NEXT:    xor a3, a3, a0
; RV32I-NEXT:    sltu a0, a0, a3
; RV32I-NEXT:    lw a1, 8(a1)
; RV32I-NEXT:    bnez a2, .LBB22_4
; RV32I-NEXT:  .LBB22_2:
; RV32I-NEXT:    addi a2, a1, -1
; RV32I-NEXT:    xor a1, a1, a2
; RV32I-NEXT:    sltu a1, a2, a1
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB22_3:
; RV32I-NEXT:    addi a3, a0, -1
; RV32I-NEXT:    xor a0, a0, a3
; RV32I-NEXT:    sltu a0, a3, a0
; RV32I-NEXT:    lw a1, 8(a1)
; RV32I-NEXT:    beqz a2, .LBB22_2
; RV32I-NEXT:  .LBB22_4:
; RV32I-NEXT:    seqz a1, a1
; RV32I-NEXT:    sub a1, a2, a1
; RV32I-NEXT:    xor a2, a2, a1
; RV32I-NEXT:    sltu a1, a1, a2
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_v2i64_eq_one:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    lw a1, 12(a0)
; RV32ZBB-NEXT:    lw a2, 8(a0)
; RV32ZBB-NEXT:    lw a3, 0(a0)
; RV32ZBB-NEXT:    lw a0, 4(a0)
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    cpop a2, a2
; RV32ZBB-NEXT:    add a1, a2, a1
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    cpop a2, a3
; RV32ZBB-NEXT:    add a0, a2, a0
; RV32ZBB-NEXT:    addi a0, a0, -1
; RV32ZBB-NEXT:    seqz a0, a0
; RV32ZBB-NEXT:    addi a1, a1, -1
; RV32ZBB-NEXT:    seqz a1, a1
; RV32ZBB-NEXT:    ret
  %1 = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
  %2 = icmp eq <2 x i64> %1, <i64 1, i64 1>
  ret <2 x i1> %2
}

define <2 x i1> @ctpop_v2i64_ne_one(<2 x i64> %a) nounwind {
; RV32I-LABEL: ctpop_v2i64_ne_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lw a2, 0(a0)
; RV32I-NEXT:    lw a3, 4(a0)
; RV32I-NEXT:    lw a1, 12(a0)
; RV32I-NEXT:    beqz a3, .LBB23_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    seqz a2, a2
; RV32I-NEXT:    sub a2, a3, a2
; RV32I-NEXT:    xor a3, a3, a2
; RV32I-NEXT:    sltu a2, a2, a3
; RV32I-NEXT:    j .LBB23_3
; RV32I-NEXT:  .LBB23_2:
; RV32I-NEXT:    addi a3, a2, -1
; RV32I-NEXT:    xor a2, a2, a3
; RV32I-NEXT:    sltu a2, a3, a2
; RV32I-NEXT:  .LBB23_3:
; RV32I-NEXT:    lw a3, 8(a0)
; RV32I-NEXT:    xori a0, a2, 1
; RV32I-NEXT:    beqz a1, .LBB23_5
; RV32I-NEXT:  # %bb.4:
; RV32I-NEXT:    seqz a2, a3
; RV32I-NEXT:    sub a2, a1, a2
; RV32I-NEXT:    xor a1, a1, a2
; RV32I-NEXT:    sltu a1, a2, a1
; RV32I-NEXT:    xori a1, a1, 1
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB23_5:
; RV32I-NEXT:    addi a1, a3, -1
; RV32I-NEXT:    xor a3, a3, a1
; RV32I-NEXT:    sltu a1, a1, a3
; RV32I-NEXT:    xori a1, a1, 1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: ctpop_v2i64_ne_one:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    lw a1, 12(a0)
; RV32ZBB-NEXT:    lw a2, 8(a0)
; RV32ZBB-NEXT:    lw a3, 0(a0)
; RV32ZBB-NEXT:    lw a0, 4(a0)
; RV32ZBB-NEXT:    cpop a1, a1
; RV32ZBB-NEXT:    cpop a2, a2
; RV32ZBB-NEXT:    add a1, a2, a1
; RV32ZBB-NEXT:    cpop a0, a0
; RV32ZBB-NEXT:    cpop a2, a3
; RV32ZBB-NEXT:    add a0, a2, a0
; RV32ZBB-NEXT:    addi a0, a0, -1
; RV32ZBB-NEXT:    snez a0, a0
; RV32ZBB-NEXT:    addi a1, a1, -1
; RV32ZBB-NEXT:    snez a1, a1
; RV32ZBB-NEXT:    ret
  %1 = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
  %2 = icmp ne <2 x i64> %1, <i64 1, i64 1>
  ret <2 x i1> %2
}

define i32 @sextb_i32(i32 %a) nounwind {
; RV32I-LABEL: sextb_i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a0, a0, 24
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: sextb_i32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    sext.b a0, a0
; RV32ZBB-NEXT:    ret
  %shl = shl i32 %a, 24
  %shr = ashr exact i32 %shl, 24
  ret i32 %shr
}

define i64 @sextb_i64(i64 %a) nounwind {
; RV32I-LABEL: sextb_i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 24
; RV32I-NEXT:    srai a0, a1, 24
; RV32I-NEXT:    srai a1, a1, 31
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: sextb_i64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    sext.b a0, a0
; RV32ZBB-NEXT:    srai a1, a0, 31
; RV32ZBB-NEXT:    ret
  %shl = shl i64 %a, 56
  %shr = ashr exact i64 %shl, 56
  ret i64 %shr
}

define i32 @sexth_i32(i32 %a) nounwind {
; RV32I-LABEL: sexth_i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srai a0, a0, 16
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: sexth_i32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    sext.h a0, a0
; RV32ZBB-NEXT:    ret
  %shl = shl i32 %a, 16
  %shr = ashr exact i32 %shl, 16
  ret i32 %shr
}

define i64 @sexth_i64(i64 %a) nounwind {
; RV32I-LABEL: sexth_i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 16
; RV32I-NEXT:    srai a0, a1, 16
; RV32I-NEXT:    srai a1, a1, 31
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: sexth_i64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    sext.h a0, a0
; RV32ZBB-NEXT:    srai a1, a0, 31
; RV32ZBB-NEXT:    ret
  %shl = shl i64 %a, 48
  %shr = ashr exact i64 %shl, 48
  ret i64 %shr
}

define i32 @min_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: min_i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    blt a0, a1, .LBB28_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:  .LBB28_2:
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: min_i32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    min a0, a0, a1
; RV32ZBB-NEXT:    ret
  %cmp = icmp slt i32 %a, %b
  %cond = select i1 %cmp, i32 %a, i32 %b
  ret i32 %cond
}

; Because i64 code patterns are not matched directly on RV32, some i64
; patterns do not yet map to any bit-manipulation instruction on RV32.
; This test is kept here in case future expansions of the Bitmanip
; extensions introduce instructions suitable for this pattern.

define i64 @min_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: min_i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    beq a1, a3, .LBB29_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    slt a4, a1, a3
; CHECK-NEXT:    beqz a4, .LBB29_3
; CHECK-NEXT:    j .LBB29_4
; CHECK-NEXT:  .LBB29_2:
; CHECK-NEXT:    sltu a4, a0, a2
; CHECK-NEXT:    bnez a4, .LBB29_4
; CHECK-NEXT:  .LBB29_3:
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    mv a1, a3
; CHECK-NEXT:  .LBB29_4:
; CHECK-NEXT:    ret
  %cmp = icmp slt i64 %a, %b
  %cond = select i1 %cmp, i64 %a, i64 %b
  ret i64 %cond
}

define i32 @max_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: max_i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    blt a1, a0, .LBB30_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:  .LBB30_2:
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: max_i32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    max a0, a0, a1
; RV32ZBB-NEXT:    ret
  %cmp = icmp sgt i32 %a, %b
  %cond = select i1 %cmp, i32 %a, i32 %b
  ret i32 %cond
}

; Because i64 code patterns are not matched directly on RV32, some i64
; patterns do not yet map to any bit-manipulation instruction on RV32.
; This test is kept here in case future expansions of the Bitmanip
; extensions introduce instructions suitable for this pattern.

define i64 @max_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: max_i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    beq a1, a3, .LBB31_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    slt a4, a3, a1
; CHECK-NEXT:    beqz a4, .LBB31_3
; CHECK-NEXT:    j .LBB31_4
; CHECK-NEXT:  .LBB31_2:
; CHECK-NEXT:    sltu a4, a2, a0
; CHECK-NEXT:    bnez a4, .LBB31_4
; CHECK-NEXT:  .LBB31_3:
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    mv a1, a3
; CHECK-NEXT:  .LBB31_4:
; CHECK-NEXT:    ret
  %cmp = icmp sgt i64 %a, %b
  %cond = select i1 %cmp, i64 %a, i64 %b
  ret i64 %cond
}

define i32 @minu_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: minu_i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    bltu a0, a1, .LBB32_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:  .LBB32_2:
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: minu_i32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    minu a0, a0, a1
; RV32ZBB-NEXT:    ret
  %cmp = icmp ult i32 %a, %b
  %cond = select i1 %cmp, i32 %a, i32 %b
  ret i32 %cond
}

; Because i64 code patterns are not matched directly on RV32, some i64
; patterns do not yet map to any bit-manipulation instruction on RV32.
; This test is kept here in case future expansions of the Bitmanip
; extensions introduce instructions suitable for this pattern.

define i64 @minu_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: minu_i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    beq a1, a3, .LBB33_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    sltu a4, a1, a3
; CHECK-NEXT:    beqz a4, .LBB33_3
; CHECK-NEXT:    j .LBB33_4
; CHECK-NEXT:  .LBB33_2:
; CHECK-NEXT:    sltu a4, a0, a2
; CHECK-NEXT:    bnez a4, .LBB33_4
; CHECK-NEXT:  .LBB33_3:
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    mv a1, a3
; CHECK-NEXT:  .LBB33_4:
; CHECK-NEXT:    ret
  %cmp = icmp ult i64 %a, %b
  %cond = select i1 %cmp, i64 %a, i64 %b
  ret i64 %cond
}

define i32 @maxu_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: maxu_i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    bltu a1, a0, .LBB34_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:  .LBB34_2:
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: maxu_i32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    maxu a0, a0, a1
; RV32ZBB-NEXT:    ret
  %cmp = icmp ugt i32 %a, %b
  %cond = select i1 %cmp, i32 %a, i32 %b
  ret i32 %cond
}

; Because i64 code patterns are not matched directly on RV32, some i64
; patterns do not yet map to any bit-manipulation instruction on RV32.
; This test is kept here in case future expansions of the Bitmanip
; extensions introduce instructions suitable for this pattern.

define i64 @maxu_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: maxu_i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    beq a1, a3, .LBB35_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    sltu a4, a3, a1
; CHECK-NEXT:    beqz a4, .LBB35_3
; CHECK-NEXT:    j .LBB35_4
; CHECK-NEXT:  .LBB35_2:
; CHECK-NEXT:    sltu a4, a2, a0
; CHECK-NEXT:    bnez a4, .LBB35_4
; CHECK-NEXT:  .LBB35_3:
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    mv a1, a3
; CHECK-NEXT:  .LBB35_4:
; CHECK-NEXT:    ret
  %cmp = icmp ugt i64 %a, %b
  %cond = select i1 %cmp, i64 %a, i64 %b
  ret i64 %cond
}

declare i32 @llvm.abs.i32(i32, i1 immarg)

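; Branchless abs via the sign mask: s = x >> 31 (arithmetic), |x| = (x ^ s) - s.
; With Zbb this becomes max(x, -x).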
define i32 @abs_i32(i32 %x) {
; RV32I-LABEL: abs_i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: abs_i32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    max a0, a0, a1
; RV32ZBB-NEXT:    ret
  %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
  ret i32 %abs
}

declare i64 @llvm.abs.i64(i64, i1 immarg)

define i64 @abs_i64(i64 %x) {
; CHECK-LABEL: abs_i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    bgez a1, .LBB37_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    snez a2, a0
; CHECK-NEXT:    neg a0, a0
; CHECK-NEXT:    neg a1, a1
; CHECK-NEXT:    sub a1, a1, a2
; CHECK-NEXT:  .LBB37_2:
; CHECK-NEXT:    ret
  %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
  ret i64 %abs
}

define i32 @zexth_i32(i32 %a) nounwind {
; RV32I-LABEL: zexth_i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: zexth_i32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    zext.h a0, a0
; RV32ZBB-NEXT:    ret
  %and = and i32 %a, 65535
  ret i32 %and
}

define i64 @zexth_i64(i64 %a) nounwind {
; RV32I-LABEL: zexth_i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: zexth_i64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    zext.h a0, a0
; RV32ZBB-NEXT:    li a1, 0
; RV32ZBB-NEXT:    ret
  %and = and i64 %a, 65535
  ret i64 %and
}

declare i32 @llvm.bswap.i32(i32)

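; Without Zbb, bswap is built from shifts and the 0xff00 mask: the two middle
; bytes are masked into place and combined with the two outer bytes moved by
; 24-bit shifts. Zbb folds the whole swap into rev8.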
define i32 @bswap_i32(i32 %a) nounwind {
; RV32I-LABEL: bswap_i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srli a1, a0, 8
; RV32I-NEXT:    lui a2, 16
; RV32I-NEXT:    addi a2, a2, -256
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    srli a3, a0, 24
; RV32I-NEXT:    or a1, a1, a3
; RV32I-NEXT:    and a2, a0, a2
; RV32I-NEXT:    slli a2, a2, 8
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    or a0, a0, a2
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: bswap_i32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    rev8 a0, a0
; RV32ZBB-NEXT:    ret
  %1 = tail call i32 @llvm.bswap.i32(i32 %a)
  ret i32 %1
}

declare i64 @llvm.bswap.i64(i64)

define i64 @bswap_i64(i64 %a) {
; RV32I-LABEL: bswap_i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srli a2, a1, 8
; RV32I-NEXT:    lui a3, 16
; RV32I-NEXT:    addi a3, a3, -256
; RV32I-NEXT:    and a2, a2, a3
; RV32I-NEXT:    srli a4, a1, 24
; RV32I-NEXT:    or a2, a2, a4
; RV32I-NEXT:    and a4, a1, a3
; RV32I-NEXT:    slli a4, a4, 8
; RV32I-NEXT:    slli a1, a1, 24
; RV32I-NEXT:    or a1, a1, a4
; RV32I-NEXT:    or a2, a1, a2
; RV32I-NEXT:    srli a1, a0, 8
; RV32I-NEXT:    and a1, a1, a3
; RV32I-NEXT:    srli a4, a0, 24
; RV32I-NEXT:    or a1, a1, a4
; RV32I-NEXT:    and a3, a0, a3
; RV32I-NEXT:    slli a3, a3, 8
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    or a0, a0, a3
; RV32I-NEXT:    or a1, a0, a1
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: bswap_i64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    rev8 a2, a1
; RV32ZBB-NEXT:    rev8 a1, a0
; RV32ZBB-NEXT:    mv a0, a2
; RV32ZBB-NEXT:    ret
  %1 = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %1
}

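; Multiplying a value masked to the 0x0101... pattern by 255 turns each 0/1
; byte into 0x00/0xff, which is exactly what orc.b produces for such inputs.
; The lowering folds the multiply into (x << 8) - x.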
define i16 @orc_b_i16(i16 %a) {
; RV32I-LABEL: orc_b_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    andi a0, a0, 257
; RV32I-NEXT:    slli a1, a0, 8
; RV32I-NEXT:    sub a0, a1, a0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: orc_b_i16:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    andi a0, a0, 257
; RV32ZBB-NEXT:    orc.b a0, a0
; RV32ZBB-NEXT:    ret
  %1 = and i16 %a, 257
  %2 = mul nuw i16 %1, 255
  ret i16 %2
}

define i32 @orc_b_i32(i32 %a) {
; RV32I-LABEL: orc_b_i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a1, 4112
; RV32I-NEXT:    addi a1, a1, 257
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    slli a1, a0, 8
; RV32I-NEXT:    sub a0, a1, a0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: orc_b_i32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    lui a1, 4112
; RV32ZBB-NEXT:    addi a1, a1, 257
; RV32ZBB-NEXT:    and a0, a0, a1
; RV32ZBB-NEXT:    orc.b a0, a0
; RV32ZBB-NEXT:    ret
  %1 = and i32 %a, 16843009
  %2 = mul nuw i32 %1, 255
  ret i32 %2
}

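; The same multiply-by-255 trick at i64 needs a two-register subtract with
; borrow: (x << 8) - x is formed across the register pair, with sltu supplying
; the borrow from the low word.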
define i64 @orc_b_i64(i64 %a) {
; CHECK-LABEL: orc_b_i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a2, 4112
; CHECK-NEXT:    addi a2, a2, 257
; CHECK-NEXT:    and a1, a1, a2
; CHECK-NEXT:    and a0, a0, a2
; CHECK-NEXT:    slli a2, a0, 8
; CHECK-NEXT:    sltu a3, a2, a0
; CHECK-NEXT:    srli a4, a0, 24
; CHECK-NEXT:    slli a5, a1, 8
; CHECK-NEXT:    or a4, a5, a4
; CHECK-NEXT:    sub a1, a4, a1
; CHECK-NEXT:    sub a1, a1, a3
; CHECK-NEXT:    sub a0, a2, a0
; CHECK-NEXT:    ret
  %1 = and i64 %a, 72340172838076673
  %2 = mul nuw i64 %1, 255
  ret i64 %2
}

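; The i16 shifts and the sign extension fold into one i32 shift pair: shl 9 /
; ashr on i16 followed by sext becomes a single slli/srai on the full
; register, here replicating bit 6 of the input.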
define i32 @srai_slli(i16 signext %0) {
; CHECK-LABEL: srai_slli:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 25
; CHECK-NEXT:    srai a0, a0, 31
; CHECK-NEXT:    ret
  %2 = shl i16 %0, 9
  %sext = ashr i16 %2, 15
  %3 = sext i16 %sext to i32
  ret i32 %3
}

define i32 @srai_slli2(i16 signext %0) {
; CHECK-LABEL: srai_slli2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 25
; CHECK-NEXT:    srai a0, a0, 30
; CHECK-NEXT:    ret
  %2 = shl i16 %0, 9
  %sext = ashr i16 %2, 14
  %3 = sext i16 %sext to i32
  ret i32 %3
}