; llvm/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m -global-isel -verify-machineinstrs < %s \
; RUN:   -riscv-enable-copy-propagation=false | FileCheck %s --check-prefix=RV32IM
; RUN: llc -mtriple=riscv64 -mattr=+m -global-isel -verify-machineinstrs < %s \
; RUN:   -riscv-enable-copy-propagation=false | FileCheck %s --check-prefix=RV64IM

; Extends to 32 bits exhaustively tested for add only.

; i8 add: a plain 'add' on RV32 and 'addw' on RV64; no argument-extension
; instructions are expected since the high bits of an i8 result are unspecified.
define i8 @add_i8(i8 %a, i8 %b) {
; RV32IM-LABEL: add_i8:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: add_i8:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    addw a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = add i8 %a, %b
  ret i8 %0
}

; sext i8 -> i32 on each operand is lowered as shift-left by 24 followed by an
; arithmetic shift-right by 24 (sraiw on RV64) before the 32-bit add.
define i32 @add_i8_signext_i32(i8 %a, i8 %b) {
; RV32IM-LABEL: add_i8_signext_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    slli a0, a0, 24
; RV32IM-NEXT:    srai a0, a0, 24
; RV32IM-NEXT:    slli a1, a1, 24
; RV32IM-NEXT:    srai a1, a1, 24
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: add_i8_signext_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    slli a0, a0, 24
; RV64IM-NEXT:    sraiw a0, a0, 24
; RV64IM-NEXT:    slli a1, a1, 24
; RV64IM-NEXT:    sraiw a1, a1, 24
; RV64IM-NEXT:    addw a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = sext i8 %a to i32
  %1 = sext i8 %b to i32
  %2 = add i32 %0, %1
  ret i32 %2
}

; zext i8 -> i32 on each operand is lowered to 'andi ..., 255' before the add.
define i32 @add_i8_zeroext_i32(i8 %a, i8 %b) {
; RV32IM-LABEL: add_i8_zeroext_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    andi a0, a0, 255
; RV32IM-NEXT:    andi a1, a1, 255
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: add_i8_zeroext_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    andi a0, a0, 255
; RV64IM-NEXT:    andi a1, a1, 255
; RV64IM-NEXT:    addw a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = zext i8 %a to i32
  %1 = zext i8 %b to i32
  %2 = add i32 %0, %1
  ret i32 %2
}

; TODO: Handle G_IMPLICIT_DEF, which is needed to have i8 -> i64 extends working
; on RV32.

; i32 add selects 'add' on RV32 and the W-form 'addw' on RV64.
define i32 @add_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: add_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: add_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    addw a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = add i32 %a, %b
  ret i32 %0
}

; An add with a small (12-bit) immediate folds into addi / addiw.
define i32 @addi_i32(i32 %a) {
; RV32IM-LABEL: addi_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    addi a0, a0, 1234
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: addi_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    addiw a0, a0, 1234
; RV64IM-NEXT:    ret
entry:
  %0 = add i32 %a, 1234
  ret i32 %0
}

; i32 sub selects 'sub' on RV32 and 'subw' on RV64.
define i32 @sub_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: sub_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    sub a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: sub_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    subw a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = sub i32 %a, %b
  ret i32 %0
}

; Subtracting an immediate is expected to be emitted as an add of the negated
; immediate (addi/addiw with -1234); RISC-V has no subi instruction.
define i32 @subi_i32(i32 %a) {
; RV32IM-LABEL: subi_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    addi a0, a0, -1234
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: subi_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    addiw a0, a0, -1234
; RV64IM-NEXT:    ret
entry:
  %0 = sub i32 %a, 1234
  ret i32 %0
}

; (0 - a) is printed as the neg / negw alias (sub from x0).
define i32 @neg_i32(i32 %a) {
; RV32IM-LABEL: neg_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    neg a0, a0
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: neg_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    negw a0, a0
; RV64IM-NEXT:    ret
entry:
  %0 = sub i32 0, %a
  ret i32 %0
}

; Variable i32 shift-left: sll on RV32, sllw on RV64.
define i32 @sll_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: sll_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    sll a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: sll_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    sllw a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = shl i32 %a, %b
  ret i32 %0
}

; Immediate i32 shift-left: slli on RV32, slliw on RV64.
define i32 @slli_i32(i32 %a) {
; RV32IM-LABEL: slli_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    slli a0, a0, 11
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: slli_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    slliw a0, a0, 11
; RV64IM-NEXT:    ret
entry:
  %0 = shl i32 %a, 11
  ret i32 %0
}

; Variable i32 arithmetic shift-right: sra on RV32, sraw on RV64.
define i32 @sra_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: sra_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    sra a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: sra_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    sraw a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = ashr i32 %a, %b
  ret i32 %0
}

; Immediate i32 arithmetic shift-right: srai on RV32, sraiw on RV64.
define i32 @srai_i32(i32 %a) {
; RV32IM-LABEL: srai_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    srai a0, a0, 17
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: srai_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    sraiw a0, a0, 17
; RV64IM-NEXT:    ret
entry:
  %0 = ashr i32 %a, 17
  ret i32 %0
}

; Variable i32 logical shift-right: srl on RV32, srlw on RV64.
define i32 @srl_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: srl_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    srl a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: srl_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    srlw a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = lshr i32 %a, %b
  ret i32 %0
}

; Immediate i32 logical shift-right: srli on RV32, srliw on RV64.
; Note: %b is intentionally unused; only the immediate form is exercised.
define i32 @srli_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: srli_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    srli a0, a0, 23
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: srli_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    srliw a0, a0, 23
; RV64IM-NEXT:    ret
entry:
  %0 = lshr i32 %a, 23
  ret i32 %0
}

; i32 and: same 'and' on both targets (bitwise ops have no W variants).
define i32 @and_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: and_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    and a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: and_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    and a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = and i32 %a, %b
  ret i32 %0
}

; i32 and with a 12-bit immediate folds into andi on both targets.
define i32 @andi_i32(i32 %a) {
; RV32IM-LABEL: andi_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    andi a0, a0, 1234
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: andi_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    andi a0, a0, 1234
; RV64IM-NEXT:    ret
entry:
  %0 = and i32 %a, 1234
  ret i32 %0
}

; i32 or: same 'or' on both targets.
define i32 @or_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: or_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    or a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: or_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    or a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = or i32 %a, %b
  ret i32 %0
}

; i32 or with a 12-bit immediate folds into ori on both targets.
define i32 @ori_i32(i32 %a) {
; RV32IM-LABEL: ori_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    ori a0, a0, 1234
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: ori_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    ori a0, a0, 1234
; RV64IM-NEXT:    ret
entry:
  %0 = or i32 %a, 1234
  ret i32 %0
}

; i32 xor: same 'xor' on both targets.
define i32 @xor_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: xor_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    xor a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: xor_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    xor a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = xor i32 %a, %b
  ret i32 %0
}

; i32 xor with a 12-bit immediate folds into xori on both targets.
; Note: %b is intentionally unused; only the immediate form is exercised.
define i32 @xori_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: xori_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    xori a0, a0, 1234
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: xori_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    xori a0, a0, 1234
; RV64IM-NEXT:    ret
entry:
  %0 = xor i32 %a, 1234
  ret i32 %0
}

; i32 mul (requires +m): mul on RV32, mulw on RV64.
define i32 @mul_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: mul_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    mul a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: mul_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    mulw a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = mul i32 %a, %b
  ret i32 %0
}

; Signed i32 division (requires +m): div on RV32, divw on RV64.
define i32 @sdiv_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: sdiv_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    div a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: sdiv_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    divw a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = sdiv i32 %a, %b
  ret i32 %0
}

; Signed i32 remainder (requires +m): rem on RV32, remw on RV64.
define i32 @srem_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: srem_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    rem a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: srem_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    remw a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = srem i32 %a, %b
  ret i32 %0
}

; Unsigned i32 division (requires +m): divu on RV32, divuw on RV64.
define i32 @udiv_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: udiv_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    divu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: udiv_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    divuw a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = udiv i32 %a, %b
  ret i32 %0
}

; Unsigned i32 remainder (requires +m): remu on RV32, remuw on RV64.
define i32 @urem_i32(i32 %a, i32 %b) {
; RV32IM-LABEL: urem_i32:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    remu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: urem_i32:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    remuw a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = urem i32 %a, %b
  ret i32 %0
}

; i64 add: on RV32 the value is split across register pairs (a1:a0 + a3:a2)
; and lowered as two adds with the carry computed via sltu; on RV64 a single add.
define i64 @add_i64(i64 %a, i64 %b) {
; RV32IM-LABEL: add_i64:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    add a0, a0, a2
; RV32IM-NEXT:    sltu a2, a0, a2
; RV32IM-NEXT:    add a1, a1, a3
; RV32IM-NEXT:    add a1, a1, a2
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: add_i64:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = add i64 %a, %b
  ret i64 %0
}

; i64 add-immediate: on RV32, addi on the low half with the carry into the high
; half computed via sltiu; on RV64 a single addi.
define i64 @addi_i64(i64 %a) {
; RV32IM-LABEL: addi_i64:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    addi a0, a0, 1234
; RV32IM-NEXT:    sltiu a2, a0, 1234
; RV32IM-NEXT:    add a1, a1, a2
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: addi_i64:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    addi a0, a0, 1234
; RV64IM-NEXT:    ret
entry:
  %0 = add i64 %a, 1234
  ret i64 %0
}

; i64 sub: on RV32, two subs with the borrow computed via sltu and subtracted
; from the high half; on RV64 a single sub.
define i64 @sub_i64(i64 %a, i64 %b) {
; RV32IM-LABEL: sub_i64:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    sub a4, a0, a2
; RV32IM-NEXT:    sltu a0, a0, a2
; RV32IM-NEXT:    sub a1, a1, a3
; RV32IM-NEXT:    sub a1, a1, a0
; RV32IM-NEXT:    mv a0, a4
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: sub_i64:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    sub a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = sub i64 %a, %b
  ret i64 %0
}

; 1234444 does not fit in a 12-bit immediate, so it is materialized with
; lui+addi (lui 301; addi 1548: 301*4096+1548 = 1234444) and then subtracted;
; RV32 additionally propagates the borrow via sltu.
define i64 @subi_i64(i64 %a) {
; RV32IM-LABEL: subi_i64:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    lui a2, 301
; RV32IM-NEXT:    addi a3, a2, 1548
; RV32IM-NEXT:    sub a2, a0, a3
; RV32IM-NEXT:    sltu a0, a0, a3
; RV32IM-NEXT:    sub a1, a1, a0
; RV32IM-NEXT:    mv a0, a2
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: subi_i64:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    lui a1, 301
; RV64IM-NEXT:    addiw a1, a1, 1548
; RV64IM-NEXT:    sub a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = sub i64 %a, 1234444
  ret i64 %0
}

; i64 negate: on RV32 both halves are negated and the borrow out of the low
; half (snez) is subtracted from the high half; on RV64 a single neg.
define i64 @neg_i64(i64 %a) {
; RV32IM-LABEL: neg_i64:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    neg a2, a0
; RV32IM-NEXT:    snez a0, a0
; RV32IM-NEXT:    neg a1, a1
; RV32IM-NEXT:    sub a1, a1, a0
; RV32IM-NEXT:    mv a0, a2
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: neg_i64:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    neg a0, a0
; RV64IM-NEXT:    ret
entry:
  %0 = sub i64 0, %a
  ret i64 %0
}

; TODO: Handle G_SELECT, which is needed to have i64 shifts working on RV32.

; i64 and: on RV32 the op is applied independently to the low and high halves.
define i64 @and_i64(i64 %a, i64 %b) {
; RV32IM-LABEL: and_i64:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    and a0, a0, a2
; RV32IM-NEXT:    and a1, a1, a3
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: and_i64:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    and a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = and i64 %a, %b
  ret i64 %0
}

; i64 and-immediate: the mask clears the entire high half, so RV32 produces a
; constant zero high word (li a1, 0) alongside the low-half andi.
define i64 @andi_i64(i64 %a) {
; RV32IM-LABEL: andi_i64:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    andi a0, a0, 1234
; RV32IM-NEXT:    li a1, 0
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: andi_i64:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    andi a0, a0, 1234
; RV64IM-NEXT:    ret
entry:
  %0 = and i64 %a, 1234
  ret i64 %0
}

; i64 or: on RV32 the op is applied independently to the low and high halves.
define i64 @or_i64(i64 %a, i64 %b) {
; RV32IM-LABEL: or_i64:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    or a0, a0, a2
; RV32IM-NEXT:    or a1, a1, a3
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: or_i64:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    or a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = or i64 %a, %b
  ret i64 %0
}

; i64 or-immediate: a small positive immediate only affects the low half, so on
; RV32 the high word (a1) passes through untouched.
define i64 @ori_i64(i64 %a) {
; RV32IM-LABEL: ori_i64:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    ori a0, a0, 1234
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: ori_i64:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    ori a0, a0, 1234
; RV64IM-NEXT:    ret
entry:
  %0 = or i64 %a, 1234
  ret i64 %0
}

; i64 xor: on RV32 the op is applied independently to the low and high halves.
define i64 @xor_i64(i64 %a, i64 %b) {
; RV32IM-LABEL: xor_i64:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    xor a0, a0, a2
; RV32IM-NEXT:    xor a1, a1, a3
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: xor_i64:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    xor a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = xor i64 %a, %b
  ret i64 %0
}

; i64 xor-immediate: a small positive immediate only affects the low half, so
; on RV32 the high word (a1) passes through untouched.
define i64 @xori_i64(i64 %a) {
; RV32IM-LABEL: xori_i64:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    xori a0, a0, 1234
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: xori_i64:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    xori a0, a0, 1234
; RV64IM-NEXT:    ret
entry:
  %0 = xor i64 %a, 1234
  ret i64 %0
}

; i64 mul (requires +m): on RV32, expanded to the schoolbook form
; lo = a.lo*b.lo; hi = a.hi*b.lo + a.lo*b.hi + mulhu(a.lo, b.lo);
; on RV64 a single mul.
define i64 @mul_i64(i64 %a, i64 %b) {
; RV32IM-LABEL: mul_i64:
; RV32IM:       # %bb.0: # %entry
; RV32IM-NEXT:    mul a4, a0, a2
; RV32IM-NEXT:    mul a1, a1, a2
; RV32IM-NEXT:    mul a3, a0, a3
; RV32IM-NEXT:    mulhu a0, a0, a2
; RV32IM-NEXT:    add a1, a1, a3
; RV32IM-NEXT:    add a1, a1, a0
; RV32IM-NEXT:    mv a0, a4
; RV32IM-NEXT:    ret
;
; RV64IM-LABEL: mul_i64:
; RV64IM:       # %bb.0: # %entry
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    ret
entry:
  %0 = mul i64 %a, %b
  ret i64 %0
}

; TODO: Handle G_SDIV, G_SREM, G_UDIV, G_UREM for i64 on RV32. Likely will be
; dispatched to a libcall?