# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=CHECK,RV32I
# RUN: llc -mtriple=riscv32 -mattr=+zbb -run-pass=legalizer %s -o - \
# RUN: | FileCheck %s --check-prefixes=CHECK,RV32ZBB
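# Check legalization of G_UMAX at i8, i16, i32 and i64 on RV32, both with and
# without the Zbb extension.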
---
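# i8 G_UMAX: without Zbb the operation is lowered to a zero-extended unsigned
# compare plus select, with the result masked back to 8 bits; with Zbb the
# zero-extended operands feed a 32-bit G_UMAX directly.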
name: umax_i8
body: |
  bb.0.entry:
    ; RV32I-LABEL: name: umax_i8
    ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
    ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; RV32I-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; RV32I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; RV32I-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[AND]](s32), [[AND1]]
    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[COPY]], [[COPY1]]
    ; RV32I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; RV32I-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SELECT]], [[C2]]
    ; RV32I-NEXT: $x10 = COPY [[AND2]](s32)
    ; RV32I-NEXT: PseudoRET implicit $x10
    ;
    ; RV32ZBB-LABEL: name: umax_i8
    ; RV32ZBB: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
    ; RV32ZBB-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
    ; RV32ZBB-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; RV32ZBB-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; RV32ZBB-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; RV32ZBB-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
    ; RV32ZBB-NEXT: [[UMAX:%[0-9]+]]:_(s32) = G_UMAX [[AND]], [[AND1]]
    ; RV32ZBB-NEXT: $x10 = COPY [[UMAX]](s32)
    ; RV32ZBB-NEXT: PseudoRET implicit $x10
    %0:_(s32) = COPY $x10
    %1:_(s32) = COPY $x11
    %2:_(s8) = G_TRUNC %0(s32)
    %3:_(s8) = G_TRUNC %1(s32)
    %4:_(s8) = G_UMAX %2, %3
    %5:_(s32) = G_ZEXT %4(s8)
    $x10 = COPY %5(s32)
    PseudoRET implicit $x10
...
---
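# i16 G_UMAX follows the same pattern as i8, masking the operands with 0xffff.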
name: umax_i16
body: |
  bb.0.entry:
    ; RV32I-LABEL: name: umax_i16
    ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
    ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; RV32I-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; RV32I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; RV32I-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[AND]](s32), [[AND1]]
    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[COPY]], [[COPY1]]
    ; RV32I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; RV32I-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SELECT]], [[C2]]
    ; RV32I-NEXT: $x10 = COPY [[AND2]](s32)
    ; RV32I-NEXT: PseudoRET implicit $x10
    ;
    ; RV32ZBB-LABEL: name: umax_i16
    ; RV32ZBB: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
    ; RV32ZBB-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
    ; RV32ZBB-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; RV32ZBB-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; RV32ZBB-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; RV32ZBB-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
    ; RV32ZBB-NEXT: [[UMAX:%[0-9]+]]:_(s32) = G_UMAX [[AND]], [[AND1]]
    ; RV32ZBB-NEXT: $x10 = COPY [[UMAX]](s32)
    ; RV32ZBB-NEXT: PseudoRET implicit $x10
    %0:_(s32) = COPY $x10
    %1:_(s32) = COPY $x11
    %2:_(s16) = G_TRUNC %0(s32)
    %3:_(s16) = G_TRUNC %1(s32)
    %4:_(s16) = G_UMAX %2, %3
    %5:_(s32) = G_ZEXT %4(s16)
    $x10 = COPY %5(s32)
    PseudoRET implicit $x10
...
---
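# i32 G_UMAX is legal with Zbb and is kept as-is; without Zbb it is lowered to
# G_ICMP ugt + G_SELECT.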
name: umax_i32
body: |
  bb.0.entry:
    ; RV32I-LABEL: name: umax_i32
    ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[COPY]](s32), [[COPY1]]
    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $x10 = COPY [[SELECT]](s32)
    ; RV32I-NEXT: PseudoRET implicit $x10
    ;
    ; RV32ZBB-LABEL: name: umax_i32
    ; RV32ZBB: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
    ; RV32ZBB-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
    ; RV32ZBB-NEXT: [[UMAX:%[0-9]+]]:_(s32) = G_UMAX [[COPY]], [[COPY1]]
    ; RV32ZBB-NEXT: $x10 = COPY [[UMAX]](s32)
    ; RV32ZBB-NEXT: PseudoRET implicit $x10
    %0:_(s32) = COPY $x10
    %1:_(s32) = COPY $x11
    %2:_(s32) = G_UMAX %0, %1
    $x10 = COPY %2(s32)
    PseudoRET implicit $x10
...
---
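# i64 G_UMAX is narrowed to 32-bit pieces on both configurations: the high
# halves are compared, the low-half comparison is used when the high halves
# are equal, and only the selected low half is needed for the return value.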
name: umax_i64
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: umax_i64
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[COPY1]](s32), [[COPY3]]
    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY1]](s32), [[COPY3]]
    ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[COPY]](s32), [[COPY2]]
    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[ICMP2]], [[ICMP]]
    ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[SELECT]](s32), [[COPY]], [[COPY2]]
    ; CHECK-NEXT: $x10 = COPY [[SELECT1]](s32)
    ; CHECK-NEXT: PseudoRET implicit $x10
    %0:_(s32) = COPY $x10
    %1:_(s32) = COPY $x11
    %2:_(s32) = COPY $x12
    %3:_(s32) = COPY $x13
    %4:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
    %5:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
    %6:_(s64) = G_UMAX %4, %5
    %7:_(s32) = G_TRUNC %6(s64)
    $x10 = COPY %7(s32)
    PseudoRET implicit $x10
...