# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=legalizer -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
# RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16
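# Test legalization of G_FMINNUM for scalar (s16/s32/s64/s128) and vector
# (<4 x s32>, <3 x s32>) types, with and without +fullfp16.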
...
---
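# s16 G_FMINNUM is legal with +fullfp16; without it, the operands are
# extended to s32 and the result truncated back (G_FPEXT/G_FPTRUNC).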
name:            s16_legal_with_full_fp16
alignment:       4
body:             |
  bb.0:
    liveins: $h0, $h1
    ; CHECK-FP16-LABEL: name: s16_legal_with_full_fp16
    ; CHECK-FP16: liveins: $h0, $h1
    ; CHECK-FP16-NEXT: {{  $}}
    ; CHECK-FP16-NEXT: %a:_(s16) = COPY $h0
    ; CHECK-FP16-NEXT: %b:_(s16) = COPY $h1
    ; CHECK-FP16-NEXT: %minnum:_(s16) = G_FMINNUM %a, %b
    ; CHECK-FP16-NEXT: $h0 = COPY %minnum(s16)
    ; CHECK-FP16-NEXT: RET_ReallyLR implicit $h0
    ;
    ; CHECK-NOFP16-LABEL: name: s16_legal_with_full_fp16
    ; CHECK-NOFP16: liveins: $h0, $h1
    ; CHECK-NOFP16-NEXT: {{  $}}
    ; CHECK-NOFP16-NEXT: %a:_(s16) = COPY $h0
    ; CHECK-NOFP16-NEXT: %b:_(s16) = COPY $h1
    ; CHECK-NOFP16-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT %a(s16)
    ; CHECK-NOFP16-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT %b(s16)
    ; CHECK-NOFP16-NEXT: [[FMINNUM:%[0-9]+]]:_(s32) = G_FMINNUM [[FPEXT]], [[FPEXT1]]
    ; CHECK-NOFP16-NEXT: %minnum:_(s16) = G_FPTRUNC [[FMINNUM]](s32)
    ; CHECK-NOFP16-NEXT: $h0 = COPY %minnum(s16)
    ; CHECK-NOFP16-NEXT: RET_ReallyLR implicit $h0
    %a:_(s16) = COPY $h0
    %b:_(s16) = COPY $h1
    %minnum:_(s16) = G_FMINNUM %a, %b
    $h0 = COPY %minnum(s16)
    RET_ReallyLR implicit $h0

...
---
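# s32 G_FMINNUM is legal and left unchanged.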
name:            s32_legal
alignment:       4
body:             |
  bb.0:
    liveins: $s0, $s1
    ; CHECK-LABEL: name: s32_legal
    ; CHECK: liveins: $s0, $s1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %a:_(s32) = COPY $s0
    ; CHECK-NEXT: %b:_(s32) = COPY $s1
    ; CHECK-NEXT: %minnum:_(s32) = G_FMINNUM %a, %b
    ; CHECK-NEXT: $s0 = COPY %minnum(s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $s0
    %a:_(s32) = COPY $s0
    %b:_(s32) = COPY $s1
    %minnum:_(s32) = G_FMINNUM %a, %b
    $s0 = COPY %minnum(s32)
    RET_ReallyLR implicit $s0

...
---
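# s64 G_FMINNUM is legal and left unchanged.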
name:            s64_legal
alignment:       4
body:             |
  bb.0:
    liveins: $d0, $d1
    ; CHECK-LABEL: name: s64_legal
    ; CHECK: liveins: $d0, $d1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %a:_(s64) = COPY $d0
    ; CHECK-NEXT: %b:_(s64) = COPY $d1
    ; CHECK-NEXT: %minnum:_(s64) = G_FMINNUM %a, %b
    ; CHECK-NEXT: $d0 = COPY %minnum(s64)
    ; CHECK-NEXT: RET_ReallyLR implicit $d0
    %a:_(s64) = COPY $d0
    %b:_(s64) = COPY $d1
    %minnum:_(s64) = G_FMINNUM %a, %b
    $d0 = COPY %minnum(s64)
    RET_ReallyLR implicit $d0

...
---
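# s128 has no native instruction; G_FMINNUM is lowered to a libcall to
# fminl under the AAPCS calling convention.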
name:            s128_libcall
alignment:       4
body:             |
  bb.0:
    liveins: $q0, $q1
    ; CHECK-LABEL: name: s128_libcall
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %a:_(s128) = COPY $q0
    ; CHECK-NEXT: %b:_(s128) = COPY $q1
    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
    ; CHECK-NEXT: $q0 = COPY %a(s128)
    ; CHECK-NEXT: $q1 = COPY %b(s128)
    ; CHECK-NEXT: BL &fminl, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $q0, implicit $q1, implicit-def $q0
    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
    ; CHECK-NEXT: %minnum:_(s128) = COPY $q0
    ; CHECK-NEXT: $q0 = COPY %minnum(s128)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %a:_(s128) = COPY $q0
    %b:_(s128) = COPY $q1
    %minnum:_(s128) = G_FMINNUM %a, %b
    $q0 = COPY %minnum(s128)
    RET_ReallyLR implicit $q0

...
---
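# <4 x s32> G_FMINNUM is legal and left unchanged.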
name:            v4s32_legal
alignment:       4
body:             |
  bb.0:
    liveins: $q0, $q1
    ; CHECK-LABEL: name: v4s32_legal
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %a:_(<4 x s32>) = COPY $q0
    ; CHECK-NEXT: %b:_(<4 x s32>) = COPY $q1
    ; CHECK-NEXT: %minnum:_(<4 x s32>) = G_FMINNUM %a, %b
    ; CHECK-NEXT: $q0 = COPY %minnum(<4 x s32>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %a:_(<4 x s32>) = COPY $q0
    %b:_(<4 x s32>) = COPY $q1
    %minnum:_(<4 x s32>) = G_FMINNUM %a, %b
    $q0 = COPY %minnum(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
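# <3 x s32> is not legal; it is widened (moreElements) to <4 x s32>, and the
# surrounding G_UNMERGE_VALUES/G_BUILD_VECTOR artifacts are combined away.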
name:            v3s32_widen
alignment:       4
body:             |
  bb.1.entry:
    liveins: $q0, $q1
    ; CHECK-LABEL: name: v3s32_widen
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY]](<2 x s64>)
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY1]](<2 x s64>)
    ; CHECK-NEXT: [[FMINNUM:%[0-9]+]]:_(<4 x s32>) = G_FMINNUM [[BITCAST]], [[BITCAST1]]
    ; CHECK-NEXT: $q0 = COPY [[FMINNUM]](<4 x s32>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %2:_(<2 x s64>) = COPY $q0
    %3:_(<4 x s32>) = G_BITCAST %2:_(<2 x s64>)
    %4:_(s32), %5:_(s32), %6:_(s32), %7:_(s32) = G_UNMERGE_VALUES %3:_(<4 x s32>)
    %0:_(<3 x s32>) = G_BUILD_VECTOR %4:_(s32), %5:_(s32), %6:_(s32)
    %8:_(<2 x s64>) = COPY $q1
    %9:_(<4 x s32>) = G_BITCAST %8:_(<2 x s64>)
    %10:_(s32), %11:_(s32), %12:_(s32), %13:_(s32) = G_UNMERGE_VALUES %9:_(<4 x s32>)
    %1:_(<3 x s32>) = G_BUILD_VECTOR %10:_(s32), %11:_(s32), %12:_(s32)
    %14:_(<3 x s32>) = G_FMINNUM %0:_, %1:_
    %15:_(s32), %16:_(s32), %17:_(s32) = G_UNMERGE_VALUES %14:_(<3 x s32>)
    %18:_(s32) = G_IMPLICIT_DEF
    %19:_(<4 x s32>) = G_BUILD_VECTOR %15:_(s32), %16:_(s32), %17:_(s32), %18:_(s32)
    $q0 = COPY %19:_(<4 x s32>)
    RET_ReallyLR implicit $q0

...