# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc %s -verify-machineinstrs -mtriple=aarch64-unknown-unknown -run-pass=legalizer -mattr=-fullfp16 -o - | FileCheck %s --check-prefix=NO-FP16
# RUN: llc %s -verify-machineinstrs -mtriple=aarch64-unknown-unknown -run-pass=legalizer -mattr=+fullfp16 -o - | FileCheck %s --check-prefix=FP16
...
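# s16 scalar: the NO-FP16 checks expect widening around the round (G_FPEXT to
# s32, G_INTRINSIC_ROUNDEVEN, G_FPTRUNC back); with +fullfp16 the s16 operation
# is expected to survive the legalizer unchanged.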
---
name: test_f16.roundeven
alignment: 4
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $h0
    ; NO-FP16-LABEL: name: test_f16.roundeven
    ; NO-FP16: liveins: $h0
    ; NO-FP16-NEXT: {{ $}}
    ; NO-FP16-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
    ; NO-FP16-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[COPY]](s16)
    ; NO-FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(s32) = G_INTRINSIC_ROUNDEVEN [[FPEXT]]
    ; NO-FP16-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INTRINSIC_ROUNDEVEN]](s32)
    ; NO-FP16-NEXT: $h0 = COPY [[FPTRUNC]](s16)
    ; NO-FP16-NEXT: RET_ReallyLR implicit $h0
    ;
    ; FP16-LABEL: name: test_f16.roundeven
    ; FP16: liveins: $h0
    ; FP16-NEXT: {{ $}}
    ; FP16-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
    ; FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(s16) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; FP16-NEXT: $h0 = COPY [[INTRINSIC_ROUNDEVEN]](s16)
    ; FP16-NEXT: RET_ReallyLR implicit $h0
    %0:_(s16) = COPY $h0
    %1:_(s16) = G_INTRINSIC_ROUNDEVEN %0
    $h0 = COPY %1(s16)
    RET_ReallyLR implicit $h0
...
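# s32 scalar: expected to pass through the legalizer unchanged with or without
# +fullfp16.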
---
name: test_f32.roundeven
alignment: 4
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $s0
    ; NO-FP16-LABEL: name: test_f32.roundeven
    ; NO-FP16: liveins: $s0
    ; NO-FP16-NEXT: {{ $}}
    ; NO-FP16-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $s0
    ; NO-FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(s32) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; NO-FP16-NEXT: $s0 = COPY [[INTRINSIC_ROUNDEVEN]](s32)
    ; NO-FP16-NEXT: RET_ReallyLR implicit $s0
    ;
    ; FP16-LABEL: name: test_f32.roundeven
    ; FP16: liveins: $s0
    ; FP16-NEXT: {{ $}}
    ; FP16-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $s0
    ; FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(s32) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; FP16-NEXT: $s0 = COPY [[INTRINSIC_ROUNDEVEN]](s32)
    ; FP16-NEXT: RET_ReallyLR implicit $s0
    %0:_(s32) = COPY $s0
    %1:_(s32) = G_INTRINSIC_ROUNDEVEN %0
    $s0 = COPY %1(s32)
    RET_ReallyLR implicit $s0
...
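# s64 scalar: expected to pass through unchanged in both configurations.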
---
name: test_f64.roundeven
alignment: 4
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $d0
    ; NO-FP16-LABEL: name: test_f64.roundeven
    ; NO-FP16: liveins: $d0
    ; NO-FP16-NEXT: {{ $}}
    ; NO-FP16-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $d0
    ; NO-FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(s64) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; NO-FP16-NEXT: $d0 = COPY [[INTRINSIC_ROUNDEVEN]](s64)
    ; NO-FP16-NEXT: RET_ReallyLR implicit $d0
    ;
    ; FP16-LABEL: name: test_f64.roundeven
    ; FP16: liveins: $d0
    ; FP16-NEXT: {{ $}}
    ; FP16-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $d0
    ; FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(s64) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; FP16-NEXT: $d0 = COPY [[INTRINSIC_ROUNDEVEN]](s64)
    ; FP16-NEXT: RET_ReallyLR implicit $d0
    %0:_(s64) = COPY $d0
    %1:_(s64) = G_INTRINSIC_ROUNDEVEN %0
    $d0 = COPY %1(s64)
    RET_ReallyLR implicit $d0
...
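# <8 x s16>: without full FP16 the checks expect a split into two <4 x s16>
# halves that are widened to <4 x s32>, rounded, truncated, and re-concatenated;
# with +fullfp16 the <8 x s16> operation is expected to remain as-is.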
---
name: test_v8f16.roundeven
alignment: 4
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $q0
    ; NO-FP16-LABEL: name: test_v8f16.roundeven
    ; NO-FP16: liveins: $q0
    ; NO-FP16-NEXT: {{ $}}
    ; NO-FP16-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
    ; NO-FP16-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[COPY]](<8 x s16>)
    ; NO-FP16-NEXT: [[FPEXT:%[0-9]+]]:_(<4 x s32>) = G_FPEXT [[UV]](<4 x s16>)
    ; NO-FP16-NEXT: [[FPEXT1:%[0-9]+]]:_(<4 x s32>) = G_FPEXT [[UV1]](<4 x s16>)
    ; NO-FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_ROUNDEVEN [[FPEXT]]
    ; NO-FP16-NEXT: [[INTRINSIC_ROUNDEVEN1:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_ROUNDEVEN [[FPEXT1]]
    ; NO-FP16-NEXT: [[FPTRUNC:%[0-9]+]]:_(<4 x s16>) = G_FPTRUNC [[INTRINSIC_ROUNDEVEN]](<4 x s32>)
    ; NO-FP16-NEXT: [[FPTRUNC1:%[0-9]+]]:_(<4 x s16>) = G_FPTRUNC [[INTRINSIC_ROUNDEVEN1]](<4 x s32>)
    ; NO-FP16-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[FPTRUNC]](<4 x s16>), [[FPTRUNC1]](<4 x s16>)
    ; NO-FP16-NEXT: $q0 = COPY [[CONCAT_VECTORS]](<8 x s16>)
    ; NO-FP16-NEXT: RET_ReallyLR implicit $q0
    ;
    ; FP16-LABEL: name: test_v8f16.roundeven
    ; FP16: liveins: $q0
    ; FP16-NEXT: {{ $}}
    ; FP16-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
    ; FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(<8 x s16>) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; FP16-NEXT: $q0 = COPY [[INTRINSIC_ROUNDEVEN]](<8 x s16>)
    ; FP16-NEXT: RET_ReallyLR implicit $q0
    %0:_(<8 x s16>) = COPY $q0
    %1:_(<8 x s16>) = G_INTRINSIC_ROUNDEVEN %0
    $q0 = COPY %1(<8 x s16>)
    RET_ReallyLR implicit $q0
...
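# <4 x s16>: without full FP16, expected to widen to <4 x s32> and truncate
# back; legal as-is with +fullfp16.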
---
name: test_v4f16.roundeven
alignment: 4
tracksRegLiveness: true
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $d0
    ; NO-FP16-LABEL: name: test_v4f16.roundeven
    ; NO-FP16: liveins: $d0
    ; NO-FP16-NEXT: {{ $}}
    ; NO-FP16-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
    ; NO-FP16-NEXT: [[FPEXT:%[0-9]+]]:_(<4 x s32>) = G_FPEXT [[COPY]](<4 x s16>)
    ; NO-FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_ROUNDEVEN [[FPEXT]]
    ; NO-FP16-NEXT: [[FPTRUNC:%[0-9]+]]:_(<4 x s16>) = G_FPTRUNC [[INTRINSIC_ROUNDEVEN]](<4 x s32>)
    ; NO-FP16-NEXT: $d0 = COPY [[FPTRUNC]](<4 x s16>)
    ; NO-FP16-NEXT: RET_ReallyLR implicit $d0
    ;
    ; FP16-LABEL: name: test_v4f16.roundeven
    ; FP16: liveins: $d0
    ; FP16-NEXT: {{ $}}
    ; FP16-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
    ; FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(<4 x s16>) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; FP16-NEXT: $d0 = COPY [[INTRINSIC_ROUNDEVEN]](<4 x s16>)
    ; FP16-NEXT: RET_ReallyLR implicit $d0
    %0:_(<4 x s16>) = COPY $d0
    %1:_(<4 x s16>) = G_INTRINSIC_ROUNDEVEN %0
    $d0 = COPY %1(<4 x s16>)
    RET_ReallyLR implicit $d0
...
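# <2 x s32>: expected to be unchanged in both configurations.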
---
name: test_v2f32.roundeven
alignment: 4
tracksRegLiveness: true
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $d0
    ; NO-FP16-LABEL: name: test_v2f32.roundeven
    ; NO-FP16: liveins: $d0
    ; NO-FP16-NEXT: {{ $}}
    ; NO-FP16-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; NO-FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; NO-FP16-NEXT: $d0 = COPY [[INTRINSIC_ROUNDEVEN]](<2 x s32>)
    ; NO-FP16-NEXT: RET_ReallyLR implicit $d0
    ;
    ; FP16-LABEL: name: test_v2f32.roundeven
    ; FP16: liveins: $d0
    ; FP16-NEXT: {{ $}}
    ; FP16-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; FP16-NEXT: $d0 = COPY [[INTRINSIC_ROUNDEVEN]](<2 x s32>)
    ; FP16-NEXT: RET_ReallyLR implicit $d0
    %0:_(<2 x s32>) = COPY $d0
    %1:_(<2 x s32>) = G_INTRINSIC_ROUNDEVEN %0
    $d0 = COPY %1(<2 x s32>)
    RET_ReallyLR implicit $d0
...
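# <4 x s32>: expected to be unchanged in both configurations.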
---
name: test_v4f32.roundeven
alignment: 4
tracksRegLiveness: true
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $q0
    ; NO-FP16-LABEL: name: test_v4f32.roundeven
    ; NO-FP16: liveins: $q0
    ; NO-FP16-NEXT: {{ $}}
    ; NO-FP16-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; NO-FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; NO-FP16-NEXT: $q0 = COPY [[INTRINSIC_ROUNDEVEN]](<4 x s32>)
    ; NO-FP16-NEXT: RET_ReallyLR implicit $q0
    ;
    ; FP16-LABEL: name: test_v4f32.roundeven
    ; FP16: liveins: $q0
    ; FP16-NEXT: {{ $}}
    ; FP16-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; FP16-NEXT: $q0 = COPY [[INTRINSIC_ROUNDEVEN]](<4 x s32>)
    ; FP16-NEXT: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(<4 x s32>) = G_INTRINSIC_ROUNDEVEN %0
    $q0 = COPY %1(<4 x s32>)
    RET_ReallyLR implicit $q0
...
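# <2 x s64>: expected to be unchanged in both configurations.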
---
name: test_v2f64.roundeven
alignment: 4
tracksRegLiveness: true
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $q0
    ; NO-FP16-LABEL: name: test_v2f64.roundeven
    ; NO-FP16: liveins: $q0
    ; NO-FP16-NEXT: {{ $}}
    ; NO-FP16-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; NO-FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(<2 x s64>) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; NO-FP16-NEXT: $q0 = COPY [[INTRINSIC_ROUNDEVEN]](<2 x s64>)
    ; NO-FP16-NEXT: RET_ReallyLR implicit $q0
    ;
    ; FP16-LABEL: name: test_v2f64.roundeven
    ; FP16: liveins: $q0
    ; FP16-NEXT: {{ $}}
    ; FP16-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(<2 x s64>) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; FP16-NEXT: $q0 = COPY [[INTRINSIC_ROUNDEVEN]](<2 x s64>)
    ; FP16-NEXT: RET_ReallyLR implicit $q0
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = G_INTRINSIC_ROUNDEVEN %0
    $q0 = COPY %1(<2 x s64>)
    RET_ReallyLR implicit $q0
...
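# <4 x s64>: built from two <2 x s64> registers via G_CONCAT_VECTORS; the
# checks expect it to be split back into two <2 x s64> roundeven operations in
# both configurations.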
---
name: test_v4f64.roundeven
alignment: 4
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $q1
    ; NO-FP16-LABEL: name: test_v4f64.roundeven
    ; NO-FP16: liveins: $q0, $q1
    ; NO-FP16-NEXT: {{ $}}
    ; NO-FP16-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; NO-FP16-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; NO-FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(<2 x s64>) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; NO-FP16-NEXT: [[INTRINSIC_ROUNDEVEN1:%[0-9]+]]:_(<2 x s64>) = G_INTRINSIC_ROUNDEVEN [[COPY1]]
    ; NO-FP16-NEXT: $q0 = COPY [[INTRINSIC_ROUNDEVEN]](<2 x s64>)
    ; NO-FP16-NEXT: $q1 = COPY [[INTRINSIC_ROUNDEVEN1]](<2 x s64>)
    ; NO-FP16-NEXT: RET_ReallyLR implicit $q0
    ;
    ; FP16-LABEL: name: test_v4f64.roundeven
    ; FP16: liveins: $q0, $q1
    ; FP16-NEXT: {{ $}}
    ; FP16-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; FP16-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(<2 x s64>) = G_INTRINSIC_ROUNDEVEN [[COPY]]
    ; FP16-NEXT: [[INTRINSIC_ROUNDEVEN1:%[0-9]+]]:_(<2 x s64>) = G_INTRINSIC_ROUNDEVEN [[COPY1]]
    ; FP16-NEXT: $q0 = COPY [[INTRINSIC_ROUNDEVEN]](<2 x s64>)
    ; FP16-NEXT: $q1 = COPY [[INTRINSIC_ROUNDEVEN1]](<2 x s64>)
    ; FP16-NEXT: RET_ReallyLR implicit $q0
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %2:_(<4 x s64>) = G_CONCAT_VECTORS %0, %1
    %3:_(<4 x s64>) = G_INTRINSIC_ROUNDEVEN %2
    %4:_(<2 x s64>), %5:_(<2 x s64>) = G_UNMERGE_VALUES %3
    $q0 = COPY %4(<2 x s64>)
    $q1 = COPY %5(<2 x s64>)
    RET_ReallyLR implicit $q0
...
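# <2 x s16>: expected to be padded to <4 x s16> with undef lanes; without full
# FP16 the padded vector is additionally extended to 32-bit elements before
# rounding, then truncated and narrowed back to <2 x s16>.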
---
name: test_v2f16.roundeven
alignment: 4
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $s0, $s1
    ; NO-FP16-LABEL: name: test_v2f16.roundeven
    ; NO-FP16: liveins: $s0, $s1
    ; NO-FP16-NEXT: {{ $}}
    ; NO-FP16-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $s0
    ; NO-FP16-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
    ; NO-FP16-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
    ; NO-FP16-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UV]](s16), [[UV1]](s16), [[DEF]](s16), [[DEF]](s16)
    ; NO-FP16-NEXT: [[FPEXT:%[0-9]+]]:_(<4 x s32>) = G_FPEXT [[BUILD_VECTOR]](<4 x s16>)
    ; NO-FP16-NEXT: [[UV2:%[0-9]+]]:_(<2 x s32>), [[UV3:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[FPEXT]](<4 x s32>)
    ; NO-FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC_ROUNDEVEN [[UV2]]
    ; NO-FP16-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INTRINSIC_ROUNDEVEN]](<2 x s32>)
    ; NO-FP16-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
    ; NO-FP16-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[DEF1]](s32), [[DEF1]](s32)
    ; NO-FP16-NEXT: [[FPTRUNC:%[0-9]+]]:_(<4 x s16>) = G_FPTRUNC [[BUILD_VECTOR1]](<4 x s32>)
    ; NO-FP16-NEXT: [[UV6:%[0-9]+]]:_(<2 x s16>), [[UV7:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FPTRUNC]](<4 x s16>)
    ; NO-FP16-NEXT: $s0 = COPY [[UV6]](<2 x s16>)
    ; NO-FP16-NEXT: RET_ReallyLR implicit $s0
    ;
    ; FP16-LABEL: name: test_v2f16.roundeven
    ; FP16: liveins: $s0, $s1
    ; FP16-NEXT: {{ $}}
    ; FP16-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $s0
    ; FP16-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
    ; FP16-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
    ; FP16-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UV]](s16), [[UV1]](s16), [[DEF]](s16), [[DEF]](s16)
    ; FP16-NEXT: [[INTRINSIC_ROUNDEVEN:%[0-9]+]]:_(<4 x s16>) = G_INTRINSIC_ROUNDEVEN [[BUILD_VECTOR]]
    ; FP16-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INTRINSIC_ROUNDEVEN]](<4 x s16>)
    ; FP16-NEXT: $s0 = COPY [[UV2]](<2 x s16>)
    ; FP16-NEXT: RET_ReallyLR implicit $s0
    %0:_(<2 x s16>) = COPY $s0
    %1:_(<2 x s16>) = G_INTRINSIC_ROUNDEVEN %0
    $s0 = COPY %1(<2 x s16>)
    RET_ReallyLR implicit $s0