llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-fadd.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple=aarch64 -run-pass=legalizer -global-isel-abort=1 %s -o - | FileCheck %s
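# Test that the legalizer handles G_VECREDUCE_FADD: <2 x s32> and <2 x s64>
# sources are already legal and pass through, while <8 x s64> must first be
# broken down into legal <2 x s64> pieces.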

---
name:            fadd_v2s32
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $d0

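    ; <2 x s32> is a legal source type, so the reduction is left as-is.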
    ; CHECK-LABEL: name: fadd_v2s32
    ; CHECK: liveins: $d0
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK: [[VECREDUCE_FADD:%[0-9]+]]:_(s32) = G_VECREDUCE_FADD [[COPY]](<2 x s32>)
    ; CHECK: $w0 = COPY [[VECREDUCE_FADD]](s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %0:_(<2 x s32>) = COPY $d0
    %1:_(s32) = G_VECREDUCE_FADD %0(<2 x s32>)
    $w0 = COPY %1(s32)
    RET_ReallyLR implicit $w0

...
---
name:            fadd_v2s64
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $q0

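    ; <2 x s64> is likewise legal and passes through unchanged.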
    ; CHECK-LABEL: name: fadd_v2s64
    ; CHECK: liveins: $q0
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[VECREDUCE_FADD:%[0-9]+]]:_(s64) = G_VECREDUCE_FADD [[COPY]](<2 x s64>)
    ; CHECK: $x0 = COPY [[VECREDUCE_FADD]](s64)
    ; CHECK: RET_ReallyLR implicit $x0
    %0:_(<2 x s64>) = COPY $q0
    %1:_(s64) = G_VECREDUCE_FADD %0(<2 x s64>)
    $x0 = COPY %1(s64)
    RET_ReallyLR implicit $x0

...
---
name:            fadd_v8s64
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $q0, $q1, $q2, $q3
    ; This is a power-of-2 vector width, so the legalizer uses a tree reduction.
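    ; The <8 x s64> source is split into four <2 x s64> halves that are summed
    ; pairwise with G_FADD, and a single G_VECREDUCE_FADD reduces the final pair.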
    ; CHECK-LABEL: name: fadd_v8s64
    ; CHECK: liveins: $q0, $q1, $q2, $q3
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
    ; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY $q3
    ; CHECK: [[FADD:%[0-9]+]]:_(<2 x s64>) = G_FADD [[COPY]], [[COPY1]]
    ; CHECK: [[FADD1:%[0-9]+]]:_(<2 x s64>) = G_FADD [[COPY2]], [[COPY3]]
    ; CHECK: [[FADD2:%[0-9]+]]:_(<2 x s64>) = G_FADD [[FADD]], [[FADD1]]
    ; CHECK: [[VECREDUCE_FADD:%[0-9]+]]:_(s64) = G_VECREDUCE_FADD [[FADD2]](<2 x s64>)
    ; CHECK: $x0 = COPY [[VECREDUCE_FADD]](s64)
    ; CHECK: RET_ReallyLR implicit $x0
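    ; Build the illegal <8 x s64> input out of the four q-register operands.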
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %2:_(<2 x s64>) = COPY $q2
    %3:_(<2 x s64>) = COPY $q3
    %4:_(<4 x s64>) = G_CONCAT_VECTORS %0(<2 x s64>), %1(<2 x s64>)
    %5:_(<4 x s64>) = G_CONCAT_VECTORS %2(<2 x s64>), %3(<2 x s64>)
    %6:_(<8 x s64>) = G_CONCAT_VECTORS %4(<4 x s64>), %5(<4 x s64>)
    %7:_(s64) = G_VECREDUCE_FADD %6(<8 x s64>)
    $x0 = COPY %7(s64)
    RET_ReallyLR implicit $x0

...