llvm/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-s1.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64 -O0 -run-pass=aarch64-prelegalizer-combiner -global-isel -verify-machineinstrs %s -o - | FileCheck %s

# Check that we don't try to combine a load smaller than s8, as that would end up creating an illegal non-extending load.
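# For comparison, on a byte-sized load the extending-load combine folds the extend into the
# load (a rough sketch of the usual transform, not checked by this test):
#
#   %1:_(s8) = G_LOAD %0(p0) :: (load (s8))
#   %2:_(s32) = G_ZEXT %1(s8)
#     -->
#   %2:_(s32) = G_ZEXTLOAD %0(p0) :: (load (s8))
#
# Here the load is s1, so the CHECK lines below expect the G_LOAD (s1) to survive untouched.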
--- |
  define i8 @test(ptr %ptr) {
    ret i8 undef
  }

...
---
name:            test
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
  - { id: 3, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $x0

    ; CHECK-LABEL: name: test
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[COPY]](p0) :: (load (s1) from %ir.ptr)
    ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s1)
    ; CHECK: $w0 = COPY [[ZEXT]](s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %0:_(p0) = COPY $x0
    %1:_(s1) = G_LOAD %0(p0) :: (load (s1) from %ir.ptr)
    %2:_(s8) = G_ZEXT %1(s1)
    %3:_(s32) = G_ANYEXT %2(s8)
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0

...