; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; Ensure that a no-op 'and' after an extending load gets removed when the
; 'and' is constructed via a splat_vector node.
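; With the fold applied, the expected output is just a single zero-extending
; ld1w and no separate 'and' instruction, as the CHECK lines below verify.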
define <vscale x 2 x i64> @fold_loadext_and(ptr %ptr, i32 %needle, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: fold_loadext_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = load <vscale x 2 x i32>, ptr %ptr, align 4
  %ext = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
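  ; 4294967295 is 0xFFFFFFFF; the zext already guarantees that the top 32 bits
  ; are zero, so this 'and' should fold away entirely.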
  %and = and <vscale x 2 x i64> %ext, splat(i64 4294967295)
  ret <vscale x 2 x i64> %and
}

; Same as above, but testing the case we actually care about: here the
; <vscale x 2 x i32> types get legalized into <vscale x 2 x i64> types, which
; introduces extending-load and 'and' nodes similar to those in the case above.
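; As the CHECK lines below show, the extending part of the legalized load is
; expected to be folded into the ld1w itself, so only the other compare
; operand (%a) still needs an explicit 'and' with 0xffffffff before the cmpeq.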
define <vscale x 2 x i1> @fold_loadext_and_legalize(ptr %ptr, <vscale x 2 x i32> %a) #0 {
; CHECK-LABEL: fold_loadext_and_legalize:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0]
; CHECK-NEXT:    cmpeq p0.d, p0/z, z1.d, z0.d
; CHECK-NEXT:    ret
  %load = load <vscale x 2 x i32>, ptr %ptr
  %cmp = icmp eq <vscale x 2 x i32> %load, %a
  ret <vscale x 2 x i1> %cmp
}

attributes #0 = { "target-features"="+sve" }