; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -S -pass-remarks-missed=slp-vectorizer 2>&1 | FileCheck %s
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-unknown-linux-gnu"
; This test checks that the SLP vectorizer does not try to vectorize instructions that are already vectorized.
define void @vector() {
; CHECK-LABEL: @vector(
; CHECK-NEXT: [[LOAD0:%.*]] = tail call <16 x i8> @vector.load(ptr undef, i32 1)
; CHECK-NEXT: [[LOAD1:%.*]] = tail call <16 x i8> @vector.load(ptr undef, i32 2)
; CHECK-NEXT: [[ADD:%.*]] = add <16 x i8> [[LOAD1]], [[LOAD0]]
; CHECK-NEXT: tail call void @vector.store(<16 x i8> [[ADD]], ptr undef, i32 1)
; CHECK-NEXT: ret void
;
  ; The loads, add, and store below already operate on <16 x i8> vectors.
  ; The CHECK lines above assert the IR is emitted unchanged, i.e. the SLP
  ; pass leaves pre-vectorized code alone rather than re-vectorizing it.
  %load0 = tail call <16 x i8> @vector.load(ptr undef, i32 1)
  %load1 = tail call <16 x i8> @vector.load(ptr undef, i32 2)
  %add = add <16 x i8> %load1, %load0
  tail call void @vector.store(<16 x i8> %add, ptr undef, i32 1)
  ret void
}
; External vector load/store helpers; only their signatures matter here —
; they model opaque calls that produce/consume <16 x i8> values.
declare <16 x i8> @vector.load(ptr, i32)
declare void @vector.store(<16 x i8>, ptr, i32)