llvm/test/Transforms/SLPVectorizer/AArch64/mul.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -S | FileCheck %s

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnu"

; These examples correspond to input code like:
;
;   void t(long * __restrict a, long * __restrict b) {
;     a[0] *= b[0];
;     a[1] *= b[1];
;   }
;
; If we SLP vectorise this, we end up with something like the following,
; because AArch64 has no 64-bit element vector multiply (there is no mul.2d):
; each lane has to be moved to a general-purpose register, multiplied there,
; and moved back:
;
;        ldr     q0, [x1]
;        ldr     q1, [x0]
;        fmov    x8, d0
;        mov     x10, v0.d[1]
;        fmov    x9, d1
;        mov     x11, v1.d[1]
;        mul     x8, x9, x8
;        mul     x9, x11, x10
;        fmov    d0, x8
;        mov     v0.d[1], x9
;        str     q0, [x0]
;        ret
;
; If we don't SLP vectorise and instead keep the code scalar, we get this
; shorter sequence instead:
;
;        ldp     x8, x9, [x1]
;        ldp     x10, x11, [x0]
;        mul     x9, x11, x9
;        mul     x8, x10, x8
;        stp     x8, x9, [x0]
;        ret
;
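; For reference, the vectorised IR that SLP would otherwise form is roughly
; the sketch below (hypothetical value names, for illustration only; the
; CHECK lines verify that this form is not produced):
;
;   %wb = load <2 x i64>, ptr %b, align 8
;   %wa = load <2 x i64>, ptr %a, align 8
;   %wm = mul nsw <2 x i64> %wa, %wb
;   store <2 x i64> %wm, ptr %a, align 8
;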
define void @mul(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) {
; CHECK-LABEL: @mul(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[B:%.*]], align 8
; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP1]], [[TMP0]]
; CHECK-NEXT:    store i64 [[MUL]], ptr [[A]], align 8
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 1
; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr [[ARRAYIDX2]], align 8
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT:    [[MUL4:%.*]] = mul nsw i64 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    store i64 [[MUL4]], ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %0 = load i64, ptr %b, align 8
  %1 = load i64, ptr %a, align 8
  %mul = mul nsw i64 %1, %0
  store i64 %mul, ptr %a, align 8
  %arrayidx2 = getelementptr inbounds i64, ptr %b, i64 1
  %2 = load i64, ptr %arrayidx2, align 8
  %arrayidx3 = getelementptr inbounds i64, ptr %a, i64 1
  %3 = load i64, ptr %arrayidx3, align 8
  %mul4 = mul nsw i64 %3, %2
  store i64 %mul4, ptr %arrayidx3, align 8
  ret void
}

; A similar example, but now a multiply-accumulate:
;
;  void x (long * __restrict a, long * __restrict b) {
;    a[0] *= b[0];
;    a[1] *= b[1];
;    a[0] += b[0];
;    a[1] += b[1];
;  }
;
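; As with @mul, the vectorised form SLP would otherwise produce is roughly
; the sketch below (hypothetical value names; the CHECK lines show that the
; scalar form is kept instead):
;
;   %wb = load <2 x i64>, ptr %b, align 8
;   %wa = load <2 x i64>, ptr %a, align 8
;   %wm = mul nsw <2 x i64> %wa, %wb
;   %ws = add nsw <2 x i64> %wm, %wb
;   store <2 x i64> %ws, ptr %a, align 8
;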
define void @mac(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) {
; CHECK-LABEL: @mac(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[B:%.*]], align 8
; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP1]], [[TMP0]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 1
; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr [[ARRAYIDX2]], align 8
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT:    [[MUL4:%.*]] = mul nsw i64 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[MUL]], [[TMP0]]
; CHECK-NEXT:    store i64 [[ADD]], ptr [[A]], align 8
; CHECK-NEXT:    [[ADD9:%.*]] = add nsw i64 [[MUL4]], [[TMP2]]
; CHECK-NEXT:    store i64 [[ADD9]], ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %0 = load i64, ptr %b, align 8
  %1 = load i64, ptr %a, align 8
  %mul = mul nsw i64 %1, %0
  %arrayidx2 = getelementptr inbounds i64, ptr %b, i64 1
  %2 = load i64, ptr %arrayidx2, align 8
  %arrayidx3 = getelementptr inbounds i64, ptr %a, i64 1
  %3 = load i64, ptr %arrayidx3, align 8
  %mul4 = mul nsw i64 %3, %2
  %add = add nsw i64 %mul, %0
  store i64 %add, ptr %a, align 8
  %add9 = add nsw i64 %mul4, %2
  store i64 %add9, ptr %arrayidx3, align 8
  ret void
}