; llvm/test/Transforms/SLPVectorizer/X86/shuffled-gathers-diff-size.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux -slp-threshold=-2 | FileCheck %s

define void @foo(ptr noalias nocapture writeonly %B, ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %C, i32 %n, i32 %m) {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A:%.*]], align 4
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP0]], [[N:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[C:%.*]], align 4
; CHECK-NEXT:    [[MUL2:%.*]] = mul nsw i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[MUL2]], [[MUL]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 1
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4
; CHECK-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[ADD]], [[TMP2]]
; CHECK-NEXT:    store i32 [[MUL4]], ptr [[B:%.*]], align 4
; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX8]], align 4
; CHECK-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[TMP3]], [[M]]
; CHECK-NEXT:    [[ADD10:%.*]] = add nsw i32 [[MUL9]], [[MUL]]
; CHECK-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 2
; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX11]], align 4
; CHECK-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[ADD10]], [[TMP4]]
; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 1
; CHECK-NEXT:    store i32 [[MUL12]], ptr [[ARRAYIDX13]], align 4
; CHECK-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[TMP2]], [[N]]
; CHECK-NEXT:    [[MUL17:%.*]] = mul nsw i32 [[TMP4]], [[M]]
; CHECK-NEXT:    [[ADD18:%.*]] = add nsw i32 [[MUL17]], [[MUL15]]
; CHECK-NEXT:    [[MUL20:%.*]] = mul nsw i32 [[ADD18]], [[TMP0]]
; CHECK-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 2
; CHECK-NEXT:    store i32 [[MUL20]], ptr [[ARRAYIDX21]], align 4
; CHECK-NEXT:    [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 3
; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX24]], align 4
; CHECK-NEXT:    [[MUL25:%.*]] = mul nsw i32 [[TMP5]], [[M]]
; CHECK-NEXT:    [[ADD26:%.*]] = add nsw i32 [[MUL25]], [[MUL15]]
; CHECK-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[ADD26]], [[TMP1]]
; CHECK-NEXT:    [[ARRAYIDX29:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 3
; CHECK-NEXT:    store i32 [[MUL28]], ptr [[ARRAYIDX29]], align 4
; CHECK-NEXT:    ret void
;
; The input computes four stores of the form B[i] = (C[x]*m + A[y]*n) * z,
; where the loads from A[0..1] and C[0..3] are reused across the stores in
; different (shuffled) positions and group sizes. Even with the permissive
; -slp-threshold=-2, the autogenerated CHECK lines above contain no vector
; instructions: the SLP vectorizer leaves this code fully scalar.
entry:
  ; B[0] = (C[0]*m + A[0]*n) * A[1]
  %0 = load i32, ptr %A, align 4
  %mul = mul nsw i32 %0, %n
  %1 = load i32, ptr %C, align 4
  %mul2 = mul nsw i32 %1, %m
  %add = add nsw i32 %mul2, %mul
  %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 1
  %2 = load i32, ptr %arrayidx3, align 4
  %mul4 = mul nsw i32 %add, %2
  store i32 %mul4, ptr %B, align 4
  ; B[1] = (C[1]*m + A[0]*n) * C[2] — reuses %mul (A[0]*n)
  %arrayidx8 = getelementptr inbounds i32, ptr %C, i64 1
  %3 = load i32, ptr %arrayidx8, align 4
  %mul9 = mul nsw i32 %3, %m
  %add10 = add nsw i32 %mul9, %mul
  %arrayidx11 = getelementptr inbounds i32, ptr %C, i64 2
  %4 = load i32, ptr %arrayidx11, align 4
  %mul12 = mul nsw i32 %add10, %4
  %arrayidx13 = getelementptr inbounds i32, ptr %B, i64 1
  store i32 %mul12, ptr %arrayidx13, align 4
  ; B[2] = (C[2]*m + A[1]*n) * A[0] — reuses loads %0 (A[0]), %2 (A[1]), %4 (C[2])
  %mul15 = mul nsw i32 %2, %n
  %mul17 = mul nsw i32 %4, %m
  %add18 = add nsw i32 %mul17, %mul15
  %mul20 = mul nsw i32 %add18, %0
  %arrayidx21 = getelementptr inbounds i32, ptr %B, i64 2
  store i32 %mul20, ptr %arrayidx21, align 4
  ; B[3] = (C[3]*m + A[1]*n) * C[0] — reuses %mul15 (A[1]*n) and %1 (C[0])
  %arrayidx24 = getelementptr inbounds i32, ptr %C, i64 3
  %5 = load i32, ptr %arrayidx24, align 4
  %mul25 = mul nsw i32 %5, %m
  %add26 = add nsw i32 %mul25, %mul15
  %mul28 = mul nsw i32 %add26, %1
  %arrayidx29 = getelementptr inbounds i32, ptr %B, i64 3
  store i32 %mul28, ptr %arrayidx29, align 4
  ret void
}