llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll

; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes='print<access-info>' -disable-output  < %s 2>&1 | FileCheck %s

; Check that loop-independent forward dependences are discovered properly.
;
; FIXME: This does not actually always work, which is pretty confusing.  Right
; now there is a hack in LAA that tries to figure out loop-independent forward
; dependences *outside* of the MemoryDepChecker logic (i.e. proper dependence
; analysis).
;
; Therefore, if there are only loop-independent dependences for an array
; (i.e. the same index is used; see the sketch after the loop below), we
; don't discover the forward dependence.  So, at ***, we add another
; non-i-based access of A to trigger MemoryDepChecker analysis for the
; accesses of A.
;
;   for (unsigned i = 0; i < N; i++) {
;     A[i + 1] = B[i] + 1;   // ***
;     A[i] = B[i] + 2;
;     C[i] = A[i] * 2;
;   }
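;
; For contrast, the loop-independent-only case described in the FIXME above
; would look like the sketch below (illustration only, not part of the IR in
; this test): every access of A uses the same index, so the forward
; dependence is not found by the MemoryDepChecker logic itself.
;
;   for (unsigned i = 0; i < N; i++) {
;     A[i] = B[i] + 2;
;     C[i] = A[i] * 2;
;   }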

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

define void @f(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
; CHECK-LABEL: 'f'
; CHECK-NEXT:    for.body:
; CHECK-NEXT:      Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
; CHECK-NEXT:  Forward loop carried data dependence that prevents store-to-load forwarding.
; CHECK-NEXT:      Dependences:
; CHECK-NEXT:        Forward:
; CHECK-NEXT:            store i32 %b_p1, ptr %Aidx, align 4 ->
; CHECK-NEXT:            %a = load i32, ptr %Aidx, align 4
; CHECK-EMPTY:
; CHECK-NEXT:        ForwardButPreventsForwarding:
; CHECK-NEXT:            store i32 %b_p2, ptr %Aidx_next, align 4 ->
; CHECK-NEXT:            %a = load i32, ptr %Aidx, align 4
; CHECK-EMPTY:
; CHECK-NEXT:        Forward:
; CHECK-NEXT:            store i32 %b_p2, ptr %Aidx_next, align 4 ->
; CHECK-NEXT:            store i32 %b_p1, ptr %Aidx, align 4
; CHECK-EMPTY:
; CHECK-NEXT:      Run-time memory checks:
; CHECK-NEXT:      Grouped accesses:
; CHECK-EMPTY:
; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT:      SCEV assumptions:
; CHECK-EMPTY:
; CHECK-NEXT:      Expressions re-written:
;

entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1

  %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
  %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
  %Aidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
  %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv

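  ; A[i + 1] = B[i] + 1   (the *** access)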
  %b = load i32, ptr %Bidx, align 4
  %b_p2 = add i32 %b, 1
  store i32 %b_p2, ptr %Aidx_next, align 4

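  ; A[i] = B[i] + 2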
  %b_p1 = add i32 %b, 2
  store i32 %b_p1, ptr %Aidx, align 4

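  ; C[i] = A[i] * 2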
  %a = load i32, ptr %Aidx, align 4
  %c = mul i32 %a, 2
  store i32 %c, ptr %Cidx, align 4

  %exitcond = icmp eq i64 %indvars.iv.next, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}