llvm/test/CodeGen/AArch64/replace-load-with-shrink-store-indexed-crash.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
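; Per the test name, this is a reduced reproducer for a crash in the combine
; that replaces a load with a shrunken store when that store is indexed
; (here the post-indexed `str w10, [x8], #4` below); the autogenerated checks
; pin the current codegen of the loop body.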
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "arm64-apple-macosx13.0.0"

define void @_Z1hP1f(ptr %j) {
; CHECK-LABEL: _Z1hP1f:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov x8, x0
; CHECK-NEXT:  .LBB0_1: // %for.body
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldr w9, [x8]
; CHECK-NEXT:    mov w10, w9
; CHECK-NEXT:    bfi w10, w9, #16, #16
; CHECK-NEXT:    str w10, [x8], #4
; CHECK-NEXT:    str w9, [x0]
; CHECK-NEXT:    b .LBB0_1
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv1 = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx3 = getelementptr i32, ptr %j, i64 %indvars.iv1
  %0 = load i32, ptr %arrayidx3, align 4
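  ; (%0 & 0xffff) * 0x10001 replicates the low half-word into both halves of
  ; the result, which is what the `bfi` in the checks above implements.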
  %and = and i32 %0, 65535
  %or = mul i32 %and, 65537
  store i32 %or, ptr %arrayidx3, align 4
  store i32 %0, ptr %j, align 4
  %indvars.iv.next = add i64 %indvars.iv1, 1
  br label %for.body
}