; llvm/test/Transforms/DeadStoreElimination/simple.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=dse -S | FileCheck %s
; RUN: opt < %s -aa-pipeline=basic-aa -passes=dse -S | FileCheck %s
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"

declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture, i8, i64, i32) nounwind
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i32) nounwind
declare void @llvm.init.trampoline(ptr, ptr, ptr)
declare void @llvm.matrix.column.major.store(<6 x float>, ptr, i64, i1, i32, i32)

define void @test1(ptr %Q, ptr %P) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  %DEAD = load i32, ptr %Q
  store i32 %DEAD, ptr %P
  store i32 0, ptr %P
  ret void
}

; PR8677
@g = global i32 1

define i32 @test3(ptr %g_addr) nounwind {
; CHECK-LABEL: @test3(
; CHECK-NEXT:    [[G_VALUE:%.*]] = load i32, ptr [[G_ADDR:%.*]], align 4
; CHECK-NEXT:    store i32 -1, ptr @g, align 4
; CHECK-NEXT:    store i32 [[G_VALUE]], ptr [[G_ADDR]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr @g, align 4
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %g_value = load i32, ptr %g_addr, align 4
  store i32 -1, ptr @g, align 4
  store i32 %g_value, ptr %g_addr, align 4
  %tmp3 = load i32, ptr @g, align 4
  ret i32 %tmp3
}


define void @test4(ptr %Q) {
; CHECK-LABEL: @test4(
; CHECK-NEXT:    [[A:%.*]] = load i32, ptr [[Q:%.*]], align 4
; CHECK-NEXT:    store volatile i32 [[A]], ptr [[Q]], align 4
; CHECK-NEXT:    ret void
;
  %a = load i32, ptr %Q
  store volatile i32 %a, ptr %Q
  ret void
}

; PR8576 - Should delete the store of 10 even though %p and %q may alias.
define void @test2(ptr %p, ptr %q) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    store i32 20, ptr [[Q:%.*]], align 4
; CHECK-NEXT:    store i32 30, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  store i32 10, ptr %p, align 4
  store i32 20, ptr %q, align 4
  store i32 30, ptr %p, align 4
  ret void
}

; Should delete the store of 10 even though the memset may store to %p (%p and
; %q may alias).
define void @test6(ptr %p, ptr %q) {
; CHECK-LABEL: @test6(
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[Q:%.*]], i8 42, i64 900, i1 false)
; CHECK-NEXT:    store i32 30, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  store i32 10, ptr %p, align 4       ;; dead.
  call void @llvm.memset.p0.i64(ptr %q, i8 42, i64 900, i1 false)
  store i32 30, ptr %p, align 4
  ret void
}
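
; A companion sketch, not part of the original test (hypothetical function, no
; CHECK lines are asserted for it): when the memset writes through %p itself,
; it fully covers the preceding store, which may then be removed as well.
define void @test6_must_alias_sketch(ptr %p) {
  store i32 10, ptr %p, align 4       ;; fully covered by the memset below.
  call void @llvm.memset.p0.i64(ptr %p, i8 42, i64 900, i1 false)
  ret void
}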

; Should delete the store of 10 even though the memset may store to %p (%p and
; %q may alias).
define void @test6_atomic(ptr align 4 %p, ptr align 4 %q) {
; CHECK-LABEL: @test6_atomic(
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[Q:%.*]], i8 42, i64 900, i32 4)
; CHECK-NEXT:    store atomic i32 30, ptr [[P:%.*]] unordered, align 4
; CHECK-NEXT:    ret void
;
  store atomic i32 10, ptr %p unordered, align 4       ;; dead.
  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %q, i8 42, i64 900, i32 4)
  store atomic i32 30, ptr %p unordered, align 4
  ret void
}

; Should delete the store of 10 even though the memcpy may store to %p (%p and
; %q may alias).
define void @test7(ptr %p, ptr %q, ptr noalias %r) {
; CHECK-LABEL: @test7(
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[Q:%.*]], ptr [[R:%.*]], i64 900, i1 false)
; CHECK-NEXT:    store i32 30, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  store i32 10, ptr %p, align 4       ;; dead.
  call void @llvm.memcpy.p0.p0.i64(ptr %q, ptr %r, i64 900, i1 false)
  store i32 30, ptr %p, align 4
  ret void
}

; Should delete the store of 10 even though the memcpy may store to %p (%p and
; %q may alias).
define void @test7_atomic(ptr align 4 %p, ptr align 4 %q, ptr noalias align 4 %r) {
; CHECK-LABEL: @test7_atomic(
; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 4 [[Q:%.*]], ptr align 4 [[R:%.*]], i64 900, i32 4)
; CHECK-NEXT:    store atomic i32 30, ptr [[P:%.*]] unordered, align 4
; CHECK-NEXT:    ret void
;
  store atomic i32 10, ptr %p unordered, align 4       ;; dead.
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 4 %q, ptr align 4 %r, i64 900, i32 4)
  store atomic i32 30, ptr %p unordered, align 4
  ret void
}

; Do not delete stores that are only partially killed.
define i32 @test8() {
; CHECK-LABEL: @test8(
; CHECK-NEXT:    [[V:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 1234567, ptr [[V]], align 4
; CHECK-NEXT:    [[X:%.*]] = load i32, ptr [[V]], align 4
; CHECK-NEXT:    ret i32 [[X]]
;
  %V = alloca i32
  store i32 1234567, ptr %V
  store i8 0, ptr %V
  %X = load i32, ptr %V
  ret i32 %X

}
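
; Companion sketch for contrast (hypothetical function, no CHECK lines
; asserted): here the later i32 store fully covers the earlier i8 store, so
; the i8 store may be deleted.
define i32 @test8_full_kill_sketch() {
  %V = alloca i32
  store i8 0, ptr %V                  ;; fully killed by the i32 store below.
  store i32 1234567, ptr %V
  %X = load i32, ptr %V
  ret i32 %X
}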

; Test for byval handling.
%struct.x = type { i32, i32, i32, i32 }
define void @test9(ptr byval(%struct.x)  %a) nounwind  {
; CHECK-LABEL: @test9(
; CHECK-NEXT:    ret void
;
  store i32 1, ptr %a, align 4
  ret void
}

; Test for inalloca handling.
define void @test9_2(ptr inalloca(%struct.x) %a) nounwind {
; CHECK-LABEL: @test9_2(
; CHECK-NEXT:    ret void
;
  store i32 1, ptr %a, align 4
  ret void
}

; Test for preallocated handling.
define void @test9_3(ptr preallocated(%struct.x)  %a) nounwind  {
; CHECK-LABEL: @test9_3(
; CHECK-NEXT:    ret void
;
  store i32 1, ptr %a, align 4
  ret void
}

; va_arg has a fuzzy dependence, so the store shouldn't be zapped.
define double @test10(ptr %X) {
; CHECK-LABEL: @test10(
; CHECK-NEXT:    [[X_ADDR:%.*]] = alloca ptr, align 8
; CHECK-NEXT:    store ptr [[X:%.*]], ptr [[X_ADDR]], align 8
; CHECK-NEXT:    [[TMP_0:%.*]] = va_arg ptr [[X_ADDR]], double
; CHECK-NEXT:    ret double [[TMP_0]]
;
  %X_addr = alloca ptr
  store ptr %X, ptr %X_addr
  %tmp.0 = va_arg ptr %X_addr, double
  ret double %tmp.0
}

; DSE should delete the dead trampoline.
declare void @test11f()
define void @test11() {
; CHECK-LABEL: @test11(
; CHECK-NEXT:    ret void
;
  %storage = alloca [10 x i8], align 16		; <ptr> [#uses=1]
  %cast = getelementptr [10 x i8], ptr %storage, i32 0, i32 0		; <ptr> [#uses=1]
  call void @llvm.init.trampoline( ptr %cast, ptr @test11f, ptr null )		; <ptr> [#uses=1]
  ret void
}

; Specialized store intrinsics should be removed if dead.
define void @test_matrix_store(i64 %stride) {
; CHECK-LABEL: @test_matrix_store(
; CHECK-NEXT:    ret void
;
  %a = alloca [6 x float]
  call void @llvm.matrix.column.major.store(<6 x float> zeroinitializer, ptr %a, i64 %stride, i1 false, i32 3, i32 2)
  ret void
}

; %ptr doesn't escape, so the DEAD instructions should be removed.
declare void @may_unwind()
define ptr @test_malloc_no_escape_before_return() {
; CHECK-LABEL: @test_malloc_no_escape_before_return(
; CHECK-NEXT:    [[PTR:%.*]] = tail call ptr @malloc(i64 4)
; CHECK-NEXT:    call void @may_unwind()
; CHECK-NEXT:    store i32 0, ptr [[PTR]], align 4
; CHECK-NEXT:    ret ptr [[PTR]]
;
  %ptr = tail call ptr @malloc(i64 4)
  %DEAD = load i32, ptr %ptr
  %DEAD2 = add i32 %DEAD, 1
  store i32 %DEAD2, ptr %ptr
  call void @may_unwind()
  store i32 0, ptr %ptr
  ret ptr %ptr
}

define ptr @test_custom_malloc_no_escape_before_return() {
; CHECK-LABEL: @test_custom_malloc_no_escape_before_return(
; CHECK-NEXT:    [[PTR:%.*]] = tail call ptr @custom_malloc(i32 4)
; CHECK-NEXT:    call void @may_unwind()
; CHECK-NEXT:    store i32 0, ptr [[PTR]], align 4
; CHECK-NEXT:    ret ptr [[PTR]]
;
  %ptr = tail call ptr @custom_malloc(i32 4)
  %DEAD = load i32, ptr %ptr
  %DEAD2 = add i32 %DEAD, 1
  store i32 %DEAD2, ptr %ptr
  call void @may_unwind()
  store i32 0, ptr %ptr
  ret ptr %ptr
}

define ptr addrspace(1) @test13_addrspacecast() {
; CHECK-LABEL: @test13_addrspacecast(
; CHECK-NEXT:    [[P:%.*]] = tail call ptr @malloc(i64 4)
; CHECK-NEXT:    [[P_AC:%.*]] = addrspacecast ptr [[P]] to ptr addrspace(1)
; CHECK-NEXT:    call void @may_unwind()
; CHECK-NEXT:    store i32 0, ptr addrspace(1) [[P_AC]], align 4
; CHECK-NEXT:    ret ptr addrspace(1) [[P_AC]]
;
  %p = tail call ptr @malloc(i64 4)
  %p.ac = addrspacecast ptr %p to ptr addrspace(1)
  %DEAD = load i32, ptr addrspace(1) %p.ac
  %DEAD2 = add i32 %DEAD, 1
  store i32 %DEAD2, ptr addrspace(1) %p.ac
  call void @may_unwind()
  store i32 0, ptr addrspace(1) %p.ac
  ret ptr addrspace(1) %p.ac
}


declare noalias ptr @malloc(i64) willreturn allockind("alloc,uninitialized")
declare noalias ptr @custom_malloc(i32) willreturn
declare noalias ptr @calloc(i64, i64) willreturn allockind("alloc,zeroed")

define void @test14(ptr %Q) {
; CHECK-LABEL: @test14(
; CHECK-NEXT:    ret void
;
  %P = alloca i32
  %DEAD = load i32, ptr %Q
  store i32 %DEAD, ptr %P
  ret void

}

; The store here is not dead because the byval call reads it.
declare void @test19f(ptr byval({i32}) align 4 %P)

define void @test19(ptr nocapture byval({i32}) align 4 %arg5) nounwind ssp {
; CHECK-LABEL: @test19(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    store i32 912, ptr [[ARG5:%.*]], align 4
; CHECK-NEXT:    call void @test19f(ptr byval({ i32 }) align 4 [[ARG5]])
; CHECK-NEXT:    ret void
;
bb:
  store i32 912, ptr %arg5
  call void @test19f(ptr byval({i32}) align 4 %arg5)
  ret void

}

define void @malloc_no_escape() {
; CHECK-LABEL: @malloc_no_escape(
; CHECK-NEXT:    ret void
;
  %m = call ptr @malloc(i64 24)
  store i8 0, ptr %m
  ret void
}

define void @custom_malloc_no_escape() {
; CHECK-LABEL: @custom_malloc_no_escape(
; CHECK-NEXT:    [[M:%.*]] = call ptr @custom_malloc(i32 24)
; CHECK-NEXT:    ret void
;
  %m = call ptr @custom_malloc(i32 24)
  store i8 0, ptr %m
  ret void
}

define void @test21() {
; CHECK-LABEL: @test21(
; CHECK-NEXT:    ret void
;
  %m = call ptr @calloc(i64 9, i64 7)
  store i8 0, ptr %m
  ret void
}

; Currently, elimination of stores at the end of a function is limited to a
; single underlying object, for compile-time reasons. This case does not
; appear to be very important in practice.
define void @test22(i1 %i, i32 %k, i32 %m) nounwind {
; CHECK-LABEL: @test22(
; CHECK-NEXT:    [[K_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[M_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[K_ADDR_M_ADDR:%.*]] = select i1 [[I:%.*]], ptr [[K_ADDR]], ptr [[M_ADDR]]
; CHECK-NEXT:    store i32 0, ptr [[K_ADDR_M_ADDR]], align 4
; CHECK-NEXT:    ret void
;
  %k.addr = alloca i32
  %m.addr = alloca i32
  %k.addr.m.addr = select i1 %i, ptr %k.addr, ptr %m.addr
  store i32 0, ptr %k.addr.m.addr, align 4
  ret void
}
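
; Companion sketch (hypothetical function, no CHECK lines asserted): with a
; single underlying object, the end-of-function store may be eliminated.
define void @test22_single_object_sketch(i32 %k) nounwind {
  %k.addr = alloca i32
  store i32 %k, ptr %k.addr, align 4
  ret void
}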

; PR13547
declare noalias ptr @strdup(ptr nocapture) nounwind
define noalias ptr @test23() nounwind uwtable ssp {
; CHECK-LABEL: @test23(
; CHECK-NEXT:    [[X:%.*]] = alloca [2 x i8], align 1
; CHECK-NEXT:    store i8 97, ptr [[X]], align 1
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x i8], ptr [[X]], i64 0, i64 1
; CHECK-NEXT:    store i8 0, ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT:    [[CALL:%.*]] = call ptr @strdup(ptr [[X]]) #[[ATTR5:[0-9]+]]
; CHECK-NEXT:    ret ptr [[CALL]]
;
  %x = alloca [2 x i8], align 1
  store i8 97, ptr %x, align 1
  %arrayidx1 = getelementptr inbounds [2 x i8], ptr %x, i64 0, i64 1
  store i8 0, ptr %arrayidx1, align 1
  %call = call ptr @strdup(ptr %x) nounwind
  ret ptr %call
}

; Make sure the same-sized store to the later element is deleted.
define void @test24(ptr %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: @test24(
; CHECK-NEXT:    store i32 [[B:%.*]], ptr [[A:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [2 x i32], ptr [[A]], i64 0, i64 1
; CHECK-NEXT:    store i32 [[C:%.*]], ptr [[TMP1]], align 4
; CHECK-NEXT:    ret void
;
  store i32 0, ptr %a, align 4
  %1 = getelementptr inbounds [2 x i32], ptr %a, i64 0, i64 1
  store i32 0, ptr %1, align 4
  store i32 %b, ptr %a, align 4
  %2 = getelementptr inbounds [2 x i32], ptr %a, i64 0, i64 1
  store i32 %c, ptr %2, align 4
  ret void
}

; Check another case like PR13547 where strdup is not like malloc: it reads
; its argument.
define ptr @test25(ptr %p) nounwind {
; CHECK-LABEL: @test25(
; CHECK-NEXT:    [[P_4:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 4
; CHECK-NEXT:    [[TMP:%.*]] = load i8, ptr [[P_4]], align 1
; CHECK-NEXT:    store i8 0, ptr [[P_4]], align 1
; CHECK-NEXT:    [[Q:%.*]] = call ptr @strdup(ptr [[P]]) #[[ATTR13:[0-9]+]]
; CHECK-NEXT:    store i8 [[TMP]], ptr [[P_4]], align 1
; CHECK-NEXT:    ret ptr [[Q]]
;
  %p.4 = getelementptr i8, ptr %p, i64 4
  %tmp = load i8, ptr %p.4, align 1
  store i8 0, ptr %p.4, align 1
  %q = call ptr @strdup(ptr %p) nounwind optsize
  store i8 %tmp, ptr %p.4, align 1
  ret ptr %q
}

; Don't remove the redundant store because of a may-aliasing store.
define i32 @test28(i1 %c, ptr %p, ptr %p2, i32 %i) {
; CHECK-LABEL: @test28(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT:    store i32 [[I:%.*]], ptr [[P2:%.*]], align 4
; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK:       bb1:
; CHECK-NEXT:    br label [[BB3:%.*]]
; CHECK:       bb2:
; CHECK-NEXT:    br label [[BB3]]
; CHECK:       bb3:
; CHECK-NEXT:    store i32 [[V]], ptr [[P]], align 4
; CHECK-NEXT:    ret i32 0
;
entry:
  %v = load i32, ptr %p, align 4

  ; Might overwrite value at %p
  store i32 %i, ptr %p2, align 4
  br i1 %c, label %bb1, label %bb2
bb1:
  br label %bb3
bb2:
  br label %bb3
bb3:
  store i32 %v, ptr %p, align 4
  ret i32 0
}

; Don't remove the redundant store because of a may-aliasing store.
define i32 @test29(i1 %c, ptr %p, ptr %p2, i32 %i) {
; CHECK-LABEL: @test29(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK:       bb1:
; CHECK-NEXT:    br label [[BB3:%.*]]
; CHECK:       bb2:
; CHECK-NEXT:    store i32 [[I:%.*]], ptr [[P2:%.*]], align 4
; CHECK-NEXT:    br label [[BB3]]
; CHECK:       bb3:
; CHECK-NEXT:    store i32 [[V]], ptr [[P]], align 4
; CHECK-NEXT:    ret i32 0
;
entry:
  %v = load i32, ptr %p, align 4
  br i1 %c, label %bb1, label %bb2
bb1:
  br label %bb3
bb2:
  ; Might overwrite value at %p
  store i32 %i, ptr %p2, align 4
  br label %bb3
bb3:
  store i32 %v, ptr %p, align 4
  ret i32 0
}

declare void @unknown_func()

; Don't remove the redundant store because of the unknown call.
define i32 @test30(i1 %c, ptr %p, i32 %i) {
; CHECK-LABEL: @test30(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK:       bb1:
; CHECK-NEXT:    br label [[BB3:%.*]]
; CHECK:       bb2:
; CHECK-NEXT:    call void @unknown_func()
; CHECK-NEXT:    br label [[BB3]]
; CHECK:       bb3:
; CHECK-NEXT:    store i32 [[V]], ptr [[P]], align 4
; CHECK-NEXT:    ret i32 0
;
entry:
  %v = load i32, ptr %p, align 4
  br i1 %c, label %bb1, label %bb2
bb1:
  br label %bb3
bb2:
  ; Might overwrite value at %p
  call void @unknown_func()
  br label %bb3
bb3:
  store i32 %v, ptr %p, align 4
  ret i32 0
}

; Don't remove the redundant store in a loop with a may-aliasing store.
define i32 @test32(i1 %c, ptr %p, i32 %i) {
; CHECK-LABEL: @test32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT:    br label [[BB1:%.*]]
; CHECK:       bb1:
; CHECK-NEXT:    store i32 [[V]], ptr [[P]], align 4
; CHECK-NEXT:    call void @unknown_func()
; CHECK-NEXT:    br i1 undef, label [[BB1]], label [[BB2:%.*]]
; CHECK:       bb2:
; CHECK-NEXT:    ret i32 0
;
entry:
  %v = load i32, ptr %p, align 4
  br label %bb1
bb1:
  store i32 %v, ptr %p, align 4
  ; Might read and overwrite value at %p
  call void @unknown_func()
  br i1 undef, label %bb1, label %bb2
bb2:
  ret i32 0
}

; We cannot remove any stores, because @unknown_func may unwind and the caller
; may read %p while unwinding.
define void @test34(ptr noalias %p) {
; CHECK-LABEL: @test34(
; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
; CHECK-NEXT:    call void @unknown_func()
; CHECK-NEXT:    store i32 0, ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
  store i32 1, ptr %p
  call void @unknown_func()
  store i32 0, ptr %p
  ret void
}

; Same as previous case, but with a dead_on_unwind argument.
define void @test34_dead_on_unwind(ptr noalias dead_on_unwind %p) {
; CHECK-LABEL: @test34_dead_on_unwind(
; CHECK-NEXT:    call void @unknown_func()
; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  store i32 1, ptr %p
  call void @unknown_func()
  store i32 0, ptr %p
  ret void
}

; Remove the redundant store even with an unwinding function in the same block.
define void @test35(ptr noalias %p) {
; CHECK-LABEL: @test35(
; CHECK-NEXT:    call void @unknown_func()
; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  call void @unknown_func()
  store i32 1, ptr %p
  store i32 0, ptr %p
  ret void
}

; We cannot optimize away the first memmove since %P could overlap with %Q.
define void @test36(ptr %P, ptr %Q) {
; CHECK-LABEL: @test36(
; CHECK-NEXT:    tail call void @llvm.memmove.p0.p0.i64(ptr [[P:%.*]], ptr [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    tail call void @llvm.memmove.p0.p0.i64(ptr [[P]], ptr [[Q]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;

  tail call void @llvm.memmove.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
  tail call void @llvm.memmove.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
  ret void
}

define void @test36_atomic(ptr %P, ptr %Q) {
; CHECK-LABEL: @test36_atomic(
; CHECK-NEXT:    tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
; CHECK-NEXT:    tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P]], ptr align 1 [[Q]], i64 12, i32 1)
; CHECK-NEXT:    ret void
;

  tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
  tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
  ret void
}

define void @test37(ptr %P, ptr %Q, ptr %R) {
; CHECK-LABEL: @test37(
; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P:%.*]], ptr [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    tail call void @llvm.memmove.p0.p0.i64(ptr [[P]], ptr [[R:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;

  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
  tail call void @llvm.memmove.p0.p0.i64(ptr %P, ptr %R, i64 12, i1 false)
  ret void
}

define void @test37_atomic(ptr %P, ptr %Q, ptr %R) {
; CHECK-LABEL: @test37_atomic(
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
; CHECK-NEXT:    tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P]], ptr align 1 [[R:%.*]], i64 12, i32 1)
; CHECK-NEXT:    ret void
;

  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
  tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %R, i64 12, i32 1)
  ret void
}

; See PR11763 - LLVM allows memcpy's source and destination to be equal (but
; not unequal and overlapping).
define void @test38(ptr %P, ptr %Q, ptr %R) {
; CHECK-LABEL: @test38(
; CHECK-NEXT:    tail call void @llvm.memmove.p0.p0.i64(ptr [[P:%.*]], ptr [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P]], ptr [[R:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;

  tail call void @llvm.memmove.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %R, i64 12, i1 false)
  ret void
}

; See PR11763 - LLVM allows memcpy's source and destination to be equal (but
; not unequal and overlapping).
define void @test38_atomic(ptr %P, ptr %Q, ptr %R) {
; CHECK-LABEL: @test38_atomic(
; CHECK-NEXT:    tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P]], ptr align 1 [[R:%.*]], i64 12, i32 1)
; CHECK-NEXT:    ret void
;

  tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %R, i64 12, i32 1)
  ret void
}

define void @test39(ptr %P, ptr %Q, ptr %R) {
; CHECK-LABEL: @test39(
; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P:%.*]], ptr [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr [[P]], ptr [[R:%.*]], i64 8, i1 false)
; CHECK-NEXT:    ret void
;

  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %Q, i64 12, i1 false)
  tail call void @llvm.memcpy.p0.p0.i64(ptr %P, ptr %R, i64 8, i1 false)
  ret void
}

define void @test39_atomic(ptr %P, ptr %Q, ptr %R) {
; CHECK-LABEL: @test39_atomic(
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 12, i32 1)
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[P]], ptr align 1 [[R:%.*]], i64 8, i32 1)
; CHECK-NEXT:    ret void
;

  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %Q, i64 12, i32 1)
  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %P, ptr align 1 %R, i64 8, i32 1)
  ret void
}

declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i32)

declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
define void @test40(ptr noalias %Pp, ptr noalias %Q)  {
; CHECK-LABEL: @test40(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[A]])
; CHECK-NEXT:    [[PC:%.*]] = load ptr, ptr [[PP:%.*]], align 8
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 [[A]], ptr align 4 [[Q:%.*]], i64 4, i1 false)
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[PC]], ptr nonnull align 4 [[A]], i64 4, i1 true)
; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[A]])
; CHECK-NEXT:    ret void
;
entry:
  %A = alloca i32, align 4
  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %A)
  %Pc = load ptr, ptr %Pp, align 8
  call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 %A, ptr align 4 %Q, i64 4, i1 false)
  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %Pc, ptr nonnull align 4 %A, i64 4, i1 true)
  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %A)
  ret void
}

declare void @free(ptr nocapture) allockind("free")

; We cannot remove `store i32 1, ptr %P`, because @unknown_func may unwind
; and the caller may read %P while unwinding.
define void @test41(ptr noalias %P) {
; CHECK-LABEL: @test41(
; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
; CHECK-NEXT:    call void @unknown_func()
; CHECK-NEXT:    call void @free(ptr [[P]])
; CHECK-NEXT:    ret void
;
  store i32 1, ptr %P
  call void @unknown_func()
  store i32 2, ptr %P
  call void @free(ptr %P)
  ret void
}

define void @test42(ptr %P, ptr %Q) {
; CHECK-LABEL: @test42(
; CHECK-NEXT:    store i32 1, ptr [[P:%.*]], align 4
; CHECK-NEXT:    store i32 2, ptr [[Q:%.*]], align 4
; CHECK-NEXT:    store i8 3, ptr [[P]], align 1
; CHECK-NEXT:    ret void
;
  store i32 1, ptr %P
  store i32 2, ptr %Q
  store i8 3, ptr %P
  ret void
}

define void @test42a(ptr %P, ptr %Q) {
; CHECK-LABEL: @test42a(
; CHECK-NEXT:    store atomic i32 1, ptr [[P:%.*]] unordered, align 4
; CHECK-NEXT:    store atomic i32 2, ptr [[Q:%.*]] unordered, align 4
; CHECK-NEXT:    store atomic i8 3, ptr [[P]] unordered, align 4
; CHECK-NEXT:    ret void
;
  store atomic i32 1, ptr %P unordered, align 4
  store atomic i32 2, ptr %Q unordered, align 4
  store atomic i8 3, ptr %P unordered, align 4
  ret void
}

define void @test43a(ptr %P, ptr noalias %Q) {
; CHECK-LABEL: @test43a(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    store atomic i32 50331649, ptr [[P:%.*]] unordered, align 4
; CHECK-NEXT:    store atomic i32 2, ptr [[Q:%.*]] unordered, align 4
; CHECK-NEXT:    ret void
;
entry:
  store atomic i32 1, ptr %P unordered, align 4
  store atomic i32 2, ptr %Q unordered, align 4
  store atomic i8 3, ptr %P unordered, align 4
  ret void
}

; Some tests where volatile may block removing a store.

; Here we can remove the first non-volatile store. We cannot remove the
; volatile store.
define void @test44_volatile(ptr %P) {
; CHECK-LABEL: @test44_volatile(
; CHECK-NEXT:    store volatile i32 2, ptr [[P:%.*]], align 4
; CHECK-NEXT:    store i32 3, ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
  store i32 1, ptr %P, align 4
  store volatile i32 2, ptr %P, align 4
  store i32 3, ptr %P, align 4
  ret void
}

define void @test45_volatile(ptr %P) {
; CHECK-LABEL: @test45_volatile(
; CHECK-NEXT:    store volatile i32 2, ptr [[P:%.*]], align 4
; CHECK-NEXT:    store volatile i32 3, ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
  store i32 1, ptr %P, align 4
  store volatile i32 2, ptr %P, align 4
  store volatile i32 3, ptr %P, align 4
  ret void
}

define void @test46_volatile(ptr %P) {
; CHECK-LABEL: @test46_volatile(
; CHECK-NEXT:    store volatile i32 2, ptr [[P:%.*]], align 4
; CHECK-NEXT:    store volatile i32 3, ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
  store volatile i32 2, ptr %P, align 4
  store i32 1, ptr %P, align 4
  store volatile i32 3, ptr %P, align 4
  ret void
}

define void @test47_volatile(ptr %P) {
; CHECK-LABEL: @test47_volatile(
; CHECK-NEXT:    store volatile i32 2, ptr [[P:%.*]], align 4
; CHECK-NEXT:    store volatile i32 3, ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
  store volatile i32 2, ptr %P, align 4
  store volatile i32 3, ptr %P, align 4
  ret void
}

define i32 @test48(ptr %P, ptr noalias %Q, ptr %R) {
; CHECK-LABEL: @test48(
; CHECK-NEXT:    store i32 2, ptr [[P:%.*]], align 4
; CHECK-NEXT:    store i32 3, ptr [[Q:%.*]], align 4
; CHECK-NEXT:    [[L:%.*]] = load i32, ptr [[R:%.*]], align 4
; CHECK-NEXT:    ret i32 [[L]]
;
  store i32 1, ptr %Q
  store i32 2, ptr %P
  store i32 3, ptr %Q
  %l = load i32, ptr %R
  ret i32 %l
}

define void @test49() {
; CHECK-LABEL: @test49(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr readonly null, i8 0, i64 0, i1 false)
; CHECK-NEXT:    store ptr null, ptr null, align 8
; CHECK-NEXT:    ret void
;
bb:
  call void @llvm.memset.p0.i64(ptr readonly null, i8 0, i64 0, i1 false)
  store ptr null, ptr null, align 8
  ret void
}