llvm/test/Transforms/PreISelIntrinsicLowering/X86/memcpy-inline-non-constant-len.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -mtriple=x86_64-pc-linux-gnu -passes=pre-isel-intrinsic-lowering -S -o - %s | FileCheck %s

; Constant length memcpy.inline should be left unmodified.
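; (Constant-size memcpy.inline is expanded later, during instruction selection.)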
define void @memcpy_32(ptr %dst, ptr %src) nounwind {
; CHECK-LABEL: define void @memcpy_32(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:    call void @llvm.memcpy.inline.p0.p0.i64(ptr [[DST]], ptr [[SRC]], i64 32, i1 false)
; CHECK-NEXT:    tail call void @llvm.memcpy.inline.p0.p0.i64(ptr [[DST]], ptr [[SRC]], i64 32, i1 true)
; CHECK-NEXT:    ret void
;
  call void @llvm.memcpy.inline.p0.p0.i64(ptr %dst, ptr %src, i64 32, i1 false) ; non-volatile
  tail call void @llvm.memcpy.inline.p0.p0.i64(ptr %dst, ptr %src, i64 32, i1 true) ; volatile
  ret void
}

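; Non-constant length memcpy.inline must still be expanded without emitting a
; libcall, so the pass lowers each call to a byte-wise copy loop guarded by a
; zero-length check. The volatile flag on the second call is propagated to the
; loads and stores of its loop.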
define void @memcpy_x(ptr %dst, ptr %src, i64 %x) nounwind {
; CHECK-LABEL: define void @memcpy_x(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i64 [[X]], 0
; CHECK-NEXT:    br i1 [[TMP1]], label %[[LOOP_MEMCPY_EXPANSION:.*]], label %[[POST_LOOP_MEMCPY_EXPANSION:.*]]
; CHECK:       [[LOOP_MEMCPY_EXPANSION]]:
; CHECK-NEXT:    [[LOOP_INDEX:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP5:%.*]], %[[LOOP_MEMCPY_EXPANSION]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[LOOP_INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = load i8, ptr [[TMP2]], align 1
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[LOOP_INDEX]]
; CHECK-NEXT:    store i8 [[TMP3]], ptr [[TMP4]], align 1
; CHECK-NEXT:    [[TMP5]] = add i64 [[LOOP_INDEX]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ult i64 [[TMP5]], [[X]]
; CHECK-NEXT:    br i1 [[TMP6]], label %[[LOOP_MEMCPY_EXPANSION]], label %[[POST_LOOP_MEMCPY_EXPANSION]]
; CHECK:       [[POST_LOOP_MEMCPY_EXPANSION]]:
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i64 [[X]], 0
; CHECK-NEXT:    br i1 [[TMP7]], label %[[LOOP_MEMCPY_EXPANSION2:.*]], label %[[POST_LOOP_MEMCPY_EXPANSION1:.*]]
; CHECK:       [[LOOP_MEMCPY_EXPANSION2]]:
; CHECK-NEXT:    [[LOOP_INDEX3:%.*]] = phi i64 [ 0, %[[POST_LOOP_MEMCPY_EXPANSION]] ], [ [[TMP11:%.*]], %[[LOOP_MEMCPY_EXPANSION2]] ]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[LOOP_INDEX3]]
; CHECK-NEXT:    [[TMP9:%.*]] = load volatile i8, ptr [[TMP8]], align 1
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[LOOP_INDEX3]]
; CHECK-NEXT:    store volatile i8 [[TMP9]], ptr [[TMP10]], align 1
; CHECK-NEXT:    [[TMP11]] = add i64 [[LOOP_INDEX3]], 1
; CHECK-NEXT:    [[TMP12:%.*]] = icmp ult i64 [[TMP11]], [[X]]
; CHECK-NEXT:    br i1 [[TMP12]], label %[[LOOP_MEMCPY_EXPANSION2]], label %[[POST_LOOP_MEMCPY_EXPANSION1]]
; CHECK:       [[POST_LOOP_MEMCPY_EXPANSION1]]:
; CHECK-NEXT:    ret void
;
  call void @llvm.memcpy.inline.p0.p0.i64(ptr %dst, ptr %src, i64 %x, i1 false) ; non-volatile
  tail call void @llvm.memcpy.inline.p0.p0.i64(ptr %dst, ptr %src, i64 %x, i1 true) ; volatile
  ret void
}