; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
;; Test how memcpy is optimized when ual is turned off; this is similar to
;; AArch64/arm64-misaligned-memcpy-inline.ll.
; RUN: llc --mtriple=loongarch32 --mattr=-ual < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 --mattr=-ual < %s | FileCheck %s --check-prefix=LA64
;; A small (16 bytes here) unaligned memcpy() should be lowered to a function
;; call if ual is turned off.
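;; For reference, a call like the one in @t0 typically arises from C source
;; such as the following (hypothetical example, not part of this test):
;;
;;   #include <string.h>
;;   void t0(void *out, const void *in) { memcpy(out, in, 16); }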
define void @t0(ptr %out, ptr %in) {
; LA32-LABEL: t0:
; LA32: # %bb.0: # %entry
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: .cfi_def_cfa_offset 16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: .cfi_offset 1, -4
; LA32-NEXT: ori $a2, $zero, 16
; LA32-NEXT: bl %plt(memcpy)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: t0:
; LA64: # %bb.0: # %entry
; LA64-NEXT: addi.d $sp, $sp, -16
; LA64-NEXT: .cfi_def_cfa_offset 16
; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: .cfi_offset 1, -8
; LA64-NEXT: ori $a2, $zero, 16
; LA64-NEXT: bl %plt(memcpy)
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 16
; LA64-NEXT: ret
entry:
call void @llvm.memcpy.p0.p0.i64(ptr %out, ptr %in, i64 16, i1 false)
ret void
}
;; A small (16 bytes here) aligned memcpy() should be inlined even if
;; ual is turned off.
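;; For reference (hypothetical example, not part of this test), the align 8
;; attributes below match C source where the 8-byte alignment is implied by
;; the pointee type:
;;
;;   #include <stdint.h>
;;   #include <string.h>
;;   void t1(uint64_t *out, const uint64_t *in) { memcpy(out, in, 16); }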
define void @t1(ptr align 8 %out, ptr align 8 %in) {
; LA32-LABEL: t1:
; LA32: # %bb.0: # %entry
; LA32-NEXT: ld.w $a2, $a1, 12
; LA32-NEXT: st.w $a2, $a0, 12
; LA32-NEXT: ld.w $a2, $a1, 8
; LA32-NEXT: st.w $a2, $a0, 8
; LA32-NEXT: ld.w $a2, $a1, 4
; LA32-NEXT: st.w $a2, $a0, 4
; LA32-NEXT: ld.w $a1, $a1, 0
; LA32-NEXT: st.w $a1, $a0, 0
; LA32-NEXT: ret
;
; LA64-LABEL: t1:
; LA64: # %bb.0: # %entry
; LA64-NEXT: ld.d $a2, $a1, 8
; LA64-NEXT: st.d $a2, $a0, 8
; LA64-NEXT: ld.d $a1, $a1, 0
; LA64-NEXT: st.d $a1, $a0, 0
; LA64-NEXT: ret
entry:
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %out, ptr align 8 %in, i64 16, i1 false)
ret void
}
;; A tiny (4 bytes here) unaligned memcpy() should be inlined with byte-sized
;; loads and stores if ual is turned off.
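;; For reference (hypothetical example, not part of this test):
;;
;;   #include <string.h>
;;   void t2(void *out, const void *in) { memcpy(out, in, 4); }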
define void @t2(ptr %out, ptr %in) {
; LA32-LABEL: t2:
; LA32: # %bb.0: # %entry
; LA32-NEXT: ld.b $a2, $a1, 3
; LA32-NEXT: st.b $a2, $a0, 3
; LA32-NEXT: ld.b $a2, $a1, 2
; LA32-NEXT: st.b $a2, $a0, 2
; LA32-NEXT: ld.b $a2, $a1, 1
; LA32-NEXT: st.b $a2, $a0, 1
; LA32-NEXT: ld.b $a1, $a1, 0
; LA32-NEXT: st.b $a1, $a0, 0
; LA32-NEXT: ret
;
; LA64-LABEL: t2:
; LA64: # %bb.0: # %entry
; LA64-NEXT: ld.b $a2, $a1, 3
; LA64-NEXT: st.b $a2, $a0, 3
; LA64-NEXT: ld.b $a2, $a1, 2
; LA64-NEXT: st.b $a2, $a0, 2
; LA64-NEXT: ld.b $a2, $a1, 1
; LA64-NEXT: st.b $a2, $a0, 1
; LA64-NEXT: ld.b $a1, $a1, 0
; LA64-NEXT: st.b $a1, $a0, 0
; LA64-NEXT: ret
entry:
call void @llvm.memcpy.p0.p0.i64(ptr %out, ptr %in, i64 4, i1 false)
ret void
}
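;; A small (22 bytes here) memcpy() from a constant string should also be
;; inlined rather than lowered to a library call. For reference (hypothetical
;; example, not part of this test):
;;
;;   #include <string.h>
;;   void t3(void) {
;;     char msgbuf[64];
;;     memcpy(msgbuf, "preemption imbalance ", 22);
;;   }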
@.str = private constant [22 x i8] c"preemption imbalance \00", align 1
define void @t3() {
; LA32-LABEL: t3:
; LA32: # %bb.0: # %entry
; LA32-NEXT: addi.w $sp, $sp, -64
; LA32-NEXT: .cfi_def_cfa_offset 64
; LA32-NEXT: pcalau12i $a0, %pc_hi20(.L.str)
; LA32-NEXT: addi.w $a0, $a0, %pc_lo12(.L.str)
; LA32-NEXT: ld.h $a1, $a0, 20
; LA32-NEXT: ld.w $a2, $a0, 16
; LA32-NEXT: st.h $a1, $sp, 20
; LA32-NEXT: st.w $a2, $sp, 16
; LA32-NEXT: ld.w $a1, $a0, 12
; LA32-NEXT: ld.w $a2, $a0, 8
; LA32-NEXT: ld.w $a3, $a0, 4
; LA32-NEXT: ld.w $a0, $a0, 0
; LA32-NEXT: st.w $a1, $sp, 12
; LA32-NEXT: st.w $a2, $sp, 8
; LA32-NEXT: st.w $a3, $sp, 4
; LA32-NEXT: st.w $a0, $sp, 0
; LA32-NEXT: addi.w $sp, $sp, 64
; LA32-NEXT: ret
;
; LA64-LABEL: t3:
; LA64: # %bb.0: # %entry
; LA64-NEXT: addi.d $sp, $sp, -64
; LA64-NEXT: .cfi_def_cfa_offset 64
; LA64-NEXT: pcalau12i $a0, %pc_hi20(.L.str)
; LA64-NEXT: addi.d $a0, $a0, %pc_lo12(.L.str)
; LA64-NEXT: ld.h $a1, $a0, 20
; LA64-NEXT: ld.w $a2, $a0, 16
; LA64-NEXT: ld.d $a3, $a0, 8
; LA64-NEXT: ld.d $a0, $a0, 0
; LA64-NEXT: st.h $a1, $sp, 20
; LA64-NEXT: st.w $a2, $sp, 16
; LA64-NEXT: st.d $a3, $sp, 8
; LA64-NEXT: st.d $a0, $sp, 0
; LA64-NEXT: addi.d $sp, $sp, 64
; LA64-NEXT: ret
entry:
%msgbuf = alloca [64 x i8], align 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %msgbuf, ptr align 1 @.str, i64 22, i1 false)
ret void
}
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)