; llvm/test/CodeGen/AMDGPU/memmove-scalar-load.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5

; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 %s -o - | FileCheck %s

; Testing codegen for memmove with scalar reads.


; 16-byte memmove, addrspace(4) constant src -> addrspace(1) global dst.
; Expected lowering (per the CHECK lines): the whole source is read with a
; single scalar load (s_load_dwordx4), moved to VGPRs, and written back with
; one global_store_dwordx4 -- straight-line code, no runtime overlap check.
define void @memmove_p1_p4_sz16_align_4_4(ptr addrspace(1) align 4 %dst, ptr addrspace(4) align 4 readonly inreg %src) {
; CHECK-LABEL: memmove_p1_p4_sz16_align_4_4:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_load_dwordx4 s[4:7], s[6:7], 0x0
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    v_mov_b32_e32 v2, s4
; CHECK-NEXT:    v_mov_b32_e32 v3, s5
; CHECK-NEXT:    v_mov_b32_e32 v4, s6
; CHECK-NEXT:    v_mov_b32_e32 v5, s7
; CHECK-NEXT:    global_store_dwordx4 v[0:1], v[2:5], off
; CHECK-NEXT:    s_setpc_b64 s[30:31]
entry:
  ; Fully-inlined fixed-size memmove; %src is in SGPRs (inreg), enabling the
  ; scalar read this test is checking for.
  tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 4 %dst, ptr addrspace(4) noundef nonnull align 4 %src, i64 16, i1 false)
  ret void
}

; 31-byte memmove (odd size, not a multiple of 4). Per the CHECK lines the
; bulk is read with one scalar s_load_dwordx8 (s4-s11) and stored as
; dwordx4 (bytes 0-15) + dwordx3 (bytes 16-27) + a short from s11
; (bytes 28-29); the final tail byte (offset 30) is fetched separately with
; a vector global_load_ubyte rather than a scalar load.
define void @memmove_p1_p4_sz31_align_4_4(ptr addrspace(1) align 4 %dst, ptr addrspace(4) align 4 readonly inreg %src) {
; CHECK-LABEL: memmove_p1_p4_sz31_align_4_4:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    v_mov_b32_e32 v2, 0
; CHECK-NEXT:    global_load_ubyte v9, v2, s[6:7] offset:30
; CHECK-NEXT:    s_load_dwordx8 s[4:11], s[6:7], 0x0
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    v_mov_b32_e32 v2, s4
; CHECK-NEXT:    v_mov_b32_e32 v3, s5
; CHECK-NEXT:    v_mov_b32_e32 v4, s6
; CHECK-NEXT:    v_mov_b32_e32 v5, s7
; CHECK-NEXT:    v_mov_b32_e32 v10, s11
; CHECK-NEXT:    v_mov_b32_e32 v6, s8
; CHECK-NEXT:    v_mov_b32_e32 v7, s9
; CHECK-NEXT:    v_mov_b32_e32 v8, s10
; CHECK-NEXT:    global_store_dwordx4 v[0:1], v[2:5], off
; CHECK-NEXT:    global_store_short v[0:1], v10, off offset:28
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    global_store_byte v[0:1], v9, off offset:30
; CHECK-NEXT:    global_store_dwordx3 v[0:1], v[6:8], off offset:16
; CHECK-NEXT:    s_setpc_b64 s[30:31]
entry:
  ; Same pattern as the sz16 case but with an awkward length to exercise the
  ; mixed scalar/vector tail handling of the inlined memmove expansion.
  tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 4 %dst, ptr addrspace(4) noundef nonnull align 4 %src, i64 31, i1 false)
  ret void
}

; 32-byte memmove: two 16-byte pieces. Per the CHECK lines all 32 bytes are
; read with a single scalar s_load_dwordx8 (s4-s11) and written back as two
; global_store_dwordx4 instructions (offset 16 first, then offset 0).
define void @memmove_p1_p4_sz32_align_4_4(ptr addrspace(1) align 4 %dst, ptr addrspace(4) align 4 readonly inreg %src) {
; CHECK-LABEL: memmove_p1_p4_sz32_align_4_4:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_load_dwordx8 s[4:11], s[6:7], 0x0
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    v_mov_b32_e32 v2, s8
; CHECK-NEXT:    v_mov_b32_e32 v3, s9
; CHECK-NEXT:    v_mov_b32_e32 v4, s10
; CHECK-NEXT:    v_mov_b32_e32 v5, s11
; CHECK-NEXT:    v_mov_b32_e32 v9, s7
; CHECK-NEXT:    v_mov_b32_e32 v8, s6
; CHECK-NEXT:    v_mov_b32_e32 v7, s5
; CHECK-NEXT:    v_mov_b32_e32 v6, s4
; CHECK-NEXT:    global_store_dwordx4 v[0:1], v[2:5], off offset:16
; CHECK-NEXT:    global_store_dwordx4 v[0:1], v[6:9], off
; CHECK-NEXT:    s_setpc_b64 s[30:31]
entry:
  ; Power-of-two size that exactly fills one dwordx8 scalar load.
  tail call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) noundef nonnull align 4 %dst, ptr addrspace(4) noundef nonnull align 4 %src, i64 32, i1 false)
  ret void
}

; Declaration of the fixed-size memmove intrinsic used by the tests above.
; NOTE(fix): the declaration previously referenced attribute group #2, which
; is never defined in this file -- the LLVM IR parser rejects references to
; undefined attribute groups, so the test could not even be parsed. Point it
; at the defined group #0, which carries the standard memmove-intrinsic
; attributes (nocallback nofree nounwind willreturn memory(argmem: readwrite)).
declare void @llvm.memmove.p1.p4.i64(ptr addrspace(1) nocapture writeonly, ptr addrspace(4) nocapture readonly, i64, i1 immarg) #0

attributes #0 = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }