llvm/test/CodeGen/AMDGPU/global-saddr-atomics.gfx908.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 -amdgpu-atomic-optimizer-strategy=None < %s | FileCheck --check-prefix=GCN %s

; Test using the saddr addressing mode of global_* flat atomic instructions.
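;
; In the saddr form, the 64-bit base address comes from an SGPR pair (the
; inreg %sbase, lowered to s[2:3] here) and a 32-bit offset from a VGPR,
; optionally combined with a small signed immediate "offset:" field, so the
; uniform part of the address does not consume per-lane VGPRs.
;
; gfx908 only provides the no-return forms of global_atomic_add_f32 and
; global_atomic_pk_add_f16, so only *_nortn patterns are exercised here. The
; s_waitcnt vmcnt(0) followed by buffer_wbinvl1 after each atomic waits for
; the operation and invalidates the vector L1 cache, implementing the acquire
; half of the agent-scope seq_cst ordering.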

; --------------------------------------------------------------------------------
; amdgcn global atomic fadd
; --------------------------------------------------------------------------------

define amdgpu_ps void @global_fadd_saddr_f32_nortn(ptr addrspace(1) inreg %sbase, i32 %voffset, float %data) {
; GCN-LABEL: global_fadd_saddr_f32_nortn:
; GCN:       ; %bb.0:
; GCN-NEXT:    global_atomic_add_f32 v0, v1, s[2:3]
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    buffer_wbinvl1
; GCN-NEXT:    s_endpgm
  %zext.offset = zext i32 %voffset to i64
  %gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
  %ret = atomicrmw fadd ptr addrspace(1) %gep0, float %data syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0
  ret void
}

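; The -128 byte displacement should fold into the instruction's signed
; immediate offset field (offset:-128) instead of being applied with separate
; address arithmetic.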
define amdgpu_ps void @global_fadd_saddr_f32_nortn_neg128(ptr addrspace(1) inreg %sbase, i32 %voffset, float %data) {
; GCN-LABEL: global_fadd_saddr_f32_nortn_neg128:
; GCN:       ; %bb.0:
; GCN-NEXT:    global_atomic_add_f32 v0, v1, s[2:3] offset:-128
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    buffer_wbinvl1
; GCN-NEXT:    s_endpgm
  %zext.offset = zext i32 %voffset to i64
  %gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
  %gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
  %ret = atomicrmw fadd ptr addrspace(1) %gep1, float %data syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0
  ret void
}

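; Packed-f16 variants: global_atomic_pk_add_f16 adds a <2 x half> value held
; in a single 32-bit VGPR. Unlike the f32 tests above, these carry only the
; !amdgpu.no.fine.grained.memory annotation.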
define amdgpu_ps void @global_fadd_saddr_v2f16_nortn(ptr addrspace(1) inreg %sbase, i32 %voffset, <2 x half> %data) {
; GCN-LABEL: global_fadd_saddr_v2f16_nortn:
; GCN:       ; %bb.0:
; GCN-NEXT:    global_atomic_pk_add_f16 v0, v1, s[2:3]
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    buffer_wbinvl1
; GCN-NEXT:    s_endpgm
  %zext.offset = zext i32 %voffset to i64
  %gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
  %ret = atomicrmw fadd ptr addrspace(1) %gep0, <2 x half> %data syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
  ret void
}

define amdgpu_ps void @global_fadd_saddr_v2f16_nortn_neg128(ptr addrspace(1) inreg %sbase, i32 %voffset, <2 x half> %data) {
; GCN-LABEL: global_fadd_saddr_v2f16_nortn_neg128:
; GCN:       ; %bb.0:
; GCN-NEXT:    global_atomic_pk_add_f16 v0, v1, s[2:3] offset:-128
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    buffer_wbinvl1
; GCN-NEXT:    s_endpgm
  %zext.offset = zext i32 %voffset to i64
  %gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
  %gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
  %ret = atomicrmw fadd ptr addrspace(1) %gep1, <2 x half> %data syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
  ret void
}

!0 = !{}