; llvm/test/CodeGen/AMDGPU/copy_to_scc.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
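
; Exercises copying an i1 condition into SCC: the xor combines a VALU compare
; (result in VCC) with a SALU compare (result in an SGPR pair), and the final
; select needs the combined bit in SCC. The expected lowering, per the checks
; below, is an s_and_b64 of the condition with exec followed by s_cselect_b32,
; rather than an illegal direct copy to SCC.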

define amdgpu_kernel void @copy_to_scc(ptr addrspace(1) %out, ptr addrspace(1) %in, ptr addrspace(4) %addrSrc) {
; GCN-LABEL: copy_to_scc:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x34
; GCN-NEXT:    v_mov_b32_e32 v1, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    buffer_load_dword v0, off, s[4:7], 0 offset:252
; GCN-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x24
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_load_dword s0, s[6:7], 0x0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_cmp_lg_u32 s0, 0
; GCN-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
; GCN-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GCN-NEXT:    s_cselect_b32 s0, 2, 3
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    global_store_dword v1, v0, s[4:5]
; GCN-NEXT:    s_endpgm
entry:
  %0 = load i32, ptr addrspace(1) %in, align 4
  %1 = load ptr addrspace(8), ptr addrspace(4) %addrSrc, align 16
  %2 = icmp ne i32 %0, 0
  %3 = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) %1, i32 252, i32 0, i32 0)
  %4 = icmp ne i32 %3, 0
  %5 = xor i1 %2, %4
  %result = select i1 %5, i32 2, i32 3
  store i32 %result, ptr addrspace(1) %out
  ret void
}

declare i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8), i32, i32, i32 immarg)
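
; Operands, as this test uses them: buffer resource descriptor in addrspace(8),
; byte offset, soffset, and an immediate cache-policy word (hence the immarg).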