; llvm/llvm/test/CodeGen/AMDGPU/tail-call-uniform-target-in-vgprs-issue110930.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s

target triple = "amdgcn-amd-amdhsa"

; The tail call target is known uniform, but will be in a VGPR, so we
; need readfirstlane to legalize it.
define void @tail_call_uniform_vgpr_value() {
; CHECK-LABEL: tail_call_uniform_vgpr_value:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    v_mov_b32_e32 v0, 0
; CHECK-NEXT:    ds_read_b64 v[0:1], v0
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    v_readfirstlane_b32 s17, v1
; CHECK-NEXT:    v_readfirstlane_b32 s16, v0
; CHECK-NEXT:    s_setpc_b64 s[16:17]
  ; LDS (addrspace(3)) load of the function pointer: the ds_read_b64 result
  ; lands in VGPRs even though the value is uniform across the wave.
  %fptr = load ptr, ptr addrspace(3) null, align 8
  ; Indirect tail call: s_setpc_b64 needs an SGPR pair, so codegen must emit
  ; v_readfirstlane_b32 for each half to legalize the VGPR-resident target.
  tail call void %fptr()
  ret void
}

; Externally-defined function-pointer global in the constant address space
; (addrspace(4)); loads from it are uniform and selected as scalar loads.
@constant = external hidden addrspace(4) constant ptr

; The v_readfirstlane_b32 instructions should fold out here: the scalar load
; already produces the call target in SGPRs, so no legalization copy is needed.
define void @tail_call_uniform_sgpr_value() {
; CHECK-LABEL: tail_call_uniform_sgpr_value:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_getpc_b64 s[16:17]
; CHECK-NEXT:    s_add_u32 s16, s16, constant@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s17, s17, constant@rel32@hi+12
; CHECK-NEXT:    s_load_dwordx2 s[16:17], s[16:17], 0x0
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    s_setpc_b64 s[16:17]
  ; Constant-address-space (addrspace(4)) load: selected as s_load_dwordx2,
  ; so the call target is already in an SGPR pair ready for s_setpc_b64.
  %fptr = load ptr, ptr addrspace(4) @constant, align 8
  tail call void %fptr()
  ret void
}