llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -global-isel -verify-machineinstrs < %s | FileCheck %s

declare i64 @llvm.amdgcn.ballot.i64(i1)
declare i64 @llvm.ctpop.i64(i64)

; Test ballot(0)

define amdgpu_cs i64 @constant_false() {
; CHECK-LABEL: constant_false:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_mov_b32 s0, 0
; CHECK-NEXT:    s_mov_b32 s1, 0
; CHECK-NEXT:    ; return to shader part epilog
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 0)
  ret i64 %ballot
}

; Test ballot(1)

define amdgpu_cs i64 @constant_true() {
; CHECK-LABEL: constant_true:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_mov_b32 s0, exec_lo
; CHECK-NEXT:    s_mov_b32 s1, exec_hi
; CHECK-NEXT:    ; return to shader part epilog
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 1)
  ret i64 %ballot
}

; Test ballot of a non-comparison operation

define amdgpu_cs i64 @non_compare(i32 %x) {
; CHECK-LABEL: non_compare:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    v_and_b32_e32 v0, 1, v0
; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
; CHECK-NEXT:    s_and_b64 s[0:1], vcc, exec
; CHECK-NEXT:    ; return to shader part epilog
  %trunc = trunc i32 %x to i1
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %trunc)
  ret i64 %ballot
}

; Test ballot of comparisons

define amdgpu_cs i64 @compare_ints(i32 %x, i32 %y) {
; CHECK-LABEL: compare_ints:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    v_cmp_eq_u32_e64 s[0:1], v0, v1
; CHECK-NEXT:    ; return to shader part epilog
  %cmp = icmp eq i32 %x, %y
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cmp)
  ret i64 %ballot
}

define amdgpu_cs i64 @compare_int_with_constant(i32 %x) {
; CHECK-LABEL: compare_int_with_constant:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    v_mov_b32_e32 v1, 0x63
; CHECK-NEXT:    v_cmp_ge_i32_e64 s[0:1], v0, v1
; CHECK-NEXT:    ; return to shader part epilog
  %cmp = icmp sge i32 %x, 99
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cmp)
  ret i64 %ballot
}

define amdgpu_cs i64 @compare_floats(float %x, float %y) {
; CHECK-LABEL: compare_floats:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    v_cmp_gt_f32_e64 s[0:1], v0, v1
; CHECK-NEXT:    ; return to shader part epilog
  %cmp = fcmp ogt float %x, %y
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cmp)
  ret i64 %ballot
}

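; Test ctpop of the ballot result
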
define amdgpu_cs i64 @ctpop_of_ballot(float %x, float %y) {
; CHECK-LABEL: ctpop_of_ballot:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    v_cmp_gt_f32_e32 vcc, v0, v1
; CHECK-NEXT:    s_bcnt1_i32_b64 s0, vcc
; CHECK-NEXT:    s_mov_b32 s1, 0
; CHECK-NEXT:    ; return to shader part epilog
  %cmp = fcmp ogt float %x, %y
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cmp)
  %bcnt = call i64 @llvm.ctpop.i64(i64 %ballot)
  ret i64 %bcnt
}

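; Test branching on ballot != 0 and ballot == 0 of a non-compare input,
; with both divergent (VGPR) and uniform (SGPR inreg) conditions.
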
define amdgpu_cs i32 @branch_divergent_ballot_ne_zero_non_compare(i32 %v) {
; CHECK-LABEL: branch_divergent_ballot_ne_zero_non_compare:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    v_and_b32_e32 v0, 1, v0
; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
; CHECK-NEXT:    s_and_b64 s[0:1], vcc, exec
; CHECK-NEXT:    s_cmp_eq_u64 s[0:1], 0
; CHECK-NEXT:    s_cbranch_scc1 .LBB7_2
; CHECK-NEXT:  ; %bb.1: ; %true
; CHECK-NEXT:    s_mov_b32 s0, 42
; CHECK-NEXT:    s_branch .LBB7_3
; CHECK-NEXT:  .LBB7_2: ; %false
; CHECK-NEXT:    s_mov_b32 s0, 33
; CHECK-NEXT:    s_branch .LBB7_3
; CHECK-NEXT:  .LBB7_3:
  %c = trunc i32 %v to i1
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
  %ballot_ne_zero = icmp ne i64 %ballot, 0
  br i1 %ballot_ne_zero, label %true, label %false
true:
  ret i32 42
false:
  ret i32 33
}

define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_non_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_non_compare:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_and_b32 s0, 1, s0
; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, s0
; CHECK-NEXT:    s_cmp_eq_u64 s[0:1], 0
; CHECK-NEXT:    s_cbranch_scc1 .LBB8_2
; CHECK-NEXT:  ; %bb.1: ; %true
; CHECK-NEXT:    s_mov_b32 s0, 42
; CHECK-NEXT:    s_branch .LBB8_3
; CHECK-NEXT:  .LBB8_2: ; %false
; CHECK-NEXT:    s_mov_b32 s0, 33
; CHECK-NEXT:    s_branch .LBB8_3
; CHECK-NEXT:  .LBB8_3:
  %c = trunc i32 %v to i1
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
  %ballot_ne_zero = icmp ne i64 %ballot, 0
  br i1 %ballot_ne_zero, label %true, label %false
true:
  ret i32 42
false:
  ret i32 33
}

define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_non_compare(i32 %v) {
; CHECK-LABEL: branch_divergent_ballot_eq_zero_non_compare:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    v_and_b32_e32 v0, 1, v0
; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
; CHECK-NEXT:    s_and_b64 s[0:1], vcc, exec
; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT:    s_cbranch_scc0 .LBB9_2
; CHECK-NEXT:  ; %bb.1: ; %false
; CHECK-NEXT:    s_mov_b32 s0, 33
; CHECK-NEXT:    s_branch .LBB9_3
; CHECK-NEXT:  .LBB9_2: ; %true
; CHECK-NEXT:    s_mov_b32 s0, 42
; CHECK-NEXT:    s_branch .LBB9_3
; CHECK-NEXT:  .LBB9_3:
  %c = trunc i32 %v to i1
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
  %ballot_eq_zero = icmp eq i64 %ballot, 0
  br i1 %ballot_eq_zero, label %true, label %false
true:
  ret i32 42
false:
  ret i32 33
}

define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_non_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_eq_zero_non_compare:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_and_b32 s0, 1, s0
; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, s0
; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT:    s_cbranch_scc0 .LBB10_2
; CHECK-NEXT:  ; %bb.1: ; %false
; CHECK-NEXT:    s_mov_b32 s0, 33
; CHECK-NEXT:    s_branch .LBB10_3
; CHECK-NEXT:  .LBB10_2: ; %true
; CHECK-NEXT:    s_mov_b32 s0, 42
; CHECK-NEXT:    s_branch .LBB10_3
; CHECK-NEXT:  .LBB10_3:
  %c = trunc i32 %v to i1
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
  %ballot_eq_zero = icmp eq i64 %ballot, 0
  br i1 %ballot_eq_zero, label %true, label %false
true:
  ret i32 42
false:
  ret i32 33
}

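; Test branching on ballot != 0 and ballot == 0 of a compare,
; with both divergent and uniform conditions.
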
define amdgpu_cs i32 @branch_divergent_ballot_ne_zero_compare(i32 %v) {
; CHECK-LABEL: branch_divergent_ballot_ne_zero_compare:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc, 12, v0
; CHECK-NEXT:    s_cmp_eq_u64 vcc, 0
; CHECK-NEXT:    s_cbranch_scc1 .LBB11_2
; CHECK-NEXT:  ; %bb.1: ; %true
; CHECK-NEXT:    s_mov_b32 s0, 42
; CHECK-NEXT:    s_branch .LBB11_3
; CHECK-NEXT:  .LBB11_2: ; %false
; CHECK-NEXT:    s_mov_b32 s0, 33
; CHECK-NEXT:    s_branch .LBB11_3
; CHECK-NEXT:  .LBB11_3:
  %c = icmp ult i32 %v, 12
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
  %ballot_ne_zero = icmp ne i64 %ballot, 0
  br i1 %ballot_ne_zero, label %true, label %false
true:
  ret i32 42
false:
  ret i32 33
}

define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_cmp_lt_u32 s0, 12
; CHECK-NEXT:    s_cselect_b32 s0, 1, 0
; CHECK-NEXT:    s_and_b32 s0, 1, s0
; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, s0
; CHECK-NEXT:    s_cmp_eq_u64 s[0:1], 0
; CHECK-NEXT:    s_cbranch_scc1 .LBB12_2
; CHECK-NEXT:  ; %bb.1: ; %true
; CHECK-NEXT:    s_mov_b32 s0, 42
; CHECK-NEXT:    s_branch .LBB12_3
; CHECK-NEXT:  .LBB12_2: ; %false
; CHECK-NEXT:    s_mov_b32 s0, 33
; CHECK-NEXT:    s_branch .LBB12_3
; CHECK-NEXT:  .LBB12_3:
  %c = icmp ult i32 %v, 12
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
  %ballot_ne_zero = icmp ne i64 %ballot, 0
  br i1 %ballot_ne_zero, label %true, label %false
true:
  ret i32 42
false:
  ret i32 33
}

define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_compare(i32 %v) {
; CHECK-LABEL: branch_divergent_ballot_eq_zero_compare:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc, 12, v0
; CHECK-NEXT:    s_cmp_lg_u64 vcc, 0
; CHECK-NEXT:    s_cbranch_scc0 .LBB13_2
; CHECK-NEXT:  ; %bb.1: ; %false
; CHECK-NEXT:    s_mov_b32 s0, 33
; CHECK-NEXT:    s_branch .LBB13_3
; CHECK-NEXT:  .LBB13_2: ; %true
; CHECK-NEXT:    s_mov_b32 s0, 42
; CHECK-NEXT:    s_branch .LBB13_3
; CHECK-NEXT:  .LBB13_3:
  %c = icmp ult i32 %v, 12
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
  %ballot_eq_zero = icmp eq i64 %ballot, 0
  br i1 %ballot_eq_zero, label %true, label %false
true:
  ret i32 42
false:
  ret i32 33
}

define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_eq_zero_compare:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_cmp_lt_u32 s0, 12
; CHECK-NEXT:    s_cselect_b32 s0, 1, 0
; CHECK-NEXT:    s_and_b32 s0, 1, s0
; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, s0
; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT:    s_cbranch_scc0 .LBB14_2
; CHECK-NEXT:  ; %bb.1: ; %false
; CHECK-NEXT:    s_mov_b32 s0, 33
; CHECK-NEXT:    s_branch .LBB14_3
; CHECK-NEXT:  .LBB14_2: ; %true
; CHECK-NEXT:    s_mov_b32 s0, 42
; CHECK-NEXT:    s_branch .LBB14_3
; CHECK-NEXT:  .LBB14_3:
  %c = icmp ult i32 %v, 12
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
  %ballot_eq_zero = icmp eq i64 %ballot, 0
  br i1 %ballot_eq_zero, label %true, label %false
true:
  ret i32 42
false:
  ret i32 33
}

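; Test branching on ballot != 0 and ballot == 0 of an AND of two compares,
; with both divergent and uniform conditions.
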
define amdgpu_cs i32 @branch_divergent_ballot_ne_zero_and(i32 %v1, i32 %v2) {
; CHECK-LABEL: branch_divergent_ballot_ne_zero_and:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc, 12, v0
; CHECK-NEXT:    v_cmp_lt_u32_e64 s[0:1], 34, v1
; CHECK-NEXT:    s_and_b64 s[0:1], vcc, s[0:1]
; CHECK-NEXT:    s_cmp_eq_u64 s[0:1], 0
; CHECK-NEXT:    s_cbranch_scc1 .LBB15_2
; CHECK-NEXT:  ; %bb.1: ; %true
; CHECK-NEXT:    s_mov_b32 s0, 42
; CHECK-NEXT:    s_branch .LBB15_3
; CHECK-NEXT:  .LBB15_2: ; %false
; CHECK-NEXT:    s_mov_b32 s0, 33
; CHECK-NEXT:    s_branch .LBB15_3
; CHECK-NEXT:  .LBB15_3:
  %v1c = icmp ult i32 %v1, 12
  %v2c = icmp ugt i32 %v2, 34
  %c = and i1 %v1c, %v2c
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
  %ballot_ne_zero = icmp ne i64 %ballot, 0
  br i1 %ballot_ne_zero, label %true, label %false
true:
  ret i32 42
false:
  ret i32 33
}

define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_and:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_cmp_lt_u32 s0, 12
; CHECK-NEXT:    s_cselect_b32 s0, 1, 0
; CHECK-NEXT:    s_cmp_gt_u32 s1, 34
; CHECK-NEXT:    s_cselect_b32 s1, 1, 0
; CHECK-NEXT:    s_and_b32 s0, s0, s1
; CHECK-NEXT:    s_and_b32 s0, 1, s0
; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, s0
; CHECK-NEXT:    s_cmp_eq_u64 s[0:1], 0
; CHECK-NEXT:    s_cbranch_scc1 .LBB16_2
; CHECK-NEXT:  ; %bb.1: ; %true
; CHECK-NEXT:    s_mov_b32 s0, 42
; CHECK-NEXT:    s_branch .LBB16_3
; CHECK-NEXT:  .LBB16_2: ; %false
; CHECK-NEXT:    s_mov_b32 s0, 33
; CHECK-NEXT:    s_branch .LBB16_3
; CHECK-NEXT:  .LBB16_3:
  %v1c = icmp ult i32 %v1, 12
  %v2c = icmp ugt i32 %v2, 34
  %c = and i1 %v1c, %v2c
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
  %ballot_ne_zero = icmp ne i64 %ballot, 0
  br i1 %ballot_ne_zero, label %true, label %false
true:
  ret i32 42
false:
  ret i32 33
}

define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_and(i32 %v1, i32 %v2) {
; CHECK-LABEL: branch_divergent_ballot_eq_zero_and:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc, 12, v0
; CHECK-NEXT:    v_cmp_lt_u32_e64 s[0:1], 34, v1
; CHECK-NEXT:    s_and_b64 s[0:1], vcc, s[0:1]
; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT:    s_cbranch_scc0 .LBB17_2
; CHECK-NEXT:  ; %bb.1: ; %false
; CHECK-NEXT:    s_mov_b32 s0, 33
; CHECK-NEXT:    s_branch .LBB17_3
; CHECK-NEXT:  .LBB17_2: ; %true
; CHECK-NEXT:    s_mov_b32 s0, 42
; CHECK-NEXT:    s_branch .LBB17_3
; CHECK-NEXT:  .LBB17_3:
  %v1c = icmp ult i32 %v1, 12
  %v2c = icmp ugt i32 %v2, 34
  %c = and i1 %v1c, %v2c
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
  %ballot_eq_zero = icmp eq i64 %ballot, 0
  br i1 %ballot_eq_zero, label %true, label %false
true:
  ret i32 42
false:
  ret i32 33
}

define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg %v2) {
; CHECK-LABEL: branch_uniform_ballot_eq_zero_and:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_cmp_lt_u32 s0, 12
; CHECK-NEXT:    s_cselect_b32 s0, 1, 0
; CHECK-NEXT:    s_cmp_gt_u32 s1, 34
; CHECK-NEXT:    s_cselect_b32 s1, 1, 0
; CHECK-NEXT:    s_and_b32 s0, s0, s1
; CHECK-NEXT:    s_and_b32 s0, 1, s0
; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, s0
; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT:    s_cbranch_scc0 .LBB18_2
; CHECK-NEXT:  ; %bb.1: ; %false
; CHECK-NEXT:    s_mov_b32 s0, 33
; CHECK-NEXT:    s_branch .LBB18_3
; CHECK-NEXT:  .LBB18_2: ; %true
; CHECK-NEXT:    s_mov_b32 s0, 42
; CHECK-NEXT:    s_branch .LBB18_3
; CHECK-NEXT:  .LBB18_3:
  %v1c = icmp ult i32 %v1, 12
  %v2c = icmp ugt i32 %v2, 34
  %c = and i1 %v1c, %v2c
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
  %ballot_eq_zero = icmp eq i64 %ballot, 0
  br i1 %ballot_eq_zero, label %true, label %false
true:
  ret i32 42
false:
  ret i32 33
}

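; Test branching on a signed comparison of the ballot result rather than
; a simple zero test.
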
define amdgpu_cs i32 @branch_uniform_ballot_sgt_N_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_sgt_N_compare:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_cmp_lt_u32 s0, 12
; CHECK-NEXT:    s_cselect_b32 s0, 1, 0
; CHECK-NEXT:    s_and_b32 s0, 1, s0
; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, s0
; CHECK-NEXT:    v_cmp_le_i64_e64 vcc, s[0:1], 22
; CHECK-NEXT:    s_cbranch_vccnz .LBB19_2
; CHECK-NEXT:  ; %bb.1: ; %true
; CHECK-NEXT:    s_mov_b32 s0, 42
; CHECK-NEXT:    s_branch .LBB19_3
; CHECK-NEXT:  .LBB19_2: ; %false
; CHECK-NEXT:    s_mov_b32 s0, 33
; CHECK-NEXT:    s_branch .LBB19_3
; CHECK-NEXT:  .LBB19_3:
  %c = icmp ult i32 %v, 12
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
  %bc = icmp sgt i64 %ballot, 22
  br i1 %bc, label %true, label %false
true:
  ret i32 42
false:
  ret i32 33
}

; Input that is not a constant or the direct result of a compare.
; Tests that inactive lanes are set to 0.
define amdgpu_ps void @non_cst_non_compare_input(ptr addrspace(1) %out, i32 %tid, i32 %cond) {
; CHECK-LABEL: non_cst_non_compare_input:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_and_b32 s0, 1, s0
; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, s0
; CHECK-NEXT:    s_and_saveexec_b64 s[2:3], vcc
; CHECK-NEXT:    s_xor_b64 s[2:3], exec, s[2:3]
; CHECK-NEXT:  ; %bb.1: ; %B
; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc, 2, v2
; CHECK-NEXT:    s_andn2_b64 s[0:1], s[0:1], exec
; CHECK-NEXT:    s_and_b64 s[4:5], exec, vcc
; CHECK-NEXT:    s_or_b64 s[0:1], s[0:1], s[4:5]
; CHECK-NEXT:    ; implicit-def: $vgpr2
; CHECK-NEXT:  ; %bb.2: ; %Flow
; CHECK-NEXT:    s_andn2_saveexec_b64 s[2:3], s[2:3]
; CHECK-NEXT:  ; %bb.3: ; %A
; CHECK-NEXT:    v_cmp_le_u32_e32 vcc, 1, v2
; CHECK-NEXT:    s_andn2_b64 s[0:1], s[0:1], exec
; CHECK-NEXT:    s_and_b64 s[4:5], exec, vcc
; CHECK-NEXT:    s_or_b64 s[0:1], s[0:1], s[4:5]
; CHECK-NEXT:  ; %bb.4: ; %exit
; CHECK-NEXT:    s_or_b64 exec, exec, s[2:3]
; CHECK-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; CHECK-NEXT:    v_mov_b32_e32 v3, s1
; CHECK-NEXT:    v_mov_b32_e32 v2, s0
; CHECK-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
; CHECK-NEXT:    s_endpgm
entry:
  %cmp = icmp eq i32 %cond, 0
  br i1 %cmp, label %A, label %B

A:
  %val_A = icmp uge i32 %tid, 1
  br label %exit

B:
  %val_B = icmp ult i32 %tid, 2
  br label %exit

exit:
  %phi = phi i1 [ %val_A, %A ], [ %val_B, %B ]
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %phi)
  store i64 %ballot, ptr addrspace(1) %out
  ret void
}