; llvm/test/CodeGen/AMDGPU/combine-add-zext-xor.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck --check-prefix=GFX1010 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck --check-prefix=GFX1100 %s

; Test that unused lanes in the s_xor result are masked out with v_cndmask.
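; A minimal IR sketch of the pattern exercised here (comments only, not part of
; the compiled test; %cond and %acc are hypothetical names):
;
;   %flip = xor i1 %cond, true        ; becomes s_xor_b32 on the lane mask
;   %bit  = zext i1 %flip to i32      ; expected to lower to v_cndmask_b32 0/1
;   %sum  = add i32 %acc, %bit        ; plain v_add_nc_u32 of the 0/1 value
;
; The CHECK lines verify that the s_xor result is fed through v_cndmask_b32, so
; the add/sub only ever sees 0 or 1 for the current lane, rather than consuming
; the lane mask directly.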

define i32 @combine_add_zext_xor() {
; GFX1010-LABEL: combine_add_zext_xor:
; GFX1010:       ; %bb.0: ; %.entry
; GFX1010-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1010-NEXT:    v_mov_b32_e32 v1, 0
; GFX1010-NEXT:    s_branch .LBB0_2
; GFX1010-NEXT:  .LBB0_1: ; %bb9
; GFX1010-NEXT:    ; in Loop: Header=BB0_2 Depth=1
; GFX1010-NEXT:    s_xor_b32 s4, s4, -1
; GFX1010-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0xfffffbe6, v1
; GFX1010-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
; GFX1010-NEXT:    v_add_nc_u32_e32 v2, v1, v0
; GFX1010-NEXT:    v_mov_b32_e32 v1, v2
; GFX1010-NEXT:    s_cbranch_vccz .LBB0_4
; GFX1010-NEXT:  .LBB0_2: ; %.a
; GFX1010-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX1010-NEXT:    ; implicit-def: $sgpr4
; GFX1010-NEXT:    s_cbranch_scc1 .LBB0_1
; GFX1010-NEXT:  ; %bb.3: ; %bb
; GFX1010-NEXT:    ; in Loop: Header=BB0_2 Depth=1
; GFX1010-NEXT:    buffer_load_dword v0, v1, s[4:7], 64 offen glc
; GFX1010-NEXT:    s_waitcnt vmcnt(0)
; GFX1010-NEXT:    v_cmp_eq_u32_e64 s4, 0, v0
; GFX1010-NEXT:    s_branch .LBB0_1
; GFX1010-NEXT:  .LBB0_4: ; %.exit
; GFX1010-NEXT:    s_setpc_b64 s[30:31]
;
; GFX1100-LABEL: combine_add_zext_xor:
; GFX1100:       ; %bb.0: ; %.entry
; GFX1100-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1100-NEXT:    v_mov_b32_e32 v1, 0
; GFX1100-NEXT:    s_branch .LBB0_2
; GFX1100-NEXT:  .LBB0_1: ; %bb9
; GFX1100-NEXT:    ; in Loop: Header=BB0_2 Depth=1
; GFX1100-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1100-NEXT:    s_xor_b32 s0, s0, -1
; GFX1100-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0xfffffbe6, v1
; GFX1100-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
; GFX1100-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1100-NEXT:    v_add_nc_u32_e32 v2, v1, v0
; GFX1100-NEXT:    v_mov_b32_e32 v1, v2
; GFX1100-NEXT:    s_cbranch_vccz .LBB0_4
; GFX1100-NEXT:  .LBB0_2: ; %.a
; GFX1100-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX1100-NEXT:    ; implicit-def: $sgpr0
; GFX1100-NEXT:    s_cbranch_scc1 .LBB0_1
; GFX1100-NEXT:  ; %bb.3: ; %bb
; GFX1100-NEXT:    ; in Loop: Header=BB0_2 Depth=1
; GFX1100-NEXT:    buffer_load_b32 v0, v1, s[0:3], 64 offen glc
; GFX1100-NEXT:    s_waitcnt vmcnt(0)
; GFX1100-NEXT:    v_cmp_eq_u32_e64 s0, 0, v0
; GFX1100-NEXT:    s_branch .LBB0_1
; GFX1100-NEXT:  .LBB0_4: ; %.exit
; GFX1100-NEXT:    s_setpc_b64 s[30:31]
.entry:
  br label %.a

.a:                                               ; preds = %bb9, %.entry
  %.2 = phi i32 [ 0, %.entry ], [ %i11, %bb9 ]
  br i1 undef, label %bb9, label %bb

bb:                                               ; preds = %.a
  %.i3 = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) undef, i32 %.2, i32 64, i32 1)
  %i5 = icmp eq i32 %.i3, 0
  br label %bb9

bb9:                                              ; preds = %bb, %.a
  %.2.0.in.in = phi i1 [ %i5, %bb ], [ undef, %.a ]
  %.2.0.in = xor i1 %.2.0.in.in, true
  %.2.0 = zext i1 %.2.0.in to i32
  %i11 = add i32 %.2, %.2.0
  %i12 = icmp sgt i32 %.2, -1050
  br i1 %i12, label %.a, label %.exit

.exit:                                            ; preds = %bb9
  ret i32 %.2.0
}

; Test that unused lanes in the s_xor result are masked out with v_cndmask.

define i32 @combine_sub_zext_xor() {
; GFX1010-LABEL: combine_sub_zext_xor:
; GFX1010:       ; %bb.0: ; %.entry
; GFX1010-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1010-NEXT:    v_mov_b32_e32 v1, 0
; GFX1010-NEXT:    s_branch .LBB1_2
; GFX1010-NEXT:  .LBB1_1: ; %bb9
; GFX1010-NEXT:    ; in Loop: Header=BB1_2 Depth=1
; GFX1010-NEXT:    s_xor_b32 s4, s4, -1
; GFX1010-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0xfffffbe6, v1
; GFX1010-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
; GFX1010-NEXT:    v_sub_nc_u32_e32 v2, v1, v0
; GFX1010-NEXT:    v_mov_b32_e32 v1, v2
; GFX1010-NEXT:    s_cbranch_vccz .LBB1_4
; GFX1010-NEXT:  .LBB1_2: ; %.a
; GFX1010-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX1010-NEXT:    ; implicit-def: $sgpr4
; GFX1010-NEXT:    s_cbranch_scc1 .LBB1_1
; GFX1010-NEXT:  ; %bb.3: ; %bb
; GFX1010-NEXT:    ; in Loop: Header=BB1_2 Depth=1
; GFX1010-NEXT:    buffer_load_dword v0, v1, s[4:7], 64 offen glc
; GFX1010-NEXT:    s_waitcnt vmcnt(0)
; GFX1010-NEXT:    v_cmp_eq_u32_e64 s4, 0, v0
; GFX1010-NEXT:    s_branch .LBB1_1
; GFX1010-NEXT:  .LBB1_4: ; %.exit
; GFX1010-NEXT:    s_setpc_b64 s[30:31]
;
; GFX1100-LABEL: combine_sub_zext_xor:
; GFX1100:       ; %bb.0: ; %.entry
; GFX1100-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1100-NEXT:    v_mov_b32_e32 v1, 0
; GFX1100-NEXT:    s_branch .LBB1_2
; GFX1100-NEXT:  .LBB1_1: ; %bb9
; GFX1100-NEXT:    ; in Loop: Header=BB1_2 Depth=1
; GFX1100-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1100-NEXT:    s_xor_b32 s0, s0, -1
; GFX1100-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0xfffffbe6, v1
; GFX1100-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
; GFX1100-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1100-NEXT:    v_sub_nc_u32_e32 v2, v1, v0
; GFX1100-NEXT:    v_mov_b32_e32 v1, v2
; GFX1100-NEXT:    s_cbranch_vccz .LBB1_4
; GFX1100-NEXT:  .LBB1_2: ; %.a
; GFX1100-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX1100-NEXT:    ; implicit-def: $sgpr0
; GFX1100-NEXT:    s_cbranch_scc1 .LBB1_1
; GFX1100-NEXT:  ; %bb.3: ; %bb
; GFX1100-NEXT:    ; in Loop: Header=BB1_2 Depth=1
; GFX1100-NEXT:    buffer_load_b32 v0, v1, s[0:3], 64 offen glc
; GFX1100-NEXT:    s_waitcnt vmcnt(0)
; GFX1100-NEXT:    v_cmp_eq_u32_e64 s0, 0, v0
; GFX1100-NEXT:    s_branch .LBB1_1
; GFX1100-NEXT:  .LBB1_4: ; %.exit
; GFX1100-NEXT:    s_setpc_b64 s[30:31]
.entry:
  br label %.a

.a:                                               ; preds = %bb9, %.entry
  %.2 = phi i32 [ 0, %.entry ], [ %i11, %bb9 ]
  br i1 undef, label %bb9, label %bb

bb:                                               ; preds = %.a
  %.i3 = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) undef, i32 %.2, i32 64, i32 1)
  %i5 = icmp eq i32 %.i3, 0
  br label %bb9

bb9:                                              ; preds = %bb, %.a
  %.2.0.in.in = phi i1 [ %i5, %bb ], [ undef, %.a ]
  %.2.0.in = xor i1 %.2.0.in.in, true
  %.2.0 = zext i1 %.2.0.in to i32
  %i11 = sub i32 %.2, %.2.0
  %i12 = icmp sgt i32 %.2, -1050
  br i1 %i12, label %.a, label %.exit

.exit:                                            ; preds = %bb9
  ret i32 %.2.0
}

; Test that unused lanes in the s_or result are masked out with v_cndmask.
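; A minimal IR sketch of the or-based variant (comments only; %cond and %acc
; are hypothetical names):
;
;   %in.range = icmp sgt i32 %acc, -1050
;   %any      = or i1 %cond, %in.range   ; becomes s_or_b32 of two lane masks
;   %bit      = zext i1 %any to i32      ; expected to lower to v_cndmask_b32 0/1
;   %sum      = add i32 %acc, %bit
;
; The CHECK lines verify that the s_or'd lane mask is fed through v_cndmask_b32
; to produce a per-lane 0/1 value (here for the value returned from %.exit)
; rather than being used directly.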

define i32 @combine_add_zext_or() {
; GFX1010-LABEL: combine_add_zext_or:
; GFX1010:       ; %bb.0: ; %.entry
; GFX1010-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1010-NEXT:    s_mov_b32 s4, 0
; GFX1010-NEXT:    s_branch .LBB2_2
; GFX1010-NEXT:  .LBB2_1: ; %bb9
; GFX1010-NEXT:    ; in Loop: Header=BB2_2 Depth=1
; GFX1010-NEXT:    s_cmpk_gt_i32 s4, 0xfbe6
; GFX1010-NEXT:    s_cselect_b32 s6, -1, 0
; GFX1010-NEXT:    s_add_i32 s4, s4, 1
; GFX1010-NEXT:    s_and_b32 vcc_lo, exec_lo, s6
; GFX1010-NEXT:    s_cbranch_vccz .LBB2_4
; GFX1010-NEXT:  .LBB2_2: ; %.a
; GFX1010-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX1010-NEXT:    ; implicit-def: $sgpr5
; GFX1010-NEXT:    s_cbranch_scc1 .LBB2_1
; GFX1010-NEXT:  ; %bb.3: ; %bb
; GFX1010-NEXT:    ; in Loop: Header=BB2_2 Depth=1
; GFX1010-NEXT:    v_mov_b32_e32 v0, s4
; GFX1010-NEXT:    buffer_load_dword v0, v0, s[4:7], 64 offen glc
; GFX1010-NEXT:    s_waitcnt vmcnt(0)
; GFX1010-NEXT:    v_cmp_eq_u32_e64 s5, 0, v0
; GFX1010-NEXT:    s_branch .LBB2_1
; GFX1010-NEXT:  .LBB2_4: ; %.exit
; GFX1010-NEXT:    s_or_b32 s4, s5, s6
; GFX1010-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
; GFX1010-NEXT:    s_setpc_b64 s[30:31]
;
; GFX1100-LABEL: combine_add_zext_or:
; GFX1100:       ; %bb.0: ; %.entry
; GFX1100-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1100-NEXT:    s_mov_b32 s0, 0
; GFX1100-NEXT:    s_branch .LBB2_2
; GFX1100-NEXT:  .LBB2_1: ; %bb9
; GFX1100-NEXT:    ; in Loop: Header=BB2_2 Depth=1
; GFX1100-NEXT:    s_cmpk_gt_i32 s0, 0xfbe6
; GFX1100-NEXT:    s_cselect_b32 s2, -1, 0
; GFX1100-NEXT:    s_add_i32 s0, s0, 1
; GFX1100-NEXT:    s_and_b32 vcc_lo, exec_lo, s2
; GFX1100-NEXT:    s_cbranch_vccz .LBB2_4
; GFX1100-NEXT:  .LBB2_2: ; %.a
; GFX1100-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX1100-NEXT:    ; implicit-def: $sgpr1
; GFX1100-NEXT:    s_cbranch_scc1 .LBB2_1
; GFX1100-NEXT:  ; %bb.3: ; %bb
; GFX1100-NEXT:    ; in Loop: Header=BB2_2 Depth=1
; GFX1100-NEXT:    v_mov_b32_e32 v0, s0
; GFX1100-NEXT:    buffer_load_b32 v0, v0, s[0:3], 64 offen glc
; GFX1100-NEXT:    s_waitcnt vmcnt(0)
; GFX1100-NEXT:    v_cmp_eq_u32_e64 s1, 0, v0
; GFX1100-NEXT:    s_branch .LBB2_1
; GFX1100-NEXT:  .LBB2_4: ; %.exit
; GFX1100-NEXT:    s_or_b32 s0, s1, s2
; GFX1100-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
; GFX1100-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
; GFX1100-NEXT:    s_setpc_b64 s[30:31]
.entry:
  br label %.a

.a:                                               ; preds = %bb9, %.entry
  %.2 = phi i32 [ 0, %.entry ], [ %i11, %bb9 ]
  br i1 undef, label %bb9, label %bb

bb:                                               ; preds = %.a
  %.i3 = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) undef, i32 %.2, i32 64, i32 1)
  %i5 = icmp eq i32 %.i3, 0
  br label %bb9

bb9:                                              ; preds = %bb, %.a
  %.2.0.in.in = phi i1 [ %i5, %bb ], [ undef, %.a ]
  %t = icmp sgt i32 %.2, -1050
  %.2.0.in = or i1 %.2.0.in.in, %t
  %.2.0 = zext i1 %.2.0.in to i32
  %i11 = add i32 %.2, %.2.0
  %i12 = icmp sgt i32 %.2, -1050
  br i1 %i12, label %.a, label %.exit

.exit:                                            ; preds = %bb9
  ret i32 %.2.0
}

; Test that unused lanes in the s_or result are masked out with v_cndmask.

define i32 @combine_sub_zext_or() {
; GFX1010-LABEL: combine_sub_zext_or:
; GFX1010:       ; %bb.0: ; %.entry
; GFX1010-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1010-NEXT:    s_mov_b32 s4, 0
; GFX1010-NEXT:    s_branch .LBB3_2
; GFX1010-NEXT:  .LBB3_1: ; %bb9
; GFX1010-NEXT:    ; in Loop: Header=BB3_2 Depth=1
; GFX1010-NEXT:    s_cmpk_gt_i32 s4, 0xfbe6
; GFX1010-NEXT:    s_cselect_b32 s6, -1, 0
; GFX1010-NEXT:    s_add_i32 s4, s4, -1
; GFX1010-NEXT:    s_and_b32 vcc_lo, exec_lo, s6
; GFX1010-NEXT:    s_cbranch_vccz .LBB3_4
; GFX1010-NEXT:  .LBB3_2: ; %.a
; GFX1010-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX1010-NEXT:    ; implicit-def: $sgpr5
; GFX1010-NEXT:    s_cbranch_scc1 .LBB3_1
; GFX1010-NEXT:  ; %bb.3: ; %bb
; GFX1010-NEXT:    ; in Loop: Header=BB3_2 Depth=1
; GFX1010-NEXT:    v_mov_b32_e32 v0, s4
; GFX1010-NEXT:    buffer_load_dword v0, v0, s[4:7], 64 offen glc
; GFX1010-NEXT:    s_waitcnt vmcnt(0)
; GFX1010-NEXT:    v_cmp_eq_u32_e64 s5, 0, v0
; GFX1010-NEXT:    s_branch .LBB3_1
; GFX1010-NEXT:  .LBB3_4: ; %.exit
; GFX1010-NEXT:    s_or_b32 s4, s5, s6
; GFX1010-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
; GFX1010-NEXT:    s_setpc_b64 s[30:31]
;
; GFX1100-LABEL: combine_sub_zext_or:
; GFX1100:       ; %bb.0: ; %.entry
; GFX1100-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1100-NEXT:    s_mov_b32 s0, 0
; GFX1100-NEXT:    s_branch .LBB3_2
; GFX1100-NEXT:  .LBB3_1: ; %bb9
; GFX1100-NEXT:    ; in Loop: Header=BB3_2 Depth=1
; GFX1100-NEXT:    s_cmpk_gt_i32 s0, 0xfbe6
; GFX1100-NEXT:    s_cselect_b32 s2, -1, 0
; GFX1100-NEXT:    s_add_i32 s0, s0, -1
; GFX1100-NEXT:    s_and_b32 vcc_lo, exec_lo, s2
; GFX1100-NEXT:    s_cbranch_vccz .LBB3_4
; GFX1100-NEXT:  .LBB3_2: ; %.a
; GFX1100-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX1100-NEXT:    ; implicit-def: $sgpr1
; GFX1100-NEXT:    s_cbranch_scc1 .LBB3_1
; GFX1100-NEXT:  ; %bb.3: ; %bb
; GFX1100-NEXT:    ; in Loop: Header=BB3_2 Depth=1
; GFX1100-NEXT:    v_mov_b32_e32 v0, s0
; GFX1100-NEXT:    buffer_load_b32 v0, v0, s[0:3], 64 offen glc
; GFX1100-NEXT:    s_waitcnt vmcnt(0)
; GFX1100-NEXT:    v_cmp_eq_u32_e64 s1, 0, v0
; GFX1100-NEXT:    s_branch .LBB3_1
; GFX1100-NEXT:  .LBB3_4: ; %.exit
; GFX1100-NEXT:    s_or_b32 s0, s1, s2
; GFX1100-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
; GFX1100-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
; GFX1100-NEXT:    s_setpc_b64 s[30:31]
.entry:
  br label %.a

.a:                                               ; preds = %bb9, %.entry
  %.2 = phi i32 [ 0, %.entry ], [ %i11, %bb9 ]
  br i1 undef, label %bb9, label %bb

bb:                                               ; preds = %.a
  %.i3 = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) undef, i32 %.2, i32 64, i32 1)
  %i5 = icmp eq i32 %.i3, 0
  br label %bb9

bb9:                                              ; preds = %bb, %.a
  %.2.0.in.in = phi i1 [ %i5, %bb ], [ undef, %.a ]
  %t = icmp sgt i32 %.2, -1050
  %.2.0.in = or i1 %.2.0.in.in, %t
  %.2.0 = zext i1 %.2.0.in to i32
  %i11 = sub i32 %.2, %.2.0
  %i12 = icmp sgt i32 %.2, -1050
  br i1 %i12, label %.a, label %.exit

.exit:                                            ; preds = %bb9
  ret i32 %.2.0
}

; Test that unused lanes in the s_and result are masked out with v_cndmask.
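; A minimal IR sketch of the and-based variant (comments only; %cond and %acc
; are hypothetical names):
;
;   %in.range = icmp sgt i32 %acc, -1050
;   %both     = and i1 %cond, %in.range  ; becomes s_and_b32 of two lane masks
;   %bit      = zext i1 %both to i32     ; expected to lower to v_cndmask_b32 0/1
;   %sum      = add i32 %acc, %bit
;
; The CHECK lines verify that the s_and result is fed through v_cndmask_b32
; before the add/sub in the loop, rather than being consumed directly.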

define i32 @combine_add_zext_and() {
; GFX1010-LABEL: combine_add_zext_and:
; GFX1010:       ; %bb.0: ; %.entry
; GFX1010-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1010-NEXT:    v_mov_b32_e32 v1, 0
; GFX1010-NEXT:    s_branch .LBB4_2
; GFX1010-NEXT:  .LBB4_1: ; %bb9
; GFX1010-NEXT:    ; in Loop: Header=BB4_2 Depth=1
; GFX1010-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0xfffffbe6, v1
; GFX1010-NEXT:    s_and_b32 s4, s4, vcc_lo
; GFX1010-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
; GFX1010-NEXT:    v_add_nc_u32_e32 v1, v1, v0
; GFX1010-NEXT:    s_cbranch_vccz .LBB4_4
; GFX1010-NEXT:  .LBB4_2: ; %.a
; GFX1010-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX1010-NEXT:    ; implicit-def: $sgpr4
; GFX1010-NEXT:    s_cbranch_scc1 .LBB4_1
; GFX1010-NEXT:  ; %bb.3: ; %bb
; GFX1010-NEXT:    ; in Loop: Header=BB4_2 Depth=1
; GFX1010-NEXT:    buffer_load_dword v0, v1, s[4:7], 64 offen glc
; GFX1010-NEXT:    s_waitcnt vmcnt(0)
; GFX1010-NEXT:    v_cmp_eq_u32_e64 s4, 0, v0
; GFX1010-NEXT:    s_branch .LBB4_1
; GFX1010-NEXT:  .LBB4_4: ; %.exit
; GFX1010-NEXT:    s_setpc_b64 s[30:31]
;
; GFX1100-LABEL: combine_add_zext_and:
; GFX1100:       ; %bb.0: ; %.entry
; GFX1100-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1100-NEXT:    v_mov_b32_e32 v1, 0
; GFX1100-NEXT:    s_branch .LBB4_2
; GFX1100-NEXT:  .LBB4_1: ; %bb9
; GFX1100-NEXT:    ; in Loop: Header=BB4_2 Depth=1
; GFX1100-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1100-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0xfffffbe6, v1
; GFX1100-NEXT:    s_and_b32 s0, s0, vcc_lo
; GFX1100-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1100-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
; GFX1100-NEXT:    v_add_nc_u32_e32 v1, v1, v0
; GFX1100-NEXT:    s_cbranch_vccz .LBB4_4
; GFX1100-NEXT:  .LBB4_2: ; %.a
; GFX1100-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX1100-NEXT:    ; implicit-def: $sgpr0
; GFX1100-NEXT:    s_cbranch_scc1 .LBB4_1
; GFX1100-NEXT:  ; %bb.3: ; %bb
; GFX1100-NEXT:    ; in Loop: Header=BB4_2 Depth=1
; GFX1100-NEXT:    buffer_load_b32 v0, v1, s[0:3], 64 offen glc
; GFX1100-NEXT:    s_waitcnt vmcnt(0)
; GFX1100-NEXT:    v_cmp_eq_u32_e64 s0, 0, v0
; GFX1100-NEXT:    s_branch .LBB4_1
; GFX1100-NEXT:  .LBB4_4: ; %.exit
; GFX1100-NEXT:    s_setpc_b64 s[30:31]
.entry:
  br label %.a

.a:                                               ; preds = %bb9, %.entry
  %.2 = phi i32 [ 0, %.entry ], [ %i11, %bb9 ]
  br i1 undef, label %bb9, label %bb

bb:                                               ; preds = %.a
  %.i3 = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) undef, i32 %.2, i32 64, i32 1)
  %i5 = icmp eq i32 %.i3, 0
  br label %bb9

bb9:                                              ; preds = %bb, %.a
  %.2.0.in.in = phi i1 [ %i5, %bb ], [ undef, %.a ]
  %t = icmp sgt i32 %.2, -1050
  %.2.0.in = and i1 %.2.0.in.in, %t
  %.2.0 = zext i1 %.2.0.in to i32
  %i11 = add i32 %.2, %.2.0
  %i12 = icmp sgt i32 %.2, -1050
  br i1 %i12, label %.a, label %.exit

.exit:                                            ; preds = %bb9
  ret i32 %.2.0
}

; Test that unused lanes in the s_and result are masked out with v_cndmask.

define i32 @combine_sub_zext_and() {
; GFX1010-LABEL: combine_sub_zext_and:
; GFX1010:       ; %bb.0: ; %.entry
; GFX1010-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1010-NEXT:    v_mov_b32_e32 v1, 0
; GFX1010-NEXT:    s_branch .LBB5_2
; GFX1010-NEXT:  .LBB5_1: ; %bb9
; GFX1010-NEXT:    ; in Loop: Header=BB5_2 Depth=1
; GFX1010-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0xfffffbe6, v1
; GFX1010-NEXT:    s_and_b32 s4, s4, vcc_lo
; GFX1010-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
; GFX1010-NEXT:    v_sub_nc_u32_e32 v1, v1, v0
; GFX1010-NEXT:    s_cbranch_vccz .LBB5_4
; GFX1010-NEXT:  .LBB5_2: ; %.a
; GFX1010-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX1010-NEXT:    ; implicit-def: $sgpr4
; GFX1010-NEXT:    s_cbranch_scc1 .LBB5_1
; GFX1010-NEXT:  ; %bb.3: ; %bb
; GFX1010-NEXT:    ; in Loop: Header=BB5_2 Depth=1
; GFX1010-NEXT:    buffer_load_dword v0, v1, s[4:7], 64 offen glc
; GFX1010-NEXT:    s_waitcnt vmcnt(0)
; GFX1010-NEXT:    v_cmp_eq_u32_e64 s4, 0, v0
; GFX1010-NEXT:    s_branch .LBB5_1
; GFX1010-NEXT:  .LBB5_4: ; %.exit
; GFX1010-NEXT:    s_setpc_b64 s[30:31]
;
; GFX1100-LABEL: combine_sub_zext_and:
; GFX1100:       ; %bb.0: ; %.entry
; GFX1100-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1100-NEXT:    v_mov_b32_e32 v1, 0
; GFX1100-NEXT:    s_branch .LBB5_2
; GFX1100-NEXT:  .LBB5_1: ; %bb9
; GFX1100-NEXT:    ; in Loop: Header=BB5_2 Depth=1
; GFX1100-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1100-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0xfffffbe6, v1
; GFX1100-NEXT:    s_and_b32 s0, s0, vcc_lo
; GFX1100-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1100-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
; GFX1100-NEXT:    v_sub_nc_u32_e32 v1, v1, v0
; GFX1100-NEXT:    s_cbranch_vccz .LBB5_4
; GFX1100-NEXT:  .LBB5_2: ; %.a
; GFX1100-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX1100-NEXT:    ; implicit-def: $sgpr0
; GFX1100-NEXT:    s_cbranch_scc1 .LBB5_1
; GFX1100-NEXT:  ; %bb.3: ; %bb
; GFX1100-NEXT:    ; in Loop: Header=BB5_2 Depth=1
; GFX1100-NEXT:    buffer_load_b32 v0, v1, s[0:3], 64 offen glc
; GFX1100-NEXT:    s_waitcnt vmcnt(0)
; GFX1100-NEXT:    v_cmp_eq_u32_e64 s0, 0, v0
; GFX1100-NEXT:    s_branch .LBB5_1
; GFX1100-NEXT:  .LBB5_4: ; %.exit
; GFX1100-NEXT:    s_setpc_b64 s[30:31]
.entry:
  br label %.a

.a:                                               ; preds = %bb9, %.entry
  %.2 = phi i32 [ 0, %.entry ], [ %i11, %bb9 ]
  br i1 undef, label %bb9, label %bb

bb:                                               ; preds = %.a
  %.i3 = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) undef, i32 %.2, i32 64, i32 1)
  %i5 = icmp eq i32 %.i3, 0
  br label %bb9

bb9:                                              ; preds = %bb, %.a
  %.2.0.in.in = phi i1 [ %i5, %bb ], [ undef, %.a ]
  %t = icmp sgt i32 %.2, -1050
  %.2.0.in = and i1 %.2.0.in.in, %t
  %.2.0 = zext i1 %.2.0.in to i32
  %i11 = sub i32 %.2, %.2.0
  %i12 = icmp sgt i32 %.2, -1050
  br i1 %i12, label %.a, label %.exit

.exit:                                            ; preds = %bb9
  ret i32 %.2.0
}


; Function Attrs: nounwind willreturn memory(argmem: read)
declare i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) nocapture, i32, i32, i32 immarg) #0

attributes #0 = { nounwind willreturn memory(argmem: read) }