; llvm/test/CodeGen/AMDGPU/partial-shift-shrink.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=GCN %s

; Test the combine that narrows a 64-bit shift to a 32-bit shift when the
; result is truncated to 16 bits.
define i16 @trunc_srl_i64_16_to_i16(i64 %x) {
; GCN-LABEL: trunc_srl_i64_16_to_i16:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shift = lshr i64 %x, 16
  %trunc = trunc i64 %shift to i16
  ret i16 %trunc
}

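; Negative test: bits [17,33) of the source are needed, which straddle the
; 32-bit half boundary, so the full 64-bit shift must be kept.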
define i16 @trunc_srl_i64_17_to_i16(i64 %x) {
; GCN-LABEL: trunc_srl_i64_17_to_i16:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_lshrrev_b64 v[0:1], 17, v[0:1]
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shift = lshr i64 %x, 17
  %trunc = trunc i64 %shift to i16
  ret i16 %trunc
}

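; The combine also applies to non-power-of-2 types: only bits [15,30) of the
; i55 source are needed, so a 32-bit shift still suffices.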
define i15 @trunc_srl_i55_15_to_i15(i55 %x) {
; GCN-LABEL: trunc_srl_i55_15_to_i15:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_lshrrev_b32_e32 v0, 15, v0
; GCN-NEXT:    v_add_u16_e32 v0, 4, v0
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shift = lshr i55 %x, 15
  %trunc = trunc i55 %shift to i15
  %add = add i15 %trunc, 4
  ret i15 %add
}

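; An arithmetic shift narrows the same way: the result only needs bits
; [16,32) and consumes no sign-fill bits, so a 32-bit logical shift suffices.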
define i16 @trunc_sra_i64_16_to_i16(i64 %x) {
; GCN-LABEL: trunc_sra_i64_16_to_i16:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shift = ashr i64 %x, 16
  %trunc = trunc i64 %shift to i16
  ret i16 %trunc
}

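; Negative test: bits [17,33) cross the 32-bit boundary, so the shift stays
; 64-bit, though it still becomes a logical shift since no sign-fill bits
; are consumed.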
define i16 @trunc_sra_i64_17_to_i16(i64 %x) {
; GCN-LABEL: trunc_sra_i64_17_to_i16:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_lshrrev_b64 v[0:1], 17, v[0:1]
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shift = ashr i64 %x, 17
  %trunc = trunc i64 %shift to i16
  ret i16 %trunc
}

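; A left shift by 16 zeroes the low 16 bits, so the truncated result folds
; to the constant 0.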
define i16 @trunc_shl_i64_16_to_i16(i64 %x) {
; GCN-LABEL: trunc_shl_i64_16_to_i16:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_mov_b32_e32 v0, 0
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shift = shl i64 %x, 16
  %trunc = trunc i64 %shift to i16
  ret i16 %trunc
}

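; Likewise for a shift amount of 17: the low 16 bits are still zero.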
define i16 @trunc_shl_i64_17_to_i16(i64 %x) {
; GCN-LABEL: trunc_shl_i64_17_to_i16:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_mov_b32_e32 v0, 0
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shift = shl i64 %x, 17
  %trunc = trunc i64 %shift to i16
  ret i16 %trunc
}

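; Vector version: each lane needs bits [16,32) of its low dword, so both
; results can be extracted and packed with a single v_perm_b32.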
define <2 x i16> @trunc_srl_v2i64_16_to_v2i16(<2 x i64> %x) {
; GCN-LABEL: trunc_srl_v2i64_16_to_v2i16:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    s_mov_b32 s4, 0x7060302
; GCN-NEXT:    v_perm_b32 v0, v2, v0, s4
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shift = lshr <2 x i64> %x, <i64 16, i64 16>
  %trunc = trunc <2 x i64> %shift to <2 x i16>
  ret <2 x i16> %trunc
}

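; Scalar (SGPR) path in a kernel: only the low dword of the i64 argument is
; loaded, and the shift shrinks to s_lshr_b32.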
define amdgpu_kernel void @s_trunc_srl_i64_16_to_i16(i64 %x) {
; GCN-LABEL: s_trunc_srl_i64_16_to_i16:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dword s0, s[2:3], 0x24
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_lshr_b32 s0, s0, 16
; GCN-NEXT:    s_or_b32 s0, s0, 4
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    global_store_short v[0:1], v0, off
; GCN-NEXT:    s_endpgm
  %shift = lshr i64 %x, 16
  %trunc = trunc i64 %shift to i16
  %or = or i16 %trunc, 4
  store i16 %or, ptr addrspace(1) undef
  ret void
}

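; A variable amount masked to at most 15 reads no bit above 30, so the
; 64-bit shift can be narrowed to 32 bits.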
define i16 @trunc_srl_i64_var_mask15_to_i16(i64 %x, i64 %amt) {
; GCN-LABEL: trunc_srl_i64_var_mask15_to_i16:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_and_b32_e32 v1, 15, v2
; GCN-NEXT:    v_lshrrev_b32_e32 v0, v1, v0
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %amt.masked = and i64 %amt, 15
  %shift = lshr i64 %x, %amt.masked
  %trunc = trunc i64 %shift to i16
  ret i16 %trunc
}

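; An amount of at most 16 reads no bit above 31, so narrowing is still
; valid.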
define i16 @trunc_srl_i64_var_mask16_to_i16(i64 %x, i64 %amt) {
; GCN-LABEL: trunc_srl_i64_var_mask16_to_i16:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_and_b32_e32 v1, 16, v2
; GCN-NEXT:    v_lshrrev_b32_e32 v0, v1, v0
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %amt.masked = and i64 %amt, 16
  %shift = lshr i64 %x, %amt.masked
  %trunc = trunc i64 %shift to i16
  ret i16 %trunc
}

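; Negative test: an amount of up to 31 can read bits up to bit 46, crossing
; the 32-bit boundary, so the 64-bit shift is kept.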
define i16 @trunc_srl_i64_var_mask31_to_i16(i64 %x, i64 %amt) {
; GCN-LABEL: trunc_srl_i64_var_mask31_to_i16:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_and_b32_e32 v2, 31, v2
; GCN-NEXT:    v_lshrrev_b64 v[0:1], v2, v[0:1]
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %amt.masked = and i64 %amt, 31
  %shift = lshr i64 %x, %amt.masked
  %trunc = trunc i64 %shift to i16
  ret i16 %trunc
}

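; Known-bits case: after masking with 0xA000000 only bits 25 and 27 can be
; set, so the shift, truncate, and add all fit in 32-bit operations.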
define i32 @trunc_srl_i64_25_to_i26(i64 %x) {
; GCN-LABEL: trunc_srl_i64_25_to_i26:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_and_b32_e32 v0, 0xa000000, v0
; GCN-NEXT:    v_lshrrev_b32_e32 v0, 25, v0
; GCN-NEXT:    v_add_u32_e32 v0, 55, v0
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %value.knownbits2 = and i64 %x, 167772160 ; 0xA000000
  %shift = lshr i64 %value.knownbits2, 25
  %trunc = trunc i64 %shift to i26
  %add = add i26 %trunc, 55
  %ext = zext i26 %add to i32
  ret i32 %ext
}