; llvm/test/CodeGen/AMDGPU/scalar_to_vector.v8i16.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX900 %s
; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 < %s | FileCheck -check-prefixes=GFX906 %s
; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 < %s | FileCheck -check-prefixes=GFX908 %s
; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX90A %s

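; Builds an <8 x i16> from a <2 x i32> kernel argument: the low dword is
; broadcast via bitcast + shufflevector and concatenated with the original
; vector, then stored per work-item. On every checked target the vector
; elements collapse to copies of s0 and s1 feeding a single flat_store_dwordx4.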
define amdgpu_kernel void @scalar_to_vector_v8i16(<2 x i32> %in, ptr %out) #0 {
; GFX900-LABEL: scalar_to_vector_v8i16:
; GFX900:       ; %bb.0: ; %entry
; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
; GFX900-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
; GFX900-NEXT:    v_mov_b32_e32 v5, s3
; GFX900-NEXT:    v_add_co_u32_e32 v4, vcc, s2, v4
; GFX900-NEXT:    v_mov_b32_e32 v0, s0
; GFX900-NEXT:    v_mov_b32_e32 v1, s1
; GFX900-NEXT:    v_mov_b32_e32 v2, s0
; GFX900-NEXT:    v_mov_b32_e32 v3, s0
; GFX900-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX900-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
; GFX900-NEXT:    s_endpgm
;
; GFX906-LABEL: scalar_to_vector_v8i16:
; GFX906:       ; %bb.0: ; %entry
; GFX906-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
; GFX906-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
; GFX906-NEXT:    s_waitcnt lgkmcnt(0)
; GFX906-NEXT:    v_mov_b32_e32 v5, s3
; GFX906-NEXT:    v_add_co_u32_e32 v4, vcc, s2, v4
; GFX906-NEXT:    v_mov_b32_e32 v0, s0
; GFX906-NEXT:    v_mov_b32_e32 v1, s1
; GFX906-NEXT:    v_mov_b32_e32 v2, s0
; GFX906-NEXT:    v_mov_b32_e32 v3, s0
; GFX906-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX906-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
; GFX906-NEXT:    s_endpgm
;
; GFX908-LABEL: scalar_to_vector_v8i16:
; GFX908:       ; %bb.0: ; %entry
; GFX908-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
; GFX908-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
; GFX908-NEXT:    s_waitcnt lgkmcnt(0)
; GFX908-NEXT:    v_mov_b32_e32 v5, s3
; GFX908-NEXT:    v_add_co_u32_e32 v4, vcc, s2, v4
; GFX908-NEXT:    v_mov_b32_e32 v0, s0
; GFX908-NEXT:    v_mov_b32_e32 v1, s1
; GFX908-NEXT:    v_mov_b32_e32 v2, s0
; GFX908-NEXT:    v_mov_b32_e32 v3, s0
; GFX908-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX908-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
; GFX908-NEXT:    s_endpgm
;
; GFX90A-LABEL: scalar_to_vector_v8i16:
; GFX90A:       ; %bb.0: ; %entry
; GFX90A-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
; GFX90A-NEXT:    v_and_b32_e32 v4, 0x3ff, v0
; GFX90A-NEXT:    v_lshlrev_b32_e32 v4, 4, v4
; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
; GFX90A-NEXT:    v_mov_b32_e32 v5, s3
; GFX90A-NEXT:    v_add_co_u32_e32 v4, vcc, s2, v4
; GFX90A-NEXT:    v_mov_b32_e32 v0, s0
; GFX90A-NEXT:    v_mov_b32_e32 v1, s1
; GFX90A-NEXT:    v_mov_b32_e32 v2, s0
; GFX90A-NEXT:    v_mov_b32_e32 v3, s0
; GFX90A-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX90A-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
; GFX90A-NEXT:    s_endpgm
entry:
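  ; Broadcast the low 32 bits of %in across an <8 x i16>, widen %in to
  ; <4 x i32>, then merge: lanes 0-5 come from the widened vector and lanes
  ; 6-7 from the broadcast, so the stored dwords are s0, s1, s0, s0.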
  %val.1.i32 = extractelement <2 x i32> %in, i64 0
  %val.2.vec2.i16 = bitcast i32 %val.1.i32 to <2 x i16>
  %val.3.vec8.i16 = shufflevector <2 x i16> %val.2.vec2.i16, <2 x i16> %val.2.vec2.i16, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>

  %val.4.vec4.i32 = shufflevector <2 x i32> %in, <2 x i32> %in, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %val.5.vec8.i16 = bitcast <4 x i32> %val.4.vec4.i32 to <8 x i16>

  %val.6.vec8.i16 = shufflevector <8 x i16> %val.5.vec8.i16, <8 x i16> %val.3.vec8.i16, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>

  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
  %tid.ext = sext i32 %tid to i64
  %out.gep = getelementptr inbounds <8 x i16>, ptr %out, i64 %tid.ext
  store <8 x i16> %val.6.vec8.i16, ptr %out.gep, align 16

  ret void
}

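; Same pattern as above using <2 x float> and <8 x half>; the expected code is
; identical apart from the ordering of the v_mov copies.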
define amdgpu_kernel void @scalar_to_vector_v8f16(<2 x float> %in, ptr %out) #0 {
; GFX900-LABEL: scalar_to_vector_v8f16:
; GFX900:       ; %bb.0: ; %entry
; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
; GFX900-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
; GFX900-NEXT:    v_mov_b32_e32 v5, s3
; GFX900-NEXT:    v_add_co_u32_e32 v4, vcc, s2, v4
; GFX900-NEXT:    v_mov_b32_e32 v0, s0
; GFX900-NEXT:    v_mov_b32_e32 v1, s1
; GFX900-NEXT:    v_mov_b32_e32 v3, s0
; GFX900-NEXT:    v_mov_b32_e32 v2, s0
; GFX900-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX900-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
; GFX900-NEXT:    s_endpgm
;
; GFX906-LABEL: scalar_to_vector_v8f16:
; GFX906:       ; %bb.0: ; %entry
; GFX906-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
; GFX906-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
; GFX906-NEXT:    s_waitcnt lgkmcnt(0)
; GFX906-NEXT:    v_mov_b32_e32 v5, s3
; GFX906-NEXT:    v_add_co_u32_e32 v4, vcc, s2, v4
; GFX906-NEXT:    v_mov_b32_e32 v0, s0
; GFX906-NEXT:    v_mov_b32_e32 v1, s1
; GFX906-NEXT:    v_mov_b32_e32 v3, s0
; GFX906-NEXT:    v_mov_b32_e32 v2, s0
; GFX906-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX906-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
; GFX906-NEXT:    s_endpgm
;
; GFX908-LABEL: scalar_to_vector_v8f16:
; GFX908:       ; %bb.0: ; %entry
; GFX908-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
; GFX908-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
; GFX908-NEXT:    s_waitcnt lgkmcnt(0)
; GFX908-NEXT:    v_mov_b32_e32 v5, s3
; GFX908-NEXT:    v_add_co_u32_e32 v4, vcc, s2, v4
; GFX908-NEXT:    v_mov_b32_e32 v0, s0
; GFX908-NEXT:    v_mov_b32_e32 v1, s1
; GFX908-NEXT:    v_mov_b32_e32 v3, s0
; GFX908-NEXT:    v_mov_b32_e32 v2, s0
; GFX908-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX908-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
; GFX908-NEXT:    s_endpgm
;
; GFX90A-LABEL: scalar_to_vector_v8f16:
; GFX90A:       ; %bb.0: ; %entry
; GFX90A-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
; GFX90A-NEXT:    v_and_b32_e32 v4, 0x3ff, v0
; GFX90A-NEXT:    v_lshlrev_b32_e32 v4, 4, v4
; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
; GFX90A-NEXT:    v_mov_b32_e32 v5, s3
; GFX90A-NEXT:    v_add_co_u32_e32 v4, vcc, s2, v4
; GFX90A-NEXT:    v_mov_b32_e32 v0, s0
; GFX90A-NEXT:    v_mov_b32_e32 v1, s1
; GFX90A-NEXT:    v_mov_b32_e32 v3, s0
; GFX90A-NEXT:    v_mov_b32_e32 v2, s0
; GFX90A-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX90A-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
; GFX90A-NEXT:    s_endpgm
entry:
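  ; Mirrors the i16 variant: broadcast the low float as <2 x half>, widen %in
  ; to <4 x float>, and merge so each stored dword is again s0 or s1.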
  %val.1.float = extractelement <2 x float> %in, i64 0
  %val.2.vec2.half = bitcast float %val.1.float to <2 x half>
  %val.3.vec8.half = shufflevector <2 x half> %val.2.vec2.half, <2 x half> %val.2.vec2.half, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>

  %val.4.vec4.float = shufflevector <2 x float> %in, <2 x float> %in, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %val.5.vec8.half = bitcast <4 x float> %val.4.vec4.float to <8 x half>

  %val.6.vec8.half = shufflevector <8 x half> %val.5.vec8.half, <8 x half> %val.3.vec8.half, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>

  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
  %tid.ext = sext i32 %tid to i64
  %out.gep = getelementptr inbounds <8 x half>, ptr %out, i64 %tid.ext
  store <8 x half> %val.6.vec8.half, ptr %out.gep, align 16

  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }