; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn < %s | FileCheck --check-prefixes=SI,GCN,FUNC %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck --check-prefixes=VI,GCN,FUNC %s
; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck --check-prefixes=R600,FUNC %s
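
; Lowering of the llvm.r600.read.local.size.{x,y,z} intrinsics.
; On SI/VI the value is fetched with a scalar load from the same base as the
; explicit kernel arguments, below the %out pointer (which sits at dword
; offset 0x9 on SI / byte offset 0x24 on VI): X is at 0x6 / 0x18. SI SMRD
; offsets are dword-scaled while VI offsets are in bytes, so both address
; byte 24. On R600 the value comes from the constant buffer (KC0[1].Z for X).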
define amdgpu_kernel void @local_size_x(ptr addrspace(1) %out) {
; SI-LABEL: local_size_x:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x6
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: local_size_x:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x18
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; R600-LABEL: local_size_x:
; R600: ; %bb.0: ; %entry
; R600-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: LSHR T0.X, KC0[2].Y, literal.x,
; R600-NEXT: MOV * T1.X, KC0[1].Z,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
entry:
%0 = call i32 @llvm.r600.read.local.size.x() #0
store i32 %0, ptr addrspace(1) %out
ret void
}
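
; Y dimension: SI dword offset 0x7, VI byte offset 0x1c, R600 KC0[1].W.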
define amdgpu_kernel void @local_size_y(ptr addrspace(1) %out) {
; SI-LABEL: local_size_y:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x7
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: local_size_y:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x1c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; R600-LABEL: local_size_y:
; R600: ; %bb.0: ; %entry
; R600-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: LSHR T0.X, KC0[2].Y, literal.x,
; R600-NEXT: MOV * T1.X, KC0[1].W,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
entry:
%0 = call i32 @llvm.r600.read.local.size.y() #0
store i32 %0, ptr addrspace(1) %out
ret void
}
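
; Z dimension: SI dword offset 0x8, VI byte offset 0x20, R600 KC0[2].X.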
define amdgpu_kernel void @local_size_z(ptr addrspace(1) %out) {
; SI-LABEL: local_size_z:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x8
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: local_size_z:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x20
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; R600-LABEL: local_size_z:
; R600: ; %bb.0: ; %entry
; R600-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: LSHR T0.X, KC0[2].Y, literal.x,
; R600-NEXT: MOV * T1.X, KC0[2].X,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
entry:
%0 = call i32 @llvm.r600.read.local.size.z() #0
store i32 %0, ptr addrspace(1) %out
ret void
}
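
; X*Y: the two adjacent dwords are fetched with a single s_load_dwordx2 and
; multiplied with s_mul_i32; R600 uses MULLO_INT on KC0[1].Z and KC0[1].W.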
define amdgpu_kernel void @local_size_xy(ptr addrspace(1) %out) {
; SI-LABEL: local_size_xy:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x6
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mul_i32 s4, s4, s5
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: local_size_xy:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x18
; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mul_i32 s0, s0, s1
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; R600-LABEL: local_size_xy:
; R600: ; %bb.0: ; %entry
; R600-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: LSHR T0.X, KC0[2].Y, literal.x,
; R600-NEXT: MULLO_INT * T1.X, KC0[1].Z, KC0[1].W,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
entry:
%x = call i32 @llvm.r600.read.local.size.x() #0
%y = call i32 @llvm.r600.read.local.size.y() #0
%val = mul i32 %x, %y
store i32 %val, ptr addrspace(1) %out
ret void
}
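
; X*Z: the operands are not adjacent, so SI/VI use two separate s_load_dword
; instructions before the s_mul_i32.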
define amdgpu_kernel void @local_size_xz(ptr addrspace(1) %out) {
; SI-LABEL: local_size_xz:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x6
; SI-NEXT: s_load_dword s5, s[2:3], 0x8
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mul_i32 s4, s4, s5
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: local_size_xz:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x18
; VI-NEXT: s_load_dword s5, s[2:3], 0x20
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mul_i32 s2, s4, s5
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; R600-LABEL: local_size_xz:
; R600: ; %bb.0: ; %entry
; R600-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: LSHR T0.X, KC0[2].Y, literal.x,
; R600-NEXT: MULLO_INT * T1.X, KC0[1].Z, KC0[2].X,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
entry:
%x = call i32 @llvm.r600.read.local.size.x() #0
%z = call i32 @llvm.r600.read.local.size.z() #0
%val = mul i32 %x, %z
store i32 %val, ptr addrspace(1) %out
ret void
}
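
; Y*Z: Y, Z and the %out pointer are contiguous, so a single s_load_dwordx4
; feeds both the multiply and the store address.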
define amdgpu_kernel void @local_size_yz(ptr addrspace(1) %out) {
; SI-LABEL: local_size_yz:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x7
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mul_i32 s0, s0, s1
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: local_size_yz:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x1c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mul_i32 s0, s0, s1
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; R600-LABEL: local_size_yz:
; R600: ; %bb.0: ; %entry
; R600-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: LSHR T0.X, KC0[2].Y, literal.x,
; R600-NEXT: MULLO_INT * T1.X, KC0[1].W, KC0[2].X,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
entry:
%y = call i32 @llvm.r600.read.local.size.y() #0
%z = call i32 @llvm.r600.read.local.size.z() #0
%val = mul i32 %y, %z
store i32 %val, ptr addrspace(1) %out
ret void
}
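
; X*Y+Z: scalar s_mul_i32 followed by s_add_i32 on SI/VI; MULLO_INT feeding
; ADD_INT on R600.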
define amdgpu_kernel void @local_size_xyz(ptr addrspace(1) %out) {
; SI-LABEL: local_size_xyz:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x6
; SI-NEXT: s_load_dword s6, s[2:3], 0x8
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mul_i32 s2, s4, s5
; SI-NEXT: s_add_i32 s4, s2, s6
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: local_size_xyz:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x18
; VI-NEXT: s_load_dword s4, s[2:3], 0x20
; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mul_i32 s0, s0, s1
; VI-NEXT: s_add_i32 s0, s0, s4
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; R600-LABEL: local_size_xyz:
; R600: ; %bb.0: ; %entry
; R600-NEXT: ALU 3, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: MULLO_INT * T0.X, KC0[1].Z, KC0[1].W,
; R600-NEXT: ADD_INT T0.X, PS, KC0[2].X,
; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
entry:
%x = call i32 @llvm.r600.read.local.size.x() #0
%y = call i32 @llvm.r600.read.local.size.y() #0
%z = call i32 @llvm.r600.read.local.size.z() #0
%xy = mul i32 %x, %y
%xyz = add i32 %xy, %z
store i32 %xyz, ptr addrspace(1) %out
ret void
}
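
; The shl/lshr-by-16 pair that masks the result to 16 bits should fold away on
; SI/VI (the backend knows the local size fits in 16 bits), so the output
; matches @local_size_x; R600 keeps an explicit AND_INT with 65535.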
define amdgpu_kernel void @local_size_x_known_bits(ptr addrspace(1) %out) {
; SI-LABEL: local_size_x_known_bits:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x6
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: local_size_x_known_bits:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x18
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; R600-LABEL: local_size_x_known_bits:
; R600: ; %bb.0: ; %entry
; R600-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: LSHR T0.X, KC0[2].Y, literal.x,
; R600-NEXT: AND_INT * T1.X, KC0[1].Z, literal.y,
; R600-NEXT: 2(2.802597e-45), 65535(9.183409e-41)
entry:
%size = call i32 @llvm.r600.read.local.size.x() #0
%shl = shl i32 %size, 16
%shr = lshr i32 %shl, 16
store i32 %shr, ptr addrspace(1) %out
ret void
}
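
; Known-bits fold for the Y dimension (VI byte offset 0x1c, R600 KC0[1].W).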
define amdgpu_kernel void @local_size_y_known_bits(ptr addrspace(1) %out) {
; SI-LABEL: local_size_y_known_bits:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x7
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: local_size_y_known_bits:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x1c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; R600-LABEL: local_size_y_known_bits:
; R600: ; %bb.0: ; %entry
; R600-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: LSHR T0.X, KC0[2].Y, literal.x,
; R600-NEXT: AND_INT * T1.X, KC0[1].W, literal.y,
; R600-NEXT: 2(2.802597e-45), 65535(9.183409e-41)
entry:
%size = call i32 @llvm.r600.read.local.size.y() #0
%shl = shl i32 %size, 16
%shr = lshr i32 %shl, 16
store i32 %shr, ptr addrspace(1) %out
ret void
}
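
; Known-bits fold for the Z dimension (VI byte offset 0x20, R600 KC0[2].X).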
define amdgpu_kernel void @local_size_z_known_bits(ptr addrspace(1) %out) {
; SI-LABEL: local_size_z_known_bits:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x8
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: local_size_z_known_bits:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x20
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; R600-LABEL: local_size_z_known_bits:
; R600: ; %bb.0: ; %entry
; R600-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: LSHR T0.X, KC0[2].Y, literal.x,
; R600-NEXT: AND_INT * T1.X, KC0[2].X, literal.y,
; R600-NEXT: 2(2.802597e-45), 65535(9.183409e-41)
entry:
%size = call i32 @llvm.r600.read.local.size.z() #0
%shl = shl i32 %size, 16
%shr = lshr i32 %shl, 16
store i32 %shr, ptr addrspace(1) %out
ret void
}

declare i32 @llvm.r600.read.local.size.x() #0
declare i32 @llvm.r600.read.local.size.y() #0
declare i32 @llvm.r600.read.local.size.z() #0

attributes #0 = { nounwind readnone }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; FUNC: {{.*}}
; GCN: {{.*}}