; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -misched-cluster=0 < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -misched-cluster=0 -amdgpu-igrouplp-exact-solver-max-branches=250000 < %s | FileCheck -check-prefix=EXACTCUTOFF %s
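; llvm.amdgcn.sched.group.barrier takes three i32 operands: a mask that
; selects an instruction class, the number of instructions in the scheduling
; group, and a SyncID that links groups belonging to the same pipeline.
; Each barrier is also emitted verbatim as an assembly comment, which is what
; the checks below match. Masks exercised in this file (per the per-call
; comments): 0x2 = VALU, 0x8 = MFMA, 0x20 = VMEM read, 0x40 = VMEM write,
; 0x100 = DS read, 0x200 = DS write.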
define amdgpu_kernel void @test_sched_group_barrier() #0 {
; GCN-LABEL: test_sched_group_barrier:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: ; sched_group_barrier mask(0x00000000) size(1) SyncID(2)
; GCN-NEXT: ; sched_group_barrier mask(0x00000001) size(2) SyncID(4)
; GCN-NEXT: ; sched_group_barrier mask(0x00000004) size(8) SyncID(16)
; GCN-NEXT: ; sched_group_barrier mask(0x0000000F) size(10000) SyncID(-1)
; GCN-NEXT: s_endpgm
;
; EXACTCUTOFF-LABEL: test_sched_group_barrier:
; EXACTCUTOFF: ; %bb.0: ; %entry
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000000) size(1) SyncID(2)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000001) size(2) SyncID(4)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000004) size(8) SyncID(16)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x0000000F) size(10000) SyncID(-1)
; EXACTCUTOFF-NEXT: s_endpgm
entry:
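  ; This kernel only checks that the mask/size/SyncID operands round-trip into
  ; the emitted assembly comments, including a 0x0 mask, an oversized group of
  ; 10000, and a negative SyncID.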
call void @llvm.amdgcn.sched.group.barrier(i32 0, i32 1, i32 2) #1
call void @llvm.amdgcn.sched.group.barrier(i32 1, i32 2, i32 4) #1
call void @llvm.amdgcn.sched.group.barrier(i32 4, i32 8, i32 16) #1
call void @llvm.amdgcn.sched.group.barrier(i32 15, i32 10000, i32 -1) #1
ret void
}
define amdgpu_kernel void @test_sched_group_barrier_pipeline_READ_VALU_WRITE(ptr addrspace(1) noalias %in, ptr addrspace(1) noalias %out) #0 {
; GCN-LABEL: test_sched_group_barrier_pipeline_READ_VALU_WRITE:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GCN-NEXT: v_lshlrev_b32_e32 v32, 7, v0
; GCN-NEXT: ; kill: killed $sgpr4_sgpr5
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_load_dwordx4 v[0:3], v32, s[4:5]
; GCN-NEXT: global_load_dwordx4 v[4:7], v32, s[4:5] offset:16
; GCN-NEXT: global_load_dwordx4 v[8:11], v32, s[4:5] offset:32
; GCN-NEXT: global_load_dwordx4 v[12:15], v32, s[4:5] offset:48
; GCN-NEXT: global_load_dwordx4 v[16:19], v32, s[4:5] offset:64
; GCN-NEXT: global_load_dwordx4 v[20:23], v32, s[4:5] offset:80
; GCN-NEXT: global_load_dwordx4 v[24:27], v32, s[4:5] offset:96
; GCN-NEXT: global_load_dwordx4 v[28:31], v32, s[4:5] offset:112
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(8) SyncID(0)
; GCN-NEXT: s_waitcnt vmcnt(7)
; GCN-NEXT: v_mul_lo_u32 v3, v3, v3
; GCN-NEXT: v_mul_lo_u32 v2, v2, v2
; GCN-NEXT: v_mul_lo_u32 v1, v1, v1
; GCN-NEXT: v_mul_lo_u32 v0, v0, v0
; GCN-NEXT: s_waitcnt vmcnt(6)
; GCN-NEXT: v_mul_lo_u32 v7, v7, v7
; GCN-NEXT: v_mul_lo_u32 v6, v6, v6
; GCN-NEXT: v_mul_lo_u32 v5, v5, v5
; GCN-NEXT: v_mul_lo_u32 v4, v4, v4
; GCN-NEXT: s_waitcnt vmcnt(5)
; GCN-NEXT: v_mul_lo_u32 v11, v11, v11
; GCN-NEXT: v_mul_lo_u32 v10, v10, v10
; GCN-NEXT: v_mul_lo_u32 v9, v9, v9
; GCN-NEXT: v_mul_lo_u32 v8, v8, v8
; GCN-NEXT: s_waitcnt vmcnt(4)
; GCN-NEXT: v_mul_lo_u32 v15, v15, v15
; GCN-NEXT: v_mul_lo_u32 v14, v14, v14
; GCN-NEXT: v_mul_lo_u32 v13, v13, v13
; GCN-NEXT: v_mul_lo_u32 v12, v12, v12
; GCN-NEXT: s_waitcnt vmcnt(3)
; GCN-NEXT: v_mul_lo_u32 v19, v19, v19
; GCN-NEXT: v_mul_lo_u32 v18, v18, v18
; GCN-NEXT: v_mul_lo_u32 v17, v17, v17
; GCN-NEXT: v_mul_lo_u32 v16, v16, v16
; GCN-NEXT: s_waitcnt vmcnt(2)
; GCN-NEXT: v_mul_lo_u32 v23, v23, v23
; GCN-NEXT: v_mul_lo_u32 v22, v22, v22
; GCN-NEXT: v_mul_lo_u32 v21, v21, v21
; GCN-NEXT: v_mul_lo_u32 v20, v20, v20
; GCN-NEXT: s_waitcnt vmcnt(1)
; GCN-NEXT: v_mul_lo_u32 v27, v27, v27
; GCN-NEXT: v_mul_lo_u32 v26, v26, v26
; GCN-NEXT: v_mul_lo_u32 v25, v25, v25
; GCN-NEXT: v_mul_lo_u32 v24, v24, v24
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mul_lo_u32 v31, v31, v31
; GCN-NEXT: v_mul_lo_u32 v30, v30, v30
; GCN-NEXT: v_mul_lo_u32 v29, v29, v29
; GCN-NEXT: v_mul_lo_u32 v28, v28, v28
; GCN-NEXT: global_store_dwordx4 v32, v[28:31], s[6:7] offset:112
; GCN-NEXT: global_store_dwordx4 v32, v[24:27], s[6:7] offset:96
; GCN-NEXT: global_store_dwordx4 v32, v[20:23], s[6:7] offset:80
; GCN-NEXT: global_store_dwordx4 v32, v[16:19], s[6:7] offset:64
; GCN-NEXT: global_store_dwordx4 v32, v[12:15], s[6:7] offset:48
; GCN-NEXT: global_store_dwordx4 v32, v[8:11], s[6:7] offset:32
; GCN-NEXT: global_store_dwordx4 v32, v[4:7], s[6:7] offset:16
; GCN-NEXT: global_store_dwordx4 v32, v[0:3], s[6:7]
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(30) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000040) size(8) SyncID(0)
; GCN-NEXT: s_endpgm
;
; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_READ_VALU_WRITE:
; EXACTCUTOFF: ; %bb.0:
; EXACTCUTOFF-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v32, 7, v0
; EXACTCUTOFF-NEXT: ; kill: killed $sgpr4_sgpr5
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[0:3], v32, s[4:5]
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[4:7], v32, s[4:5] offset:16
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[8:11], v32, s[4:5] offset:32
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[12:15], v32, s[4:5] offset:48
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[16:19], v32, s[4:5] offset:64
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[20:23], v32, s[4:5] offset:80
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[24:27], v32, s[4:5] offset:96
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[28:31], v32, s[4:5] offset:112
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(8) SyncID(0)
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(7)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v3, v3, v3
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v2, v2, v2
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v1, v1, v1
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v0, v0, v0
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(6)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v7, v7, v7
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v6, v6, v6
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v5, v5, v5
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v4, v4, v4
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(5)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v11, v11, v11
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v10, v10, v10
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v9, v9, v9
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v8, v8, v8
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(4)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v15, v15, v15
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v14, v14, v14
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v13, v13, v13
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v12, v12, v12
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(3)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v19, v19, v19
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v18, v18, v18
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v17, v17, v17
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v16, v16, v16
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(2)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v23, v23, v23
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v22, v22, v22
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v21, v21, v21
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v20, v20, v20
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(1)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v27, v27, v27
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v26, v26, v26
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v25, v25, v25
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v24, v24, v24
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(0)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v31, v31, v31
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v30, v30, v30
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v29, v29, v29
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v28, v28, v28
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[28:31], s[6:7] offset:112
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[24:27], s[6:7] offset:96
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[20:23], s[6:7] offset:80
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[16:19], s[6:7] offset:64
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[12:15], s[6:7] offset:48
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[8:11], s[6:7] offset:32
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[4:7], s[6:7] offset:16
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[0:3], s[6:7]
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(30) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000040) size(8) SyncID(0)
; EXACTCUTOFF-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #2
%gep1 = getelementptr <32 x i32>, ptr addrspace(1) %in, i32 %tid
%load = load <32 x i32>, ptr addrspace(1) %gep1
%mul = mul <32 x i32> %load, %load
%gep2 = getelementptr <32 x i32>, ptr addrspace(1) %out, i32 %tid
store <32 x i32> %mul, ptr addrspace(1) %gep2
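  ; The three groups below share SyncID 0 and request a READ -> VALU -> WRITE
  ; pipeline: all 8 loads first, then 30 of the multiplies, then all 8 stores,
  ; matching the schedule checked above.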
; 8 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 8, i32 0)
; 30 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 30, i32 0)
; 8 VMEM write
call void @llvm.amdgcn.sched.group.barrier(i32 64, i32 8, i32 0)
ret void
}
define amdgpu_kernel void @test_sched_group_barrier_pipeline_alternating_READ_VALU(ptr addrspace(1) noalias %in, ptr addrspace(1) noalias %out) #0 {
; GCN-LABEL: test_sched_group_barrier_pipeline_alternating_READ_VALU:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GCN-NEXT: v_lshlrev_b32_e32 v32, 7, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_load_dwordx4 v[28:31], v32, s[4:5] offset:16
; GCN-NEXT: global_load_dwordx4 v[0:3], v32, s[4:5]
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: s_waitcnt vmcnt(1)
; GCN-NEXT: v_mul_lo_u32 v29, v29, v29
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mul_lo_u32 v3, v3, v3
; GCN-NEXT: v_mul_lo_u32 v2, v2, v2
; GCN-NEXT: global_load_dwordx4 v[4:7], v32, s[4:5] offset:112
; GCN-NEXT: v_mul_lo_u32 v1, v1, v1
; GCN-NEXT: v_mul_lo_u32 v0, v0, v0
; GCN-NEXT: global_load_dwordx4 v[8:11], v32, s[4:5] offset:96
; GCN-NEXT: v_mul_lo_u32 v28, v28, v28
; GCN-NEXT: v_mul_lo_u32 v31, v31, v31
; GCN-NEXT: v_mul_lo_u32 v30, v30, v30
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: s_waitcnt vmcnt(1)
; GCN-NEXT: v_mul_lo_u32 v7, v7, v7
; GCN-NEXT: v_mul_lo_u32 v6, v6, v6
; GCN-NEXT: global_load_dwordx4 v[12:15], v32, s[4:5] offset:80
; GCN-NEXT: v_mul_lo_u32 v5, v5, v5
; GCN-NEXT: v_mul_lo_u32 v4, v4, v4
; GCN-NEXT: global_load_dwordx4 v[16:19], v32, s[4:5] offset:48
; GCN-NEXT: s_waitcnt vmcnt(2)
; GCN-NEXT: v_mul_lo_u32 v11, v11, v11
; GCN-NEXT: v_mul_lo_u32 v10, v10, v10
; GCN-NEXT: v_mul_lo_u32 v9, v9, v9
; GCN-NEXT: v_mul_lo_u32 v8, v8, v8
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: s_waitcnt vmcnt(1)
; GCN-NEXT: v_mul_lo_u32 v15, v15, v15
; GCN-NEXT: v_mul_lo_u32 v14, v14, v14
; GCN-NEXT: v_mul_lo_u32 v13, v13, v13
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mul_lo_u32 v19, v19, v19
; GCN-NEXT: v_mul_lo_u32 v18, v18, v18
; GCN-NEXT: global_load_dwordx4 v[20:23], v32, s[4:5] offset:64
; GCN-NEXT: global_load_dwordx4 v[24:27], v32, s[4:5] offset:32
; GCN-NEXT: v_mul_lo_u32 v17, v17, v17
; GCN-NEXT: v_mul_lo_u32 v16, v16, v16
; GCN-NEXT: v_mul_lo_u32 v12, v12, v12
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: s_waitcnt vmcnt(1)
; GCN-NEXT: v_mul_lo_u32 v23, v23, v23
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mul_lo_u32 v27, v27, v27
; GCN-NEXT: v_mul_lo_u32 v26, v26, v26
; GCN-NEXT: v_mul_lo_u32 v25, v25, v25
; GCN-NEXT: v_mul_lo_u32 v24, v24, v24
; GCN-NEXT: v_mul_lo_u32 v22, v22, v22
; GCN-NEXT: v_mul_lo_u32 v21, v21, v21
; GCN-NEXT: v_mul_lo_u32 v20, v20, v20
; GCN-NEXT: global_store_dwordx4 v32, v[4:7], s[6:7] offset:112
; GCN-NEXT: global_store_dwordx4 v32, v[8:11], s[6:7] offset:96
; GCN-NEXT: global_store_dwordx4 v32, v[12:15], s[6:7] offset:80
; GCN-NEXT: global_store_dwordx4 v32, v[20:23], s[6:7] offset:64
; GCN-NEXT: global_store_dwordx4 v32, v[16:19], s[6:7] offset:48
; GCN-NEXT: global_store_dwordx4 v32, v[24:27], s[6:7] offset:32
; GCN-NEXT: global_store_dwordx4 v32, v[28:31], s[6:7] offset:16
; GCN-NEXT: global_store_dwordx4 v32, v[0:3], s[6:7]
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000040) size(8) SyncID(0)
; GCN-NEXT: s_endpgm
;
; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_alternating_READ_VALU:
; EXACTCUTOFF: ; %bb.0:
; EXACTCUTOFF-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v32, 7, v0
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[28:31], v32, s[4:5] offset:16
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[0:3], v32, s[4:5]
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(1)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v29, v29, v29
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(0)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v3, v3, v3
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v2, v2, v2
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[4:7], v32, s[4:5] offset:112
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v1, v1, v1
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v0, v0, v0
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[8:11], v32, s[4:5] offset:96
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v28, v28, v28
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v31, v31, v31
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v30, v30, v30
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(1)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v7, v7, v7
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v6, v6, v6
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[12:15], v32, s[4:5] offset:80
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v5, v5, v5
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v4, v4, v4
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[16:19], v32, s[4:5] offset:48
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(2)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v11, v11, v11
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v10, v10, v10
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v9, v9, v9
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v8, v8, v8
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(1)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v15, v15, v15
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v14, v14, v14
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v13, v13, v13
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(0)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v19, v19, v19
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v18, v18, v18
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[20:23], v32, s[4:5] offset:64
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[24:27], v32, s[4:5] offset:32
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v17, v17, v17
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v16, v16, v16
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v12, v12, v12
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(1)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v23, v23, v23
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(0)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v27, v27, v27
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v26, v26, v26
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v25, v25, v25
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v24, v24, v24
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v22, v22, v22
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v21, v21, v21
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v20, v20, v20
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[4:7], s[6:7] offset:112
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[8:11], s[6:7] offset:96
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[12:15], s[6:7] offset:80
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[20:23], s[6:7] offset:64
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[16:19], s[6:7] offset:48
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[24:27], s[6:7] offset:32
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[28:31], s[6:7] offset:16
; EXACTCUTOFF-NEXT: global_store_dwordx4 v32, v[0:3], s[6:7]
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000040) size(8) SyncID(0)
; EXACTCUTOFF-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #2
%gep1 = getelementptr <32 x i32>, ptr addrspace(1) %in, i32 %tid
%load = load <32 x i32>, ptr addrspace(1) %gep1
%mul = mul <32 x i32> %load, %load
%gep2 = getelementptr <32 x i32>, ptr addrspace(1) %out, i32 %tid
store <32 x i32> %mul, ptr addrspace(1) %gep2
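  ; The groups below alternate one VMEM read with two VALU ops and end with
  ; the 8 VMEM writes, so the loads are interleaved with the multiplies in the
  ; checked schedule instead of being clustered up front.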
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 8 VMEM write
call void @llvm.amdgcn.sched.group.barrier(i32 64, i32 8, i32 0)
ret void
}
define amdgpu_kernel void @test_sched_group_barrier_pipeline_alternating_READ_VALU_WRITE(ptr addrspace(1) noalias %in, ptr addrspace(1) noalias %out) #0 {
; GCN-LABEL: test_sched_group_barrier_pipeline_alternating_READ_VALU_WRITE:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GCN-NEXT: v_lshlrev_b32_e32 v16, 7, v0
; GCN-NEXT: ; kill: killed $sgpr4_sgpr5
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_load_dwordx4 v[12:15], v16, s[4:5] offset:32
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mul_lo_u32 v13, v13, v13
; GCN-NEXT: v_mul_lo_u32 v12, v12, v12
; GCN-NEXT: v_mul_lo_u32 v15, v15, v15
; GCN-NEXT: v_mul_lo_u32 v14, v14, v14
; GCN-NEXT: global_store_dwordx4 v16, v[12:15], s[6:7] offset:32
; GCN-NEXT: global_load_dwordx4 v[0:3], v16, s[4:5]
; GCN-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mul_lo_u32 v3, v3, v3
; GCN-NEXT: v_mul_lo_u32 v2, v2, v2
; GCN-NEXT: v_mul_lo_u32 v1, v1, v1
; GCN-NEXT: v_mul_lo_u32 v0, v0, v0
; GCN-NEXT: global_store_dwordx4 v16, v[0:3], s[6:7]
; GCN-NEXT: global_load_dwordx4 v[0:3], v16, s[4:5] offset:112
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mul_lo_u32 v3, v3, v3
; GCN-NEXT: v_mul_lo_u32 v2, v2, v2
; GCN-NEXT: v_mul_lo_u32 v1, v1, v1
; GCN-NEXT: v_mul_lo_u32 v0, v0, v0
; GCN-NEXT: global_store_dwordx4 v16, v[0:3], s[6:7] offset:112
; GCN-NEXT: global_load_dwordx4 v[0:3], v16, s[4:5] offset:96
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mul_lo_u32 v3, v3, v3
; GCN-NEXT: v_mul_lo_u32 v2, v2, v2
; GCN-NEXT: v_mul_lo_u32 v1, v1, v1
; GCN-NEXT: v_mul_lo_u32 v0, v0, v0
; GCN-NEXT: global_store_dwordx4 v16, v[0:3], s[6:7] offset:96
; GCN-NEXT: global_load_dwordx4 v[0:3], v16, s[4:5] offset:80
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mul_lo_u32 v3, v3, v3
; GCN-NEXT: v_mul_lo_u32 v2, v2, v2
; GCN-NEXT: v_mul_lo_u32 v1, v1, v1
; GCN-NEXT: v_mul_lo_u32 v0, v0, v0
; GCN-NEXT: global_store_dwordx4 v16, v[0:3], s[6:7] offset:80
; GCN-NEXT: global_load_dwordx4 v[4:7], v16, s[4:5] offset:48
; GCN-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mul_lo_u32 v7, v7, v7
; GCN-NEXT: v_mul_lo_u32 v6, v6, v6
; GCN-NEXT: v_mul_lo_u32 v5, v5, v5
; GCN-NEXT: v_mul_lo_u32 v4, v4, v4
; GCN-NEXT: global_store_dwordx4 v16, v[4:7], s[6:7] offset:48
; GCN-NEXT: global_load_dwordx4 v[8:11], v16, s[4:5] offset:16
; GCN-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mul_lo_u32 v9, v9, v9
; GCN-NEXT: v_mul_lo_u32 v8, v8, v8
; GCN-NEXT: v_mul_lo_u32 v11, v11, v11
; GCN-NEXT: v_mul_lo_u32 v10, v10, v10
; GCN-NEXT: global_store_dwordx4 v16, v[8:11], s[6:7] offset:16
; GCN-NEXT: global_load_dwordx4 v[8:11], v16, s[4:5] offset:64
; GCN-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mul_lo_u32 v11, v11, v11
; GCN-NEXT: v_mul_lo_u32 v10, v10, v10
; GCN-NEXT: v_mul_lo_u32 v9, v9, v9
; GCN-NEXT: v_mul_lo_u32 v8, v8, v8
; GCN-NEXT: global_store_dwordx4 v16, v[8:11], s[6:7] offset:64
; GCN-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; GCN-NEXT: s_endpgm
;
; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_alternating_READ_VALU_WRITE:
; EXACTCUTOFF: ; %bb.0:
; EXACTCUTOFF-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v16, 7, v0
; EXACTCUTOFF-NEXT: ; kill: killed $sgpr4_sgpr5
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[12:15], v16, s[4:5] offset:32
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(0)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v13, v13, v13
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v12, v12, v12
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v15, v15, v15
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v14, v14, v14
; EXACTCUTOFF-NEXT: global_store_dwordx4 v16, v[12:15], s[6:7] offset:32
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[0:3], v16, s[4:5]
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(0)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v3, v3, v3
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v2, v2, v2
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v1, v1, v1
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v0, v0, v0
; EXACTCUTOFF-NEXT: global_store_dwordx4 v16, v[0:3], s[6:7]
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[0:3], v16, s[4:5] offset:112
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(0)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v3, v3, v3
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v2, v2, v2
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v1, v1, v1
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v0, v0, v0
; EXACTCUTOFF-NEXT: global_store_dwordx4 v16, v[0:3], s[6:7] offset:112
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[0:3], v16, s[4:5] offset:96
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(0)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v3, v3, v3
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v2, v2, v2
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v1, v1, v1
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v0, v0, v0
; EXACTCUTOFF-NEXT: global_store_dwordx4 v16, v[0:3], s[6:7] offset:96
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[0:3], v16, s[4:5] offset:80
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(0)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v3, v3, v3
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v2, v2, v2
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v1, v1, v1
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v0, v0, v0
; EXACTCUTOFF-NEXT: global_store_dwordx4 v16, v[0:3], s[6:7] offset:80
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[4:7], v16, s[4:5] offset:48
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(0)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v7, v7, v7
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v6, v6, v6
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v5, v5, v5
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v4, v4, v4
; EXACTCUTOFF-NEXT: global_store_dwordx4 v16, v[4:7], s[6:7] offset:48
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[8:11], v16, s[4:5] offset:16
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(0)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v9, v9, v9
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v8, v8, v8
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v11, v11, v11
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v10, v10, v10
; EXACTCUTOFF-NEXT: global_store_dwordx4 v16, v[8:11], s[6:7] offset:16
; EXACTCUTOFF-NEXT: global_load_dwordx4 v[8:11], v16, s[4:5] offset:64
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
; EXACTCUTOFF-NEXT: s_waitcnt vmcnt(0)
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v11, v11, v11
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v10, v10, v10
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v9, v9, v9
; EXACTCUTOFF-NEXT: v_mul_lo_u32 v8, v8, v8
; EXACTCUTOFF-NEXT: global_store_dwordx4 v16, v[8:11], s[6:7] offset:64
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #2
%gep1 = getelementptr <32 x i32>, ptr addrspace(1) %in, i32 %tid
%load = load <32 x i32>, ptr addrspace(1) %gep1
%mul = mul <32 x i32> %load, %load
%gep2 = getelementptr <32 x i32>, ptr addrspace(1) %out, i32 %tid
store <32 x i32> %mul, ptr addrspace(1) %gep2
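  ; The groups below repeat a 1 read -> 2 VALU -> 1 write pattern eight times,
  ; producing the per-vector load/multiply/store interleaving checked above.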
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM write
call void @llvm.amdgcn.sched.group.barrier(i32 64, i32 1, i32 0)
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM write
call void @llvm.amdgcn.sched.group.barrier(i32 64, i32 1, i32 0)
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM write
call void @llvm.amdgcn.sched.group.barrier(i32 64, i32 1, i32 0)
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM write
call void @llvm.amdgcn.sched.group.barrier(i32 64, i32 1, i32 0)
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM write
call void @llvm.amdgcn.sched.group.barrier(i32 64, i32 1, i32 0)
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM write
call void @llvm.amdgcn.sched.group.barrier(i32 64, i32 1, i32 0)
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM write
call void @llvm.amdgcn.sched.group.barrier(i32 64, i32 1, i32 0)
; 1 VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 1, i32 0)
; 2 VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 2, i32 0)
; 1 VMEM write
call void @llvm.amdgcn.sched.group.barrier(i32 64, i32 1, i32 0)
ret void
}
define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_cluster(ptr addrspace(3) noalias %in, ptr addrspace(3) noalias %out) #0 {
; GCN-LABEL: test_sched_group_barrier_pipeline_MFMA_cluster:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GCN-NEXT: v_lshlrev_b32_e32 v0, 7, v0
; GCN-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_u32_e32 v1, s0, v0
; GCN-NEXT: ds_read_b128 a[28:31], v1 offset:112
; GCN-NEXT: ds_read_b128 a[24:27], v1 offset:96
; GCN-NEXT: ds_read_b128 a[20:23], v1 offset:80
; GCN-NEXT: ds_read_b128 a[16:19], v1 offset:64
; GCN-NEXT: ds_read_b128 a[0:3], v1
; GCN-NEXT: ds_read_b128 a[4:7], v1 offset:16
; GCN-NEXT: ds_read_b128 a[8:11], v1 offset:32
; GCN-NEXT: ds_read_b128 a[12:15], v1 offset:48
; GCN-NEXT: ds_read_b128 a[60:63], v1 offset:8304
; GCN-NEXT: ds_read_b128 a[56:59], v1 offset:8288
; GCN-NEXT: ds_read_b128 a[52:55], v1 offset:8272
; GCN-NEXT: ds_read_b128 a[48:51], v1 offset:8256
; GCN-NEXT: ds_read_b128 a[44:47], v1 offset:8240
; GCN-NEXT: ds_read_b128 a[40:43], v1 offset:8224
; GCN-NEXT: ds_read_b128 a[36:39], v1 offset:8208
; GCN-NEXT: ds_read_b128 a[32:35], v1 offset:8192
; GCN-NEXT: v_add_u32_e32 v2, 0x6000, v1
; GCN-NEXT: ds_read_b128 a[92:95], v1 offset:24688
; GCN-NEXT: ds_read_b128 a[88:91], v1 offset:24672
; GCN-NEXT: ds_read_b128 a[84:87], v1 offset:24656
; GCN-NEXT: ds_read_b128 a[80:83], v1 offset:24640
; GCN-NEXT: ds_read_b128 a[76:79], v1 offset:24624
; GCN-NEXT: ds_read_b128 a[72:75], v1 offset:24608
; GCN-NEXT: ds_read_b128 a[68:71], v1 offset:24592
; GCN-NEXT: ds_read_b128 a[64:67], v1 offset:24576
; GCN-NEXT: ds_read_b128 a[124:127], v1 offset:49264
; GCN-NEXT: ds_read_b128 a[120:123], v1 offset:49248
; GCN-NEXT: ds_read_b128 a[116:119], v1 offset:49232
; GCN-NEXT: ds_read_b128 a[112:115], v1 offset:49216
; GCN-NEXT: ds_read_b128 a[108:111], v1 offset:49200
; GCN-NEXT: ds_read_b128 a[104:107], v1 offset:49184
; GCN-NEXT: ds_read_b128 a[100:103], v1 offset:49168
; GCN-NEXT: ds_read_b128 a[96:99], v1 offset:49152
; GCN-NEXT: v_mov_b32_e32 v1, 1.0
; GCN-NEXT: ds_read_b128 a[156:159], v2 offset:57456
; GCN-NEXT: ds_read_b128 a[152:155], v2 offset:57440
; GCN-NEXT: ds_read_b128 a[148:151], v2 offset:57424
; GCN-NEXT: ds_read_b128 a[144:147], v2 offset:57408
; GCN-NEXT: ds_read_b128 a[128:131], v2 offset:57344
; GCN-NEXT: ds_read_b128 a[132:135], v2 offset:57360
; GCN-NEXT: ds_read_b128 a[136:139], v2 offset:57376
; GCN-NEXT: ds_read_b128 a[140:143], v2 offset:57392
; GCN-NEXT: v_mov_b32_e32 v2, 2.0
; GCN-NEXT: v_add_u32_e32 v0, s1, v0
; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(40) SyncID(0)
; GCN-NEXT: s_waitcnt lgkmcnt(14)
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v1, v2, a[0:31]
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v1, v2, a[32:63]
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v1, v2, a[64:95]
; GCN-NEXT: s_waitcnt lgkmcnt(8)
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v1, v2, a[96:127]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v1, v2, a[128:159]
; GCN-NEXT: s_nop 7
; GCN-NEXT: s_nop 4
; GCN-NEXT: ds_write_b128 v0, a[28:31] offset:112
; GCN-NEXT: ds_write_b128 v0, a[24:27] offset:96
; GCN-NEXT: ds_write_b128 v0, a[20:23] offset:80
; GCN-NEXT: ds_write_b128 v0, a[16:19] offset:64
; GCN-NEXT: ds_write_b128 v0, a[12:15] offset:48
; GCN-NEXT: ds_write_b128 v0, a[8:11] offset:32
; GCN-NEXT: ds_write_b128 v0, a[4:7] offset:16
; GCN-NEXT: ds_write_b128 v0, a[0:3]
; GCN-NEXT: v_mov_b32_e32 v0, s1
; GCN-NEXT: ds_write_b128 v0, a[56:59] offset:8288
; GCN-NEXT: ds_write_b128 v0, a[60:63] offset:8304
; GCN-NEXT: ds_write_b128 v0, a[48:51] offset:8256
; GCN-NEXT: ds_write_b128 v0, a[52:55] offset:8272
; GCN-NEXT: ds_write_b128 v0, a[40:43] offset:8224
; GCN-NEXT: ds_write_b128 v0, a[44:47] offset:8240
; GCN-NEXT: ds_write_b128 v0, a[32:35] offset:8192
; GCN-NEXT: ds_write_b128 v0, a[36:39] offset:8208
; GCN-NEXT: ds_write_b128 v0, a[88:91] offset:16480
; GCN-NEXT: ds_write_b128 v0, a[92:95] offset:16496
; GCN-NEXT: ds_write_b128 v0, a[80:83] offset:16448
; GCN-NEXT: ds_write_b128 v0, a[84:87] offset:16464
; GCN-NEXT: ds_write_b128 v0, a[72:75] offset:16416
; GCN-NEXT: ds_write_b128 v0, a[76:79] offset:16432
; GCN-NEXT: ds_write_b128 v0, a[64:67] offset:16384
; GCN-NEXT: ds_write_b128 v0, a[68:71] offset:16400
; GCN-NEXT: ds_write_b128 v0, a[120:123] offset:24672
; GCN-NEXT: ds_write_b128 v0, a[124:127] offset:24688
; GCN-NEXT: ds_write_b128 v0, a[112:115] offset:24640
; GCN-NEXT: ds_write_b128 v0, a[116:119] offset:24656
; GCN-NEXT: ds_write_b128 v0, a[104:107] offset:24608
; GCN-NEXT: ds_write_b128 v0, a[108:111] offset:24624
; GCN-NEXT: ds_write_b128 v0, a[96:99] offset:24576
; GCN-NEXT: ds_write_b128 v0, a[100:103] offset:24592
; GCN-NEXT: ds_write_b128 v0, a[152:155] offset:32864
; GCN-NEXT: ds_write_b128 v0, a[156:159] offset:32880
; GCN-NEXT: ds_write_b128 v0, a[144:147] offset:32832
; GCN-NEXT: ds_write_b128 v0, a[148:151] offset:32848
; GCN-NEXT: ds_write_b128 v0, a[136:139] offset:32800
; GCN-NEXT: ds_write_b128 v0, a[140:143] offset:32816
; GCN-NEXT: ds_write_b128 v0, a[128:131] offset:32768
; GCN-NEXT: ds_write_b128 v0, a[132:135] offset:32784
; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(5) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(40) SyncID(0)
; GCN-NEXT: s_endpgm
;
; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_MFMA_cluster:
; EXACTCUTOFF: ; %bb.0: ; %entry
; EXACTCUTOFF-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v0, 7, v0
; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_add_u32_e32 v1, s0, v0
; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v1 offset:112
; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v1 offset:96
; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v1 offset:80
; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v1 offset:64
; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v1
; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v1 offset:16
; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v1 offset:32
; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v1 offset:48
; EXACTCUTOFF-NEXT: ds_read_b128 a[60:63], v1 offset:8304
; EXACTCUTOFF-NEXT: ds_read_b128 a[56:59], v1 offset:8288
; EXACTCUTOFF-NEXT: ds_read_b128 a[52:55], v1 offset:8272
; EXACTCUTOFF-NEXT: ds_read_b128 a[48:51], v1 offset:8256
; EXACTCUTOFF-NEXT: ds_read_b128 a[44:47], v1 offset:8240
; EXACTCUTOFF-NEXT: ds_read_b128 a[40:43], v1 offset:8224
; EXACTCUTOFF-NEXT: ds_read_b128 a[36:39], v1 offset:8208
; EXACTCUTOFF-NEXT: ds_read_b128 a[32:35], v1 offset:8192
; EXACTCUTOFF-NEXT: v_add_u32_e32 v2, 0x6000, v1
; EXACTCUTOFF-NEXT: ds_read_b128 a[92:95], v1 offset:24688
; EXACTCUTOFF-NEXT: ds_read_b128 a[88:91], v1 offset:24672
; EXACTCUTOFF-NEXT: ds_read_b128 a[84:87], v1 offset:24656
; EXACTCUTOFF-NEXT: ds_read_b128 a[80:83], v1 offset:24640
; EXACTCUTOFF-NEXT: ds_read_b128 a[76:79], v1 offset:24624
; EXACTCUTOFF-NEXT: ds_read_b128 a[72:75], v1 offset:24608
; EXACTCUTOFF-NEXT: ds_read_b128 a[68:71], v1 offset:24592
; EXACTCUTOFF-NEXT: ds_read_b128 a[64:67], v1 offset:24576
; EXACTCUTOFF-NEXT: ds_read_b128 a[124:127], v1 offset:49264
; EXACTCUTOFF-NEXT: ds_read_b128 a[120:123], v1 offset:49248
; EXACTCUTOFF-NEXT: ds_read_b128 a[116:119], v1 offset:49232
; EXACTCUTOFF-NEXT: ds_read_b128 a[112:115], v1 offset:49216
; EXACTCUTOFF-NEXT: ds_read_b128 a[108:111], v1 offset:49200
; EXACTCUTOFF-NEXT: ds_read_b128 a[104:107], v1 offset:49184
; EXACTCUTOFF-NEXT: ds_read_b128 a[100:103], v1 offset:49168
; EXACTCUTOFF-NEXT: ds_read_b128 a[96:99], v1 offset:49152
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v1, 1.0
; EXACTCUTOFF-NEXT: ds_read_b128 a[156:159], v2 offset:57456
; EXACTCUTOFF-NEXT: ds_read_b128 a[152:155], v2 offset:57440
; EXACTCUTOFF-NEXT: ds_read_b128 a[148:151], v2 offset:57424
; EXACTCUTOFF-NEXT: ds_read_b128 a[144:147], v2 offset:57408
; EXACTCUTOFF-NEXT: ds_read_b128 a[128:131], v2 offset:57344
; EXACTCUTOFF-NEXT: ds_read_b128 a[132:135], v2 offset:57360
; EXACTCUTOFF-NEXT: ds_read_b128 a[136:139], v2 offset:57376
; EXACTCUTOFF-NEXT: ds_read_b128 a[140:143], v2 offset:57392
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v2, 2.0
; EXACTCUTOFF-NEXT: v_add_u32_e32 v0, s1, v0
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(40) SyncID(0)
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(14)
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v1, v2, a[0:31]
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v1, v2, a[32:63]
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v1, v2, a[64:95]
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(8)
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v1, v2, a[96:127]
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v1, v2, a[128:159]
; EXACTCUTOFF-NEXT: s_nop 7
; EXACTCUTOFF-NEXT: s_nop 4
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[28:31] offset:112
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[24:27] offset:96
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[20:23] offset:80
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[16:19] offset:64
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[12:15] offset:48
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[8:11] offset:32
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[4:7] offset:16
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[0:3]
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v0, s1
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[56:59] offset:8288
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[60:63] offset:8304
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[48:51] offset:8256
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[52:55] offset:8272
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[40:43] offset:8224
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[44:47] offset:8240
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[32:35] offset:8192
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[36:39] offset:8208
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[88:91] offset:16480
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[92:95] offset:16496
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[80:83] offset:16448
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[84:87] offset:16464
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[72:75] offset:16416
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[76:79] offset:16432
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[64:67] offset:16384
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[68:71] offset:16400
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[120:123] offset:24672
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[124:127] offset:24688
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[112:115] offset:24640
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[116:119] offset:24656
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[104:107] offset:24608
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[108:111] offset:24624
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[96:99] offset:24576
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[100:103] offset:24592
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[152:155] offset:32864
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[156:159] offset:32880
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[144:147] offset:32832
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[148:151] offset:32848
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[136:139] offset:32800
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[140:143] offset:32816
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[128:131] offset:32768
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[132:135] offset:32784
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(5) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(40) SyncID(0)
; EXACTCUTOFF-NEXT: s_endpgm
entry:
%idx = call i32 @llvm.amdgcn.workitem.id.x()
%load.0.addr = getelementptr <32 x float>, ptr addrspace(3) %in, i32 %idx
%load.0 = load <32 x float>, ptr addrspace(3) %load.0.addr
%load.1.addr = getelementptr <32 x float>, ptr addrspace(3) %load.0.addr, i32 64
%load.1 = load <32 x float>, ptr addrspace(3) %load.1.addr
%load.2.addr = getelementptr <32 x float>, ptr addrspace(3) %load.1.addr, i32 128
%load.2 = load <32 x float>, ptr addrspace(3) %load.2.addr
%load.3.addr = getelementptr <32 x float>, ptr addrspace(3) %load.2.addr, i32 192
%load.3 = load <32 x float>, ptr addrspace(3) %load.3.addr
%load.4.addr = getelementptr <32 x float>, ptr addrspace(3) %load.3.addr, i32 256
%load.4 = load <32 x float>, ptr addrspace(3) %load.4.addr
%mai.0 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.0, i32 0, i32 0, i32 0)
%mai.1 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.1, i32 0, i32 0, i32 0)
%mai.2 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.2, i32 0, i32 0, i32 0)
%mai.3 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.3, i32 0, i32 0, i32 0)
%mai.4 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.4, i32 0, i32 0, i32 0)
%store.0.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 %idx
store <32 x float> %mai.0, ptr addrspace(3) %store.0.addr
%store.1.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 64
store <32 x float> %mai.1, ptr addrspace(3) %store.1.addr
%store.2.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 128
store <32 x float> %mai.2, ptr addrspace(3) %store.2.addr
%store.3.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 192
store <32 x float> %mai.3, ptr addrspace(3) %store.3.addr
%store.4.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 256
store <32 x float> %mai.4, ptr addrspace(3) %store.4.addr
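  ; Cluster all 40 DS reads first, then the 5 MFMAs, then all 40 DS writes.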
; 40 DS read
call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 40, i32 0)
; 5 MFMA
call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 5, i32 0)
; 40 DS write
call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 40, i32 0)
ret void
}
define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr addrspace(3) noalias %in, ptr addrspace(3) noalias %out) #0 {
; GCN-LABEL: test_sched_group_barrier_pipeline_MFMA_interleave:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GCN-NEXT: v_lshlrev_b32_e32 v0, 7, v0
; GCN-NEXT: v_and_b32_e32 v1, 0x1ff80, v0
; GCN-NEXT: v_mov_b32_e32 v2, 1.0
; GCN-NEXT: v_mov_b32_e32 v3, 2.0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_u32_e32 v0, s0, v1
; GCN-NEXT: ds_read_b128 a[28:31], v0 offset:112
; GCN-NEXT: ds_read_b128 a[24:27], v0 offset:96
; GCN-NEXT: ds_read_b128 a[20:23], v0 offset:80
; GCN-NEXT: ds_read_b128 a[16:19], v0 offset:64
; GCN-NEXT: ds_read_b128 a[0:3], v0
; GCN-NEXT: ds_read_b128 a[4:7], v0 offset:16
; GCN-NEXT: ds_read_b128 a[8:11], v0 offset:32
; GCN-NEXT: ds_read_b128 a[12:15], v0 offset:48
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31]
; GCN-NEXT: v_add_u32_e32 v1, s1, v1
; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; GCN-NEXT: s_nop 7
; GCN-NEXT: s_nop 7
; GCN-NEXT: s_nop 1
; GCN-NEXT: ds_write_b128 v1, a[28:31] offset:112
; GCN-NEXT: ds_write_b128 v1, a[24:27] offset:96
; GCN-NEXT: ds_write_b128 v1, a[20:23] offset:80
; GCN-NEXT: ds_write_b128 v1, a[16:19] offset:64
; GCN-NEXT: ds_write_b128 v1, a[12:15] offset:48
; GCN-NEXT: ds_write_b128 v1, a[8:11] offset:32
; GCN-NEXT: ds_write_b128 v1, a[4:7] offset:16
; GCN-NEXT: ds_write_b128 v1, a[0:3]
; GCN-NEXT: ds_read_b128 a[28:31], v0 offset:8304
; GCN-NEXT: ds_read_b128 a[24:27], v0 offset:8288
; GCN-NEXT: ds_read_b128 a[20:23], v0 offset:8272
; GCN-NEXT: ds_read_b128 a[16:19], v0 offset:8256
; GCN-NEXT: ds_read_b128 a[12:15], v0 offset:8240
; GCN-NEXT: ds_read_b128 a[8:11], v0 offset:8224
; GCN-NEXT: ds_read_b128 a[4:7], v0 offset:8208
; GCN-NEXT: ds_read_b128 a[0:3], v0 offset:8192
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31]
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; GCN-NEXT: s_nop 7
; GCN-NEXT: s_nop 7
; GCN-NEXT: s_nop 1
; GCN-NEXT: ds_write_b128 v1, a[24:27] offset:8288
; GCN-NEXT: ds_write_b128 v1, a[28:31] offset:8304
; GCN-NEXT: ds_write_b128 v1, a[16:19] offset:8256
; GCN-NEXT: ds_write_b128 v1, a[20:23] offset:8272
; GCN-NEXT: ds_write_b128 v1, a[8:11] offset:8224
; GCN-NEXT: ds_write_b128 v1, a[12:15] offset:8240
; GCN-NEXT: ds_write_b128 v1, a[0:3] offset:8192
; GCN-NEXT: ds_write_b128 v1, a[4:7] offset:8208
; GCN-NEXT: ds_read_b128 a[28:31], v0 offset:24688
; GCN-NEXT: ds_read_b128 a[24:27], v0 offset:24672
; GCN-NEXT: ds_read_b128 a[20:23], v0 offset:24656
; GCN-NEXT: ds_read_b128 a[16:19], v0 offset:24640
; GCN-NEXT: ds_read_b128 a[12:15], v0 offset:24624
; GCN-NEXT: ds_read_b128 a[8:11], v0 offset:24608
; GCN-NEXT: ds_read_b128 a[4:7], v0 offset:24592
; GCN-NEXT: ds_read_b128 a[0:3], v0 offset:24576
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31]
; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; GCN-NEXT: s_nop 7
; GCN-NEXT: s_nop 7
; GCN-NEXT: s_nop 2
; GCN-NEXT: ds_write_b128 v1, a[24:27] offset:16480
; GCN-NEXT: ds_write_b128 v1, a[28:31] offset:16496
; GCN-NEXT: ds_write_b128 v1, a[16:19] offset:16448
; GCN-NEXT: ds_write_b128 v1, a[20:23] offset:16464
; GCN-NEXT: ds_write_b128 v1, a[8:11] offset:16416
; GCN-NEXT: ds_write_b128 v1, a[12:15] offset:16432
; GCN-NEXT: ds_write_b128 v1, a[0:3] offset:16384
; GCN-NEXT: ds_write_b128 v1, a[4:7] offset:16400
; GCN-NEXT: ds_read_b128 a[28:31], v0 offset:49264
; GCN-NEXT: ds_read_b128 a[24:27], v0 offset:49248
; GCN-NEXT: ds_read_b128 a[20:23], v0 offset:49232
; GCN-NEXT: ds_read_b128 a[16:19], v0 offset:49216
; GCN-NEXT: ds_read_b128 a[12:15], v0 offset:49200
; GCN-NEXT: ds_read_b128 a[8:11], v0 offset:49184
; GCN-NEXT: ds_read_b128 a[4:7], v0 offset:49168
; GCN-NEXT: ds_read_b128 a[0:3], v0 offset:49152
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31]
; GCN-NEXT: v_add_u32_e32 v0, 0x6000, v0
; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; GCN-NEXT: s_nop 7
; GCN-NEXT: s_nop 7
; GCN-NEXT: s_nop 1
; GCN-NEXT: ds_write_b128 v1, a[24:27] offset:24672
; GCN-NEXT: ds_write_b128 v1, a[28:31] offset:24688
; GCN-NEXT: ds_write_b128 v1, a[16:19] offset:24640
; GCN-NEXT: ds_write_b128 v1, a[20:23] offset:24656
; GCN-NEXT: ds_write_b128 v1, a[8:11] offset:24608
; GCN-NEXT: ds_write_b128 v1, a[12:15] offset:24624
; GCN-NEXT: ds_write_b128 v1, a[0:3] offset:24576
; GCN-NEXT: ds_write_b128 v1, a[4:7] offset:24592
; GCN-NEXT: ds_read_b128 a[28:31], v0 offset:57456
; GCN-NEXT: ds_read_b128 a[24:27], v0 offset:57440
; GCN-NEXT: ds_read_b128 a[20:23], v0 offset:57424
; GCN-NEXT: ds_read_b128 a[16:19], v0 offset:57408
; GCN-NEXT: ds_read_b128 a[0:3], v0 offset:57344
; GCN-NEXT: ds_read_b128 a[4:7], v0 offset:57360
; GCN-NEXT: ds_read_b128 a[8:11], v0 offset:57376
; GCN-NEXT: ds_read_b128 a[12:15], v0 offset:57392
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31]
; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; GCN-NEXT: s_nop 7
; GCN-NEXT: s_nop 7
; GCN-NEXT: s_nop 2
; GCN-NEXT: ds_write_b128 v1, a[24:27] offset:32864
; GCN-NEXT: ds_write_b128 v1, a[28:31] offset:32880
; GCN-NEXT: ds_write_b128 v1, a[16:19] offset:32832
; GCN-NEXT: ds_write_b128 v1, a[20:23] offset:32848
; GCN-NEXT: ds_write_b128 v1, a[8:11] offset:32800
; GCN-NEXT: ds_write_b128 v1, a[12:15] offset:32816
; GCN-NEXT: ds_write_b128 v1, a[0:3] offset:32768
; GCN-NEXT: ds_write_b128 v1, a[4:7] offset:32784
; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
; GCN-NEXT: s_endpgm
;
; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_MFMA_interleave:
; EXACTCUTOFF: ; %bb.0: ; %entry
; EXACTCUTOFF-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v0, 7, v0
; EXACTCUTOFF-NEXT: v_and_b32_e32 v1, 0x1ff80, v0
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v2, 1.0
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v3, 2.0
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_add_u32_e32 v0, s0, v1
; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v0 offset:112
; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v0 offset:96
; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v0 offset:80
; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v0 offset:64
; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v0
; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v0 offset:16
; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v0 offset:32
; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v0 offset:48
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31]
; EXACTCUTOFF-NEXT: v_add_u32_e32 v1, s1, v1
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: s_nop 7
; EXACTCUTOFF-NEXT: s_nop 7
; EXACTCUTOFF-NEXT: s_nop 1
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[28:31] offset:112
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[24:27] offset:96
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[20:23] offset:80
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[16:19] offset:64
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[12:15] offset:48
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[8:11] offset:32
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[4:7] offset:16
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[0:3]
; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v0 offset:8304
; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v0 offset:8288
; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v0 offset:8272
; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v0 offset:8256
; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v0 offset:8240
; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v0 offset:8224
; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v0 offset:8208
; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v0 offset:8192
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31]
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v1, s1
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: s_nop 7
; EXACTCUTOFF-NEXT: s_nop 7
; EXACTCUTOFF-NEXT: s_nop 1
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[24:27] offset:8288
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[28:31] offset:8304
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[16:19] offset:8256
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[20:23] offset:8272
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[8:11] offset:8224
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[12:15] offset:8240
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[0:3] offset:8192
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[4:7] offset:8208
; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v0 offset:24688
; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v0 offset:24672
; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v0 offset:24656
; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v0 offset:24640
; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v0 offset:24624
; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v0 offset:24608
; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v0 offset:24592
; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v0 offset:24576
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31]
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: s_nop 7
; EXACTCUTOFF-NEXT: s_nop 7
; EXACTCUTOFF-NEXT: s_nop 2
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[24:27] offset:16480
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[28:31] offset:16496
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[16:19] offset:16448
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[20:23] offset:16464
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[8:11] offset:16416
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[12:15] offset:16432
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[0:3] offset:16384
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[4:7] offset:16400
; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v0 offset:49264
; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v0 offset:49248
; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v0 offset:49232
; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v0 offset:49216
; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v0 offset:49200
; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v0 offset:49184
; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v0 offset:49168
; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v0 offset:49152
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31]
; EXACTCUTOFF-NEXT: v_add_u32_e32 v0, 0x6000, v0
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: s_nop 7
; EXACTCUTOFF-NEXT: s_nop 7
; EXACTCUTOFF-NEXT: s_nop 1
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[24:27] offset:24672
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[28:31] offset:24688
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[16:19] offset:24640
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[20:23] offset:24656
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[8:11] offset:24608
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[12:15] offset:24624
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[0:3] offset:24576
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[4:7] offset:24592
; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v0 offset:57456
; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v0 offset:57440
; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v0 offset:57424
; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v0 offset:57408
; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v0 offset:57344
; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v0 offset:57360
; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v0 offset:57376
; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v0 offset:57392
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31]
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: s_nop 7
; EXACTCUTOFF-NEXT: s_nop 7
; EXACTCUTOFF-NEXT: s_nop 2
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[24:27] offset:32864
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[28:31] offset:32880
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[16:19] offset:32832
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[20:23] offset:32848
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[8:11] offset:32800
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[12:15] offset:32816
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[0:3] offset:32768
; EXACTCUTOFF-NEXT: ds_write_b128 v1, a[4:7] offset:32784
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
; EXACTCUTOFF-NEXT: s_endpgm
entry:
%idx = call i32 @llvm.amdgcn.workitem.id.x()
%load.0.addr = getelementptr <32 x float>, ptr addrspace(3) %in, i32 %idx
%load.0 = load <32 x float>, ptr addrspace(3) %load.0.addr
%load.1.addr = getelementptr <32 x float>, ptr addrspace(3) %load.0.addr, i32 64
%load.1 = load <32 x float>, ptr addrspace(3) %load.1.addr
%load.2.addr = getelementptr <32 x float>, ptr addrspace(3) %load.1.addr, i32 128
%load.2 = load <32 x float>, ptr addrspace(3) %load.2.addr
%load.3.addr = getelementptr <32 x float>, ptr addrspace(3) %load.2.addr, i32 192
%load.3 = load <32 x float>, ptr addrspace(3) %load.3.addr
%load.4.addr = getelementptr <32 x float>, ptr addrspace(3) %load.3.addr, i32 256
%load.4 = load <32 x float>, ptr addrspace(3) %load.4.addr
%mai.0 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.0, i32 0, i32 0, i32 0)
%mai.1 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.1, i32 0, i32 0, i32 0)
%mai.2 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.2, i32 0, i32 0, i32 0)
%mai.3 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.3, i32 0, i32 0, i32 0)
%mai.4 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.4, i32 0, i32 0, i32 0)
%store.0.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 %idx
store <32 x float> %mai.0, ptr addrspace(3) %store.0.addr
%store.1.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 64
store <32 x float> %mai.1, ptr addrspace(3) %store.1.addr
%store.2.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 128
store <32 x float> %mai.2, ptr addrspace(3) %store.2.addr
%store.3.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 192
store <32 x float> %mai.3, ptr addrspace(3) %store.3.addr
%store.4.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 256
store <32 x float> %mai.4, ptr addrspace(3) %store.4.addr
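; Note on the barriers below: the sched_group_barrier operands are (mask, size,
; SyncID). Judging from the labels used throughout this test (an assumption, not
; authoritative documentation), mask 0x100 selects DS reads, 0x200 DS writes,
; and 0x8 MFMA instructions; size is the number of instructions placed in the
; group, and groups sharing a SyncID are ordered relative to one another.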
; 8 DS read
call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 8, i32 0)
; 1 MFMA
call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
; 8 DS write
call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 8, i32 0)
; 8 DS read
call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 8, i32 0)
; 1 MFMA
call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
; 8 DS write
call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 8, i32 0)
; 8 DS read
call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 8, i32 0)
; 1 MFMA
call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
; 8 DS write
call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 8, i32 0)
; 8 DS read
call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 8, i32 0)
; 1 MFMA
call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
; 8 DS write
call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 8, i32 0)
; 8 DS read
call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 8, i32 0)
; 1 MFMA
call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
; 8 DS write
call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 8, i32 0)
ret void
}
define amdgpu_kernel void @test_sched_group_barrier_pipeline_interleave_EXP_MFMA(ptr addrspace(3) noalias %in, ptr addrspace(3) noalias %out, <5 x float> %in1) #0 {
; GCN-LABEL: test_sched_group_barrier_pipeline_interleave_EXP_MFMA:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x44
; GCN-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b
; GCN-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GCN-NEXT: v_mov_b32_e32 v7, 0x32a5705f
; GCN-NEXT: v_lshlrev_b32_e32 v0, 7, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mul_f32_e32 v4, s4, v3
; GCN-NEXT: v_rndne_f32_e32 v5, v4
; GCN-NEXT: v_sub_f32_e32 v6, v4, v5
; GCN-NEXT: v_fma_f32 v4, s4, v3, -v4
; GCN-NEXT: v_fmac_f32_e32 v4, s4, v7
; GCN-NEXT: v_add_f32_e32 v4, v6, v4
; GCN-NEXT: v_exp_f32_e32 v4, v4
; GCN-NEXT: v_cvt_i32_f32_e32 v5, v5
; GCN-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
; GCN-NEXT: v_add_u32_e32 v1, s0, v0
; GCN-NEXT: ds_read_b128 a[28:31], v1 offset:112
; GCN-NEXT: ds_read_b128 a[24:27], v1 offset:96
; GCN-NEXT: ds_read_b128 a[20:23], v1 offset:80
; GCN-NEXT: ds_read_b128 a[16:19], v1 offset:64
; GCN-NEXT: ds_read_b128 a[0:3], v1
; GCN-NEXT: ds_read_b128 a[4:7], v1 offset:16
; GCN-NEXT: ds_read_b128 a[8:11], v1 offset:32
; GCN-NEXT: ds_read_b128 a[12:15], v1 offset:48
; GCN-NEXT: v_mov_b32_e32 v9, 1.0
; GCN-NEXT: v_ldexp_f32 v4, v4, v5
; GCN-NEXT: v_mov_b32_e32 v5, 0xc2ce8ed0
; GCN-NEXT: v_mul_f32_e32 v10, s5, v3
; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s4, v5
; GCN-NEXT: v_mov_b32_e32 v6, 0x42b17218
; GCN-NEXT: v_rndne_f32_e32 v11, v10
; GCN-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s4, v6
; GCN-NEXT: v_mov_b32_e32 v8, 0x7f800000
; GCN-NEXT: v_sub_f32_e32 v12, v10, v11
; GCN-NEXT: v_fma_f32 v10, s5, v3, -v10
; GCN-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
; GCN-NEXT: v_fmac_f32_e32 v10, s5, v7
; GCN-NEXT: ds_read_b128 a[60:63], v1 offset:8304
; GCN-NEXT: s_waitcnt lgkmcnt(1)
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v9, v4, a[0:31]
; GCN-NEXT: v_add_f32_e32 v4, v12, v10
; GCN-NEXT: v_exp_f32_e32 v4, v4
; GCN-NEXT: v_cvt_i32_f32_e32 v10, v11
; GCN-NEXT: ds_read_b128 a[56:59], v1 offset:8288
; GCN-NEXT: ds_read_b128 a[52:55], v1 offset:8272
; GCN-NEXT: ds_read_b128 a[48:51], v1 offset:8256
; GCN-NEXT: ds_read_b128 a[44:47], v1 offset:8240
; GCN-NEXT: ds_read_b128 a[40:43], v1 offset:8224
; GCN-NEXT: ds_read_b128 a[36:39], v1 offset:8208
; GCN-NEXT: ds_read_b128 a[32:35], v1 offset:8192
; GCN-NEXT: v_ldexp_f32 v4, v4, v10
; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s5, v5
; GCN-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s5, v6
; GCN-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
; GCN-NEXT: v_mul_f32_e32 v10, s6, v3
; GCN-NEXT: v_rndne_f32_e32 v11, v10
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v9, v4, a[32:63]
; GCN-NEXT: v_fma_f32 v4, s6, v3, -v10
; GCN-NEXT: v_sub_f32_e32 v12, v10, v11
; GCN-NEXT: v_fmac_f32_e32 v4, s6, v7
; GCN-NEXT: v_add_f32_e32 v4, v12, v4
; GCN-NEXT: v_exp_f32_e32 v4, v4
; GCN-NEXT: v_cvt_i32_f32_e32 v10, v11
; GCN-NEXT: ds_read_b128 a[92:95], v1 offset:24688
; GCN-NEXT: ds_read_b128 a[88:91], v1 offset:24672
; GCN-NEXT: ds_read_b128 a[84:87], v1 offset:24656
; GCN-NEXT: ds_read_b128 a[80:83], v1 offset:24640
; GCN-NEXT: ds_read_b128 a[76:79], v1 offset:24624
; GCN-NEXT: ds_read_b128 a[72:75], v1 offset:24608
; GCN-NEXT: ds_read_b128 a[68:71], v1 offset:24592
; GCN-NEXT: ds_read_b128 a[64:67], v1 offset:24576
; GCN-NEXT: v_add_u32_e32 v2, 0x6000, v1
; GCN-NEXT: ds_read_b128 a[124:127], v1 offset:49264
; GCN-NEXT: ds_read_b128 a[120:123], v1 offset:49248
; GCN-NEXT: ds_read_b128 a[116:119], v1 offset:49232
; GCN-NEXT: ds_read_b128 a[112:115], v1 offset:49216
; GCN-NEXT: ds_read_b128 a[108:111], v1 offset:49200
; GCN-NEXT: ds_read_b128 a[104:107], v1 offset:49184
; GCN-NEXT: ds_read_b128 a[100:103], v1 offset:49168
; GCN-NEXT: ds_read_b128 a[96:99], v1 offset:49152
; GCN-NEXT: v_ldexp_f32 v1, v4, v10
; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s6, v5
; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v6
; GCN-NEXT: v_mul_f32_e32 v4, s7, v3
; GCN-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc
; GCN-NEXT: v_rndne_f32_e32 v10, v4
; GCN-NEXT: s_load_dword s8, s[2:3], 0x54
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v9, v1, a[64:95]
; GCN-NEXT: v_sub_f32_e32 v1, v4, v10
; GCN-NEXT: v_fma_f32 v4, s7, v3, -v4
; GCN-NEXT: v_fmac_f32_e32 v4, s7, v7
; GCN-NEXT: v_add_f32_e32 v1, v1, v4
; GCN-NEXT: v_exp_f32_e32 v1, v1
; GCN-NEXT: v_cvt_i32_f32_e32 v4, v10
; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v5
; GCN-NEXT: ds_read_b128 a[156:159], v2 offset:57456
; GCN-NEXT: ds_read_b128 a[152:155], v2 offset:57440
; GCN-NEXT: v_ldexp_f32 v1, v1, v4
; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s7, v6
; GCN-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc
; GCN-NEXT: v_mul_f32_e32 v4, s8, v3
; GCN-NEXT: v_fma_f32 v3, s8, v3, -v4
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v9, v1, a[96:127]
; GCN-NEXT: v_rndne_f32_e32 v1, v4
; GCN-NEXT: v_sub_f32_e32 v10, v4, v1
; GCN-NEXT: v_fmac_f32_e32 v3, s8, v7
; GCN-NEXT: v_add_f32_e32 v3, v10, v3
; GCN-NEXT: v_exp_f32_e32 v3, v3
; GCN-NEXT: v_cvt_i32_f32_e32 v1, v1
; GCN-NEXT: ds_read_b128 a[148:151], v2 offset:57424
; GCN-NEXT: ds_read_b128 a[144:147], v2 offset:57408
; GCN-NEXT: ds_read_b128 a[128:131], v2 offset:57344
; GCN-NEXT: ds_read_b128 a[132:135], v2 offset:57360
; GCN-NEXT: ds_read_b128 a[136:139], v2 offset:57376
; GCN-NEXT: ds_read_b128 a[140:143], v2 offset:57392
; GCN-NEXT: v_ldexp_f32 v1, v3, v1
; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s8, v5
; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s8, v6
; GCN-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc
; GCN-NEXT: v_add_u32_e32 v0, s1, v0
; GCN-NEXT: ds_write_b128 v0, a[28:31] offset:112
; GCN-NEXT: s_waitcnt lgkmcnt(1)
; GCN-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v9, v1, a[128:159]
; GCN-NEXT: ds_write_b128 v0, a[24:27] offset:96
; GCN-NEXT: ds_write_b128 v0, a[20:23] offset:80
; GCN-NEXT: ds_write_b128 v0, a[16:19] offset:64
; GCN-NEXT: ds_write_b128 v0, a[12:15] offset:48
; GCN-NEXT: ds_write_b128 v0, a[8:11] offset:32
; GCN-NEXT: ds_write_b128 v0, a[4:7] offset:16
; GCN-NEXT: ds_write_b128 v0, a[0:3]
; GCN-NEXT: v_mov_b32_e32 v0, s1
; GCN-NEXT: ; kill: killed $sgpr2_sgpr3
; GCN-NEXT: ; sched_group_barrier mask(0x00000400) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000400) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000400) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000400) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000400) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; GCN-NEXT: ds_write_b128 v0, a[56:59] offset:8288
; GCN-NEXT: ds_write_b128 v0, a[60:63] offset:8304
; GCN-NEXT: ds_write_b128 v0, a[48:51] offset:8256
; GCN-NEXT: ds_write_b128 v0, a[52:55] offset:8272
; GCN-NEXT: ds_write_b128 v0, a[40:43] offset:8224
; GCN-NEXT: ds_write_b128 v0, a[44:47] offset:8240
; GCN-NEXT: ds_write_b128 v0, a[32:35] offset:8192
; GCN-NEXT: ds_write_b128 v0, a[36:39] offset:8208
; GCN-NEXT: ds_write_b128 v0, a[88:91] offset:16480
; GCN-NEXT: ds_write_b128 v0, a[92:95] offset:16496
; GCN-NEXT: ds_write_b128 v0, a[80:83] offset:16448
; GCN-NEXT: ds_write_b128 v0, a[84:87] offset:16464
; GCN-NEXT: ds_write_b128 v0, a[72:75] offset:16416
; GCN-NEXT: ds_write_b128 v0, a[76:79] offset:16432
; GCN-NEXT: ds_write_b128 v0, a[64:67] offset:16384
; GCN-NEXT: ds_write_b128 v0, a[68:71] offset:16400
; GCN-NEXT: ds_write_b128 v0, a[120:123] offset:24672
; GCN-NEXT: ds_write_b128 v0, a[124:127] offset:24688
; GCN-NEXT: ds_write_b128 v0, a[112:115] offset:24640
; GCN-NEXT: ds_write_b128 v0, a[116:119] offset:24656
; GCN-NEXT: ds_write_b128 v0, a[104:107] offset:24608
; GCN-NEXT: ds_write_b128 v0, a[108:111] offset:24624
; GCN-NEXT: ds_write_b128 v0, a[96:99] offset:24576
; GCN-NEXT: ds_write_b128 v0, a[100:103] offset:24592
; GCN-NEXT: ds_write_b128 v0, a[152:155] offset:32864
; GCN-NEXT: ds_write_b128 v0, a[156:159] offset:32880
; GCN-NEXT: ds_write_b128 v0, a[144:147] offset:32832
; GCN-NEXT: ds_write_b128 v0, a[148:151] offset:32848
; GCN-NEXT: ds_write_b128 v0, a[136:139] offset:32800
; GCN-NEXT: ds_write_b128 v0, a[140:143] offset:32816
; GCN-NEXT: ds_write_b128 v0, a[128:131] offset:32768
; GCN-NEXT: ds_write_b128 v0, a[132:135] offset:32784
; GCN-NEXT: s_endpgm
;
; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_interleave_EXP_MFMA:
; EXACTCUTOFF: ; %bb.0: ; %entry
; EXACTCUTOFF-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x44
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b
; EXACTCUTOFF-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v7, 0x32a5705f
; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v0, 7, v0
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_mul_f32_e32 v4, s4, v3
; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v5, v4
; EXACTCUTOFF-NEXT: v_sub_f32_e32 v6, v4, v5
; EXACTCUTOFF-NEXT: v_fma_f32 v4, s4, v3, -v4
; EXACTCUTOFF-NEXT: v_fmac_f32_e32 v4, s4, v7
; EXACTCUTOFF-NEXT: v_add_f32_e32 v4, v6, v4
; EXACTCUTOFF-NEXT: v_exp_f32_e32 v4, v4
; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v5, v5
; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
; EXACTCUTOFF-NEXT: v_add_u32_e32 v1, s0, v0
; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v1 offset:112
; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v1 offset:96
; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v1 offset:80
; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v1 offset:64
; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v1
; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v1 offset:16
; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v1 offset:32
; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v1 offset:48
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v9, 1.0
; EXACTCUTOFF-NEXT: v_ldexp_f32 v4, v4, v5
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v5, 0xc2ce8ed0
; EXACTCUTOFF-NEXT: v_mul_f32_e32 v10, s5, v3
; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s4, v5
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v6, 0x42b17218
; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v11, v10
; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s4, v6
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v8, 0x7f800000
; EXACTCUTOFF-NEXT: v_sub_f32_e32 v12, v10, v11
; EXACTCUTOFF-NEXT: v_fma_f32 v10, s5, v3, -v10
; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
; EXACTCUTOFF-NEXT: v_fmac_f32_e32 v10, s5, v7
; EXACTCUTOFF-NEXT: ds_read_b128 a[60:63], v1 offset:8304
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(1)
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v9, v4, a[0:31]
; EXACTCUTOFF-NEXT: v_add_f32_e32 v4, v12, v10
; EXACTCUTOFF-NEXT: v_exp_f32_e32 v4, v4
; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v10, v11
; EXACTCUTOFF-NEXT: ds_read_b128 a[56:59], v1 offset:8288
; EXACTCUTOFF-NEXT: ds_read_b128 a[52:55], v1 offset:8272
; EXACTCUTOFF-NEXT: ds_read_b128 a[48:51], v1 offset:8256
; EXACTCUTOFF-NEXT: ds_read_b128 a[44:47], v1 offset:8240
; EXACTCUTOFF-NEXT: ds_read_b128 a[40:43], v1 offset:8224
; EXACTCUTOFF-NEXT: ds_read_b128 a[36:39], v1 offset:8208
; EXACTCUTOFF-NEXT: ds_read_b128 a[32:35], v1 offset:8192
; EXACTCUTOFF-NEXT: v_ldexp_f32 v4, v4, v10
; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s5, v5
; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s5, v6
; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
; EXACTCUTOFF-NEXT: v_mul_f32_e32 v10, s6, v3
; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v11, v10
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v9, v4, a[32:63]
; EXACTCUTOFF-NEXT: v_fma_f32 v4, s6, v3, -v10
; EXACTCUTOFF-NEXT: v_sub_f32_e32 v12, v10, v11
; EXACTCUTOFF-NEXT: v_fmac_f32_e32 v4, s6, v7
; EXACTCUTOFF-NEXT: v_add_f32_e32 v4, v12, v4
; EXACTCUTOFF-NEXT: v_exp_f32_e32 v4, v4
; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v10, v11
; EXACTCUTOFF-NEXT: ds_read_b128 a[92:95], v1 offset:24688
; EXACTCUTOFF-NEXT: ds_read_b128 a[88:91], v1 offset:24672
; EXACTCUTOFF-NEXT: ds_read_b128 a[84:87], v1 offset:24656
; EXACTCUTOFF-NEXT: ds_read_b128 a[80:83], v1 offset:24640
; EXACTCUTOFF-NEXT: ds_read_b128 a[76:79], v1 offset:24624
; EXACTCUTOFF-NEXT: ds_read_b128 a[72:75], v1 offset:24608
; EXACTCUTOFF-NEXT: ds_read_b128 a[68:71], v1 offset:24592
; EXACTCUTOFF-NEXT: ds_read_b128 a[64:67], v1 offset:24576
; EXACTCUTOFF-NEXT: v_add_u32_e32 v2, 0x6000, v1
; EXACTCUTOFF-NEXT: ds_read_b128 a[124:127], v1 offset:49264
; EXACTCUTOFF-NEXT: ds_read_b128 a[120:123], v1 offset:49248
; EXACTCUTOFF-NEXT: ds_read_b128 a[116:119], v1 offset:49232
; EXACTCUTOFF-NEXT: ds_read_b128 a[112:115], v1 offset:49216
; EXACTCUTOFF-NEXT: ds_read_b128 a[108:111], v1 offset:49200
; EXACTCUTOFF-NEXT: ds_read_b128 a[104:107], v1 offset:49184
; EXACTCUTOFF-NEXT: ds_read_b128 a[100:103], v1 offset:49168
; EXACTCUTOFF-NEXT: ds_read_b128 a[96:99], v1 offset:49152
; EXACTCUTOFF-NEXT: v_ldexp_f32 v1, v4, v10
; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s6, v5
; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v6
; EXACTCUTOFF-NEXT: v_mul_f32_e32 v4, s7, v3
; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc
; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v10, v4
; EXACTCUTOFF-NEXT: s_load_dword s8, s[2:3], 0x54
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v9, v1, a[64:95]
; EXACTCUTOFF-NEXT: v_sub_f32_e32 v1, v4, v10
; EXACTCUTOFF-NEXT: v_fma_f32 v4, s7, v3, -v4
; EXACTCUTOFF-NEXT: v_fmac_f32_e32 v4, s7, v7
; EXACTCUTOFF-NEXT: v_add_f32_e32 v1, v1, v4
; EXACTCUTOFF-NEXT: v_exp_f32_e32 v1, v1
; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v4, v10
; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v5
; EXACTCUTOFF-NEXT: ds_read_b128 a[156:159], v2 offset:57456
; EXACTCUTOFF-NEXT: ds_read_b128 a[152:155], v2 offset:57440
; EXACTCUTOFF-NEXT: v_ldexp_f32 v1, v1, v4
; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s7, v6
; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc
; EXACTCUTOFF-NEXT: v_mul_f32_e32 v4, s8, v3
; EXACTCUTOFF-NEXT: v_fma_f32 v3, s8, v3, -v4
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v9, v1, a[96:127]
; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v1, v4
; EXACTCUTOFF-NEXT: v_sub_f32_e32 v10, v4, v1
; EXACTCUTOFF-NEXT: v_fmac_f32_e32 v3, s8, v7
; EXACTCUTOFF-NEXT: v_add_f32_e32 v3, v10, v3
; EXACTCUTOFF-NEXT: v_exp_f32_e32 v3, v3
; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v1, v1
; EXACTCUTOFF-NEXT: ds_read_b128 a[148:151], v2 offset:57424
; EXACTCUTOFF-NEXT: ds_read_b128 a[144:147], v2 offset:57408
; EXACTCUTOFF-NEXT: ds_read_b128 a[128:131], v2 offset:57344
; EXACTCUTOFF-NEXT: ds_read_b128 a[132:135], v2 offset:57360
; EXACTCUTOFF-NEXT: ds_read_b128 a[136:139], v2 offset:57376
; EXACTCUTOFF-NEXT: ds_read_b128 a[140:143], v2 offset:57392
; EXACTCUTOFF-NEXT: v_ldexp_f32 v1, v3, v1
; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s8, v5
; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s8, v6
; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc
; EXACTCUTOFF-NEXT: v_add_u32_e32 v0, s1, v0
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[28:31] offset:112
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(1)
; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v9, v1, a[128:159]
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[24:27] offset:96
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[20:23] offset:80
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[16:19] offset:64
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[12:15] offset:48
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[8:11] offset:32
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[4:7] offset:16
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[0:3]
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v0, s1
; EXACTCUTOFF-NEXT: ; kill: killed $sgpr2_sgpr3
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000400) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000400) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000400) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000400) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000400) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[56:59] offset:8288
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[60:63] offset:8304
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[48:51] offset:8256
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[52:55] offset:8272
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[40:43] offset:8224
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[44:47] offset:8240
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[32:35] offset:8192
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[36:39] offset:8208
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[88:91] offset:16480
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[92:95] offset:16496
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[80:83] offset:16448
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[84:87] offset:16464
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[72:75] offset:16416
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[76:79] offset:16432
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[64:67] offset:16384
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[68:71] offset:16400
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[120:123] offset:24672
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[124:127] offset:24688
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[112:115] offset:24640
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[116:119] offset:24656
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[104:107] offset:24608
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[108:111] offset:24624
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[96:99] offset:24576
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[100:103] offset:24592
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[152:155] offset:32864
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[156:159] offset:32880
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[144:147] offset:32832
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[148:151] offset:32848
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[136:139] offset:32800
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[140:143] offset:32816
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[128:131] offset:32768
; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[132:135] offset:32784
; EXACTCUTOFF-NEXT: s_endpgm
entry:
%idx = call i32 @llvm.amdgcn.workitem.id.x()
%load.0.addr = getelementptr <32 x float>, ptr addrspace(3) %in, i32 %idx
%load.0 = load <32 x float>, ptr addrspace(3) %load.0.addr
%load.1.addr = getelementptr <32 x float>, ptr addrspace(3) %load.0.addr, i32 64
%load.1 = load <32 x float>, ptr addrspace(3) %load.1.addr
%load.2.addr = getelementptr <32 x float>, ptr addrspace(3) %load.1.addr, i32 128
%load.2 = load <32 x float>, ptr addrspace(3) %load.2.addr
%load.3.addr = getelementptr <32 x float>, ptr addrspace(3) %load.2.addr, i32 192
%load.3 = load <32 x float>, ptr addrspace(3) %load.3.addr
%load.4.addr = getelementptr <32 x float>, ptr addrspace(3) %load.3.addr, i32 256
%load.4 = load <32 x float>, ptr addrspace(3) %load.4.addr
%el0 = extractelement <5 x float> %in1, i32 0
%el1 = extractelement <5 x float> %in1, i32 1
%el2 = extractelement <5 x float> %in1, i32 2
%el3 = extractelement <5 x float> %in1, i32 3
%el4 = extractelement <5 x float> %in1, i32 4
%exp0 = tail call float @llvm.exp.f32(float %el0)
%exp1 = tail call float @llvm.exp.f32(float %el1)
%exp2 = tail call float @llvm.exp.f32(float %el2)
%exp3 = tail call float @llvm.exp.f32(float %el3)
%exp4 = tail call float @llvm.exp.f32(float %el4)
%mai.0 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float %exp0, <32 x float> %load.0, i32 0, i32 0, i32 0)
%mai.1 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float %exp1, <32 x float> %load.1, i32 0, i32 0, i32 0)
%mai.2 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float %exp2, <32 x float> %load.2, i32 0, i32 0, i32 0)
%mai.3 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float %exp3, <32 x float> %load.3, i32 0, i32 0, i32 0)
%mai.4 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float %exp4, <32 x float> %load.4, i32 0, i32 0, i32 0)
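; The barriers below interleave one transcendental instruction (mask 0x400,
; assumed here to cover the v_exp_f32 expansion of llvm.exp.f32) with one MFMA
; (mask 0x8), repeated once per exp/MFMA pair.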
; 1 TRANS
call void @llvm.amdgcn.sched.group.barrier(i32 1024, i32 1, i32 0)
; 1 MFMA
call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
; 1 TRANS
call void @llvm.amdgcn.sched.group.barrier(i32 1024, i32 1, i32 0)
; 1 MFMA
call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
; 1 TRANS
call void @llvm.amdgcn.sched.group.barrier(i32 1024, i32 1, i32 0)
; 1 MFMA
call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
; 1 TRANS
call void @llvm.amdgcn.sched.group.barrier(i32 1024, i32 1, i32 0)
; 1 MFMA
call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
; 1 TRANS
call void @llvm.amdgcn.sched.group.barrier(i32 1024, i32 1, i32 0)
; 1 MFMA
call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
%store.0.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 %idx
store <32 x float> %mai.0, ptr addrspace(3) %store.0.addr
%store.1.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 64
store <32 x float> %mai.1, ptr addrspace(3) %store.1.addr
%store.2.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 128
store <32 x float> %mai.2, ptr addrspace(3) %store.2.addr
%store.3.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 192
store <32 x float> %mai.3, ptr addrspace(3) %store.3.addr
%store.4.addr = getelementptr <32 x float>, ptr addrspace(3) %out, i32 256
store <32 x float> %mai.4, ptr addrspace(3) %store.4.addr
ret void
}
declare i32 @llvm.amdgcn.workitem.id.x() #2
declare void @llvm.amdgcn.sched.group.barrier(i32, i32, i32) #1
declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32, i32, i32) #1
declare float @llvm.exp.f32(float) #2
attributes #0 = { nounwind "amdgpu-flat-work-group-size"="1,256" }
attributes #1 = { nounwind }
attributes #2 = { nounwind readnone speculatable }