# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn -mcpu=hawaii -mattr=+flat-for-global -run-pass=amdgpu-regbankselect %s -verify-machineinstrs -o - | FileCheck %s
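# These tests check how register bank selection treats loads: uniform loads from the
# constant address space and invariant uniform global loads stay on the SGPR bank,
# while divergent, volatile, atomic, or potentially clobbered global loads are moved
# to the VGPR bank.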
--- |
  define amdgpu_kernel void @load_constant(ptr addrspace(4) %ptr0) {
    ret void
  }

  define amdgpu_kernel void @load_constant_volatile(ptr addrspace(4) %ptr0) {
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_invariant(ptr addrspace(1) %ptr1) {
    %tmp0 = load i32, ptr addrspace(1) %ptr1
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_noclobber(ptr addrspace(1) %ptr1) {
    %tmp0 = load i32, ptr addrspace(1) %ptr1, !amdgpu.noclobber !0
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_variant(ptr addrspace(1) %ptr1) {
    %tmp0 = load i32, ptr addrspace(1) %ptr1
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_volatile_invariant(ptr addrspace(1) %ptr1) {
    %tmp0 = load i32, ptr addrspace(1) %ptr1
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_atomic_invariant(ptr addrspace(1) %ptr1) {
    %tmp0 = load i32, ptr addrspace(1) %ptr1
    ret void
  }

  define amdgpu_kernel void @load_global_non_uniform(ptr addrspace(1) %ptr2) {
    %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
    %tmp1 = getelementptr i32, ptr addrspace(1) %ptr2, i32 %tmp0
    %tmp2 = load i32, ptr addrspace(1) %tmp1
    ret void
  }

  define amdgpu_kernel void @load_constant_v4i16_from_8_align8(ptr addrspace(4) %ptr0) {
    ret void
  }

  declare i32 @llvm.amdgcn.workitem.id.x() #0

  attributes #0 = { nounwind readnone }

  !0 = !{}
...
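# A uniform load from the constant address space stays on the SGPR bank.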
---
name: load_constant
legalized: true
body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_constant
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:sgpr(s32) = G_LOAD [[COPY]](p4) :: (load (s32) from %ir.ptr0, addrspace 4)
    %0:_(p4) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (load (s32) from %ir.ptr0)
...
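# A volatile constant-address-space load is still assigned the SGPR bank.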
---
name: load_constant_volatile
legalized: true
body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_constant_volatile
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:sgpr(s32) = G_LOAD [[COPY]](p4) :: (volatile load (s32) from %ir.ptr0, addrspace 4)
    %0:_(p4) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (volatile load (s32) from %ir.ptr0)
...
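# A uniform, invariant global load stays on the SGPR bank.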
---
name: load_global_uniform_invariant
legalized: true
body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_invariant
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:sgpr(s32) = G_LOAD [[COPY]](p1) :: (invariant load (s32) from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (invariant load (s32) from %ir.ptr1)
...
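# The !amdgpu.noclobber metadata on the IR load is not reflected in the MMO below,
# so the load is treated like any other non-invariant global load and moved to the
# VGPR bank.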
---
name: load_global_uniform_noclobber
legalized: true
body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_noclobber
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (load (s32) from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (load (s32) from %ir.ptr1)
...
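# A uniform but non-invariant global load may be clobbered by stores, so it is moved
# to the VGPR bank.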
---
name: load_global_uniform_variant
legalized: true
body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_variant
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (load (s32) from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (load (s32) from %ir.ptr1)
...
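# A volatile global load is moved to the VGPR bank even though it is uniform and invariant.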
---
name: load_global_uniform_volatile_invariant
legalized: true
body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_volatile_invariant
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (volatile invariant load (s32) from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (volatile invariant load (s32) from %ir.ptr1)
...
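# An atomic (acquire) global load is moved to the VGPR bank even though it is uniform
# and invariant.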
---
name: load_global_uniform_atomic_invariant
legalized: true
body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_atomic_invariant
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (invariant load acquire (s32) from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (invariant load acquire (s32) from %ir.ptr1)
...
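# The MMO points at %ir.tmp1, whose address is derived from the workitem ID, so the
# load is treated as divergent and both the pointer and the result are assigned the
# VGPR bank.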
---
name: load_global_non_uniform
legalized: true
body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_non_uniform
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (load (s32) from %ir.tmp1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (load (s32) from %ir.tmp1)
...
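# A uniform <4 x s16> constant-address-space load with 8-byte alignment stays on the
# SGPR bank.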
---
name: load_constant_v4i16_from_8_align8
legalized: true
body: |
  bb.0:
    ; CHECK-LABEL: name: load_constant_v4i16_from_8_align8
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:sgpr(<4 x s16>) = G_LOAD [[COPY]](p4) :: (load (<4 x s16>) from %ir.ptr0, addrspace 4)
    %0:_(p4) = COPY $sgpr0_sgpr1
    %1:_(<4 x s16>) = G_LOAD %0 :: (load (<4 x s16>) from %ir.ptr0, align 8, addrspace 4)
...