; llvm/test/CodeGen/X86/AMX/amx-config.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-int8,+avx512f -verify-machineinstrs | FileCheck %s --check-prefix=AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-int8,+avx2 -verify-machineinstrs | FileCheck %s --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-int8,+avx -verify-machineinstrs | FileCheck %s --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-int8 -verify-machineinstrs | FileCheck %s --check-prefix=SSE2

; 1 KiB backing buffers used as tile load/store memory; 64-byte aligned to
; satisfy AMX tile row strides (stride 32 is passed to the intrinsics below).
@buf = dso_local global [1024 x i8] zeroinitializer, align 64
@buf2 = dso_local global [1024 x i8] zeroinitializer, align 64

; Function Attrs: nounwind uwtable
; test_api: loads three AMX tiles from a buffer chosen by %0 (branch), runs
; tdpbssd, and stores the result to @buf. The CHECK lines (autogenerated by
; update_llc_test_checks.py) verify that the tile-configuration memory is
; zero-initialized with the widest vector stores each feature level provides
; (one zmm store for AVX512, two ymm for AVX2, four xmm for AVX/SSE2), and
; that a single ldtilecfg is emitted in the entry block dominating both
; branches, followed by tilerelease before return.
define <4 x i32> @test_api(i32 %0, i16 signext %1, i16 signext %2, <4 x i32> %xmm0) {
; AVX512-LABEL: test_api:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vmovups %zmm1, -{{[0-9]+}}(%rsp)
; AVX512-NEXT:    movb $1, -{{[0-9]+}}(%rsp)
; AVX512-NEXT:    movw %dx, -{{[0-9]+}}(%rsp)
; AVX512-NEXT:    movw %dx, -{{[0-9]+}}(%rsp)
; AVX512-NEXT:    movw %si, -{{[0-9]+}}(%rsp)
; AVX512-NEXT:    testl %edi, %edi
; AVX512-NEXT:    movsbl %sil, %eax
; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; AVX512-NEXT:    ldtilecfg -{{[0-9]+}}(%rsp)
; AVX512-NEXT:    je .LBB0_2
; AVX512-NEXT:  # %bb.1:
; AVX512-NEXT:    movl $buf, %ecx
; AVX512-NEXT:    jmp .LBB0_3
; AVX512-NEXT:  .LBB0_2:
; AVX512-NEXT:    movl $buf2, %ecx
; AVX512-NEXT:  .LBB0_3:
; AVX512-NEXT:    movl $32, %edi
; AVX512-NEXT:    tileloadd (%rcx,%rdi), %tmm0
; AVX512-NEXT:    tileloadd (%rcx,%rdi), %tmm2
; AVX512-NEXT:    tileloadd (%rcx,%rdi), %tmm1
; AVX512-NEXT:    tdpbssd %tmm2, %tmm0, %tmm1
; AVX512-NEXT:    movl $buf, %ecx
; AVX512-NEXT:    movl $32, %esi
; AVX512-NEXT:    tilestored %tmm1, (%rcx,%rsi)
; AVX512-NEXT:    tilerelease
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
;
; AVX2-LABEL: test_api:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vmovups %ymm1, -{{[0-9]+}}(%rsp)
; AVX2-NEXT:    vmovups %ymm1, -{{[0-9]+}}(%rsp)
; AVX2-NEXT:    movb $1, -{{[0-9]+}}(%rsp)
; AVX2-NEXT:    movw %dx, -{{[0-9]+}}(%rsp)
; AVX2-NEXT:    movw %dx, -{{[0-9]+}}(%rsp)
; AVX2-NEXT:    movw %si, -{{[0-9]+}}(%rsp)
; AVX2-NEXT:    testl %edi, %edi
; AVX2-NEXT:    movsbl %sil, %eax
; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; AVX2-NEXT:    ldtilecfg -{{[0-9]+}}(%rsp)
; AVX2-NEXT:    je .LBB0_2
; AVX2-NEXT:  # %bb.1:
; AVX2-NEXT:    movl $buf, %ecx
; AVX2-NEXT:    jmp .LBB0_3
; AVX2-NEXT:  .LBB0_2:
; AVX2-NEXT:    movl $buf2, %ecx
; AVX2-NEXT:  .LBB0_3:
; AVX2-NEXT:    movl $32, %edi
; AVX2-NEXT:    tileloadd (%rcx,%rdi), %tmm0
; AVX2-NEXT:    tileloadd (%rcx,%rdi), %tmm2
; AVX2-NEXT:    tileloadd (%rcx,%rdi), %tmm1
; AVX2-NEXT:    tdpbssd %tmm2, %tmm0, %tmm1
; AVX2-NEXT:    movl $buf, %ecx
; AVX2-NEXT:    movl $32, %esi
; AVX2-NEXT:    tilestored %tmm1, (%rcx,%rsi)
; AVX2-NEXT:    tilerelease
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX1-LABEL: test_api:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vmovups %xmm1, -{{[0-9]+}}(%rsp)
; AVX1-NEXT:    vmovups %xmm1, -{{[0-9]+}}(%rsp)
; AVX1-NEXT:    vmovups %xmm1, -{{[0-9]+}}(%rsp)
; AVX1-NEXT:    vmovups %xmm1, -{{[0-9]+}}(%rsp)
; AVX1-NEXT:    movb $1, -{{[0-9]+}}(%rsp)
; AVX1-NEXT:    movw %dx, -{{[0-9]+}}(%rsp)
; AVX1-NEXT:    movw %dx, -{{[0-9]+}}(%rsp)
; AVX1-NEXT:    movw %si, -{{[0-9]+}}(%rsp)
; AVX1-NEXT:    testl %edi, %edi
; AVX1-NEXT:    movsbl %sil, %eax
; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; AVX1-NEXT:    ldtilecfg -{{[0-9]+}}(%rsp)
; AVX1-NEXT:    je .LBB0_2
; AVX1-NEXT:  # %bb.1:
; AVX1-NEXT:    movl $buf, %ecx
; AVX1-NEXT:    jmp .LBB0_3
; AVX1-NEXT:  .LBB0_2:
; AVX1-NEXT:    movl $buf2, %ecx
; AVX1-NEXT:  .LBB0_3:
; AVX1-NEXT:    movl $32, %edi
; AVX1-NEXT:    tileloadd (%rcx,%rdi), %tmm0
; AVX1-NEXT:    tileloadd (%rcx,%rdi), %tmm2
; AVX1-NEXT:    tileloadd (%rcx,%rdi), %tmm1
; AVX1-NEXT:    tdpbssd %tmm2, %tmm0, %tmm1
; AVX1-NEXT:    movl $buf, %ecx
; AVX1-NEXT:    movl $32, %esi
; AVX1-NEXT:    tilestored %tmm1, (%rcx,%rsi)
; AVX1-NEXT:    tilerelease
; AVX1-NEXT:    retq
;
; SSE2-LABEL: test_api:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorps %xmm1, %xmm1
; SSE2-NEXT:    movups %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movups %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movups %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movups %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movb $1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movw %dx, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movw %dx, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movw %si, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    testl %edi, %edi
; SSE2-NEXT:    movsbl %sil, %eax
; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    ldtilecfg -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    je .LBB0_2
; SSE2-NEXT:  # %bb.1:
; SSE2-NEXT:    movl $buf, %ecx
; SSE2-NEXT:    jmp .LBB0_3
; SSE2-NEXT:  .LBB0_2:
; SSE2-NEXT:    movl $buf2, %ecx
; SSE2-NEXT:  .LBB0_3:
; SSE2-NEXT:    movl $32, %edi
; SSE2-NEXT:    tileloadd (%rcx,%rdi), %tmm0
; SSE2-NEXT:    tileloadd (%rcx,%rdi), %tmm2
; SSE2-NEXT:    tileloadd (%rcx,%rdi), %tmm1
; SSE2-NEXT:    tdpbssd %tmm2, %tmm0, %tmm1
; SSE2-NEXT:    movl $buf, %ecx
; SSE2-NEXT:    movl $32, %esi
; SSE2-NEXT:    tilestored %tmm1, (%rcx,%rsi)
; SSE2-NEXT:    tilerelease
; SSE2-NEXT:    retq
  ; Buffer selection: %0 == 0 takes block %11 (@buf2), otherwise block %7 (@buf).
  %4 = icmp eq i32 %0, 0
  ; shl 8 + ashr exact 8 sign-extends the low byte of %1; %6 is used as the
  ; row operand of every AMX intrinsic below.
  %5 = shl i16 %1, 8
  %6 = ashr exact i16 %5, 8
  br i1 %4, label %11, label %7

7:                                                ; preds = %3
  ; Load the three input tiles from @buf (stride 32 bytes per row).
  %8 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %6, i16 %1, ptr @buf, i64 32)
  %9 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %6, i16 %2, ptr @buf, i64 32)
  %10 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %6, i16 %2, ptr @buf, i64 32)
  br label %15

11:                                               ; preds = %3
  ; Same three tile loads, but from @buf2.
  %12 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %6, i16 %1, ptr @buf2, i64 32)
  %13 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %6, i16 %2, ptr @buf2, i64 32)
  %14 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %6, i16 %2, ptr @buf2, i64 32)
  br label %15

15:                                               ; preds = %11, %7
  ; x86_amx phis merge the tiles from both paths; this forces the backend to
  ; emit one tile configuration that covers both predecessors.
  %16 = phi x86_amx [ %12, %11 ], [ %8, %7 ]
  %17 = phi x86_amx [ %13, %11 ], [ %9, %7 ]
  %18 = phi x86_amx [ %14, %11 ], [ %10, %7 ]
  ; Dot-product of signed bytes, then store the accumulator tile to @buf.
  %19 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %6, i16 %2, i16 %1, x86_amx %18, x86_amx %16, x86_amx %17)
  tail call void @llvm.x86.tilestored64.internal(i16 %6, i16 %2, ptr @buf, i64 32, x86_amx %19)
  ; %xmm0 is passed through untouched so the return value does not depend on
  ; the AMX computation.
  ret <4 x i32> %xmm0
}

; AMX intrinsic declarations: (rows, cols, base ptr, stride) for load/store,
; and (rows, cols, k, acc, lhs, rhs) for the tdpbssd dot-product.
declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)