llvm/test/CodeGen/X86/x86-64-baseptr.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-pc-linux -stackrealign -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=x86_64-pc-linux-gnux32 -stackrealign -verify-machineinstrs < %s | FileCheck -check-prefix=X32ABI %s

; This should run with NaCl as well (-mtriple=x86_64-pc-nacl) but currently doesn't due to PR22655

; Make sure the correct register gets set up as the base pointer:
; this should be rbx for x86-64 and 64-bit NaCl, and ebx for the x32 ABI
; (see the prologue sketch after the NACL lines below).
; NACL-LABEL: base
; NACL: subq $32, %rsp
; NACL: movq %rsp, %rbx
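;
; For reference, a minimal sketch of the prologue this test expects, taken
; from the CHECK and X32ABI lines below: the stack is realigned and the
; stack pointer is then copied into the base pointer register.
;   x86-64:  andq $-32, %rsp ; subq $32, %rsp ; movq %rsp, %rbx
;   x32 ABI: andl $-32, %esp ; subl $32, %esp ; movl %esp, %ebx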

declare i32 @helper() nounwind
define void @base() #0 {
; CHECK-LABEL: base:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rbp
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbp, -16
; CHECK-NEXT:    movq %rsp, %rbp
; CHECK-NEXT:    .cfi_def_cfa_register %rbp
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    andq $-32, %rsp
; CHECK-NEXT:    subq $32, %rsp
; CHECK-NEXT:    movq %rsp, %rbx
; CHECK-NEXT:    .cfi_offset %rbx, -24
; CHECK-NEXT:    callq helper@PLT
; CHECK-NEXT:    movq %rsp, %rcx
; CHECK-NEXT:    movl %eax, %eax
; CHECK-NEXT:    leaq 31(,%rax,4), %rax
; CHECK-NEXT:    andq $-32, %rax
; CHECK-NEXT:    movq %rcx, %rdx
; CHECK-NEXT:    subq %rax, %rdx
; CHECK-NEXT:    movq %rdx, %rsp
; CHECK-NEXT:    negq %rax
; CHECK-NEXT:    movl $0, (%rcx,%rax)
; CHECK-NEXT:    leaq -8(%rbp), %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    popq %rbp
; CHECK-NEXT:    .cfi_def_cfa %rsp, 8
; CHECK-NEXT:    retq
;
; X32ABI-LABEL: base:
; X32ABI:       # %bb.0: # %entry
; X32ABI-NEXT:    pushq %rbp
; X32ABI-NEXT:    .cfi_def_cfa_offset 16
; X32ABI-NEXT:    .cfi_offset %rbp, -16
; X32ABI-NEXT:    movl %esp, %ebp
; X32ABI-NEXT:    .cfi_def_cfa_register %rbp
; X32ABI-NEXT:    pushq %rbx
; X32ABI-NEXT:    andl $-32, %esp
; X32ABI-NEXT:    subl $32, %esp
; X32ABI-NEXT:    movl %esp, %ebx
; X32ABI-NEXT:    .cfi_offset %rbx, -24
; X32ABI-NEXT:    callq helper@PLT
; X32ABI-NEXT:    # kill: def $eax killed $eax def $rax
; X32ABI-NEXT:    leal 31(,%rax,4), %eax
; X32ABI-NEXT:    andl $-32, %eax
; X32ABI-NEXT:    movl %esp, %ecx
; X32ABI-NEXT:    movl %ecx, %edx
; X32ABI-NEXT:    subl %eax, %edx
; X32ABI-NEXT:    negl %eax
; X32ABI-NEXT:    movl %edx, %esp
; X32ABI-NEXT:    movl $0, (%ecx,%eax)
; X32ABI-NEXT:    leal -8(%ebp), %esp
; X32ABI-NEXT:    popq %rbx
; X32ABI-NEXT:    popq %rbp
; X32ABI-NEXT:    .cfi_def_cfa %rsp, 8
; X32ABI-NEXT:    retq
entry:
  %k = call i32 @helper()
  %a = alloca i32, i32 %k, align 4
  store i32 0, ptr %a, align 4
  ret void
}

define void @clobber_base() #0 {
; CHECK-LABEL: clobber_base:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %r10
; CHECK-NEXT:    .cfi_def_cfa %r10, 0
; CHECK-NEXT:    andq $-128, %rsp
; CHECK-NEXT:    pushq -8(%r10)
; CHECK-NEXT:    pushq %rbp
; CHECK-NEXT:    movq %rsp, %rbp
; CHECK-NEXT:    .cfi_escape 0x10, 0x06, 0x02, 0x76, 0x00 #
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $232, %rsp
; CHECK-NEXT:    movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    .cfi_escape 0x10, 0x03, 0x02, 0x76, 0x78 #
; CHECK-NEXT:    .cfi_escape 0x0f, 0x04, 0x76, 0x88, 0x7f, 0x06 #
; CHECK-NEXT:    callq helper@PLT
; CHECK-NEXT:    movq %rsp, %rcx
; CHECK-NEXT:    movl %eax, %eax
; CHECK-NEXT:    leaq 31(,%rax,4), %rax
; CHECK-NEXT:    andq $-32, %rax
; CHECK-NEXT:    movq %rcx, %rdx
; CHECK-NEXT:    subq %rax, %rdx
; CHECK-NEXT:    movq %rdx, %rsp
; CHECK-NEXT:    negq %rax
; CHECK-NEXT:    movl $405, %ebx # imm = 0x195
; CHECK-NEXT:    #APP
; CHECK-NEXT:    nop
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movl $8, %edx
; CHECK-NEXT:    #APP
; CHECK-NEXT:    movl %edx, -112(%rbp)
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movl $0, (%rcx,%rax)
; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
; CHECK-NEXT:    leaq -8(%rbp), %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    popq %rbp
; CHECK-NEXT:    leaq -8(%r10), %rsp
; CHECK-NEXT:    .cfi_def_cfa %rsp, 8
; CHECK-NEXT:    retq
;
; X32ABI-LABEL: clobber_base:
; X32ABI:       # %bb.0: # %entry
; X32ABI-NEXT:    pushq %rbp
; X32ABI-NEXT:    .cfi_def_cfa_offset 16
; X32ABI-NEXT:    .cfi_offset %rbp, -16
; X32ABI-NEXT:    movl %esp, %ebp
; X32ABI-NEXT:    .cfi_def_cfa_register %rbp
; X32ABI-NEXT:    pushq %rbx
; X32ABI-NEXT:    andl $-128, %esp
; X32ABI-NEXT:    subl $128, %esp
; X32ABI-NEXT:    movl %esp, %ebx
; X32ABI-NEXT:    .cfi_offset %rbx, -24
; X32ABI-NEXT:    callq helper@PLT
; X32ABI-NEXT:    # kill: def $eax killed $eax def $rax
; X32ABI-NEXT:    leal 31(,%rax,4), %eax
; X32ABI-NEXT:    andl $-32, %eax
; X32ABI-NEXT:    movl %esp, %ecx
; X32ABI-NEXT:    movl %ecx, %edx
; X32ABI-NEXT:    subl %eax, %edx
; X32ABI-NEXT:    negl %eax
; X32ABI-NEXT:    movl %edx, %esp
; X32ABI-NEXT:    pushq %rbx
; X32ABI-NEXT:    subl $24, %esp
; X32ABI-NEXT:    movl $405, %ebx # imm = 0x195
; X32ABI-NEXT:    #APP
; X32ABI-NEXT:    nop
; X32ABI-NEXT:    #NO_APP
; X32ABI-NEXT:    addl $24, %esp
; X32ABI-NEXT:    popq %rbx
; X32ABI-NEXT:    movl $8, %edx
; X32ABI-NEXT:    #APP
; X32ABI-NEXT:    movl %edx, (%ebx)
; X32ABI-NEXT:    #NO_APP
; X32ABI-NEXT:    movl $0, (%ecx,%eax)
; X32ABI-NEXT:    leal -8(%ebp), %esp
; X32ABI-NEXT:    popq %rbx
; X32ABI-NEXT:    popq %rbp
; X32ABI-NEXT:    .cfi_def_cfa %rsp, 8
; X32ABI-NEXT:    retq
entry:
  %k = call i32 @helper()
  %a = alloca i32, align 128
  %b = alloca i32, i32 %k, align 4
  ; clobber base pointer register
  tail call void asm sideeffect "nop", "{bx}"(i32 405)
  call void asm sideeffect "movl $0, $1", "r,*m"(i32 8, ptr elementtype(i32) %a)
  store i32 0, ptr %b, align 4
  ret void
}

define x86_regcallcc void @clobber_baseptr_argptr(i32 %param1, i32 %param2, i32 %param3, i32 %param4, i32 %param5, i32 %param6, i32 %param7, i32 %param8, i32 %param9, i32 %param10, i32 %param11, i32 %param12) #0 {
; CHECK-LABEL: clobber_baseptr_argptr:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %r10
; CHECK-NEXT:    .cfi_def_cfa %r10, 0
; CHECK-NEXT:    andq $-128, %rsp
; CHECK-NEXT:    pushq -8(%r10)
; CHECK-NEXT:    pushq %rbp
; CHECK-NEXT:    movq %rsp, %rbp
; CHECK-NEXT:    .cfi_escape 0x10, 0x06, 0x02, 0x76, 0x00 #
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $360, %rsp # imm = 0x168
; CHECK-NEXT:    movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    .cfi_escape 0x10, 0x03, 0x02, 0x76, 0x78 #
; CHECK-NEXT:    .cfi_escape 0x10, 0x19, 0x02, 0x76, 0xf0, 0x7e #
; CHECK-NEXT:    .cfi_escape 0x10, 0x1a, 0x02, 0x76, 0x80, 0x7f #
; CHECK-NEXT:    .cfi_escape 0x10, 0x1b, 0x02, 0x76, 0x90, 0x7f #
; CHECK-NEXT:    .cfi_escape 0x10, 0x1c, 0x02, 0x76, 0xa0, 0x7f #
; CHECK-NEXT:    .cfi_escape 0x10, 0x1d, 0x02, 0x76, 0xb0, 0x7f #
; CHECK-NEXT:    .cfi_escape 0x10, 0x1e, 0x02, 0x76, 0x40 #
; CHECK-NEXT:    .cfi_escape 0x10, 0x1f, 0x02, 0x76, 0x50 #
; CHECK-NEXT:    .cfi_escape 0x10, 0x20, 0x02, 0x76, 0x60 #
; CHECK-NEXT:    .cfi_escape 0x0f, 0x04, 0x76, 0x88, 0x7e, 0x06 #
; CHECK-NEXT:    movl (%r10), %r14d
; CHECK-NEXT:    callq helper@PLT
; CHECK-NEXT:    movq %rsp, %rcx
; CHECK-NEXT:    movl %eax, %eax
; CHECK-NEXT:    leaq 31(,%rax,4), %rax
; CHECK-NEXT:    andq $-32, %rax
; CHECK-NEXT:    movq %rcx, %rdx
; CHECK-NEXT:    subq %rax, %rdx
; CHECK-NEXT:    movq %rdx, %rsp
; CHECK-NEXT:    negq %rax
; CHECK-NEXT:    movl $405, %ebx # imm = 0x195
; CHECK-NEXT:    #APP
; CHECK-NEXT:    nop
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    #APP
; CHECK-NEXT:    nop
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movl $8, %edx
; CHECK-NEXT:    #APP
; CHECK-NEXT:    movl %edx, -240(%rbp)
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movl %r14d, (%rcx,%rax)
; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
; CHECK-NEXT:    leaq -8(%rbp), %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    popq %rbp
; CHECK-NEXT:    leaq -8(%r10), %rsp
; CHECK-NEXT:    .cfi_def_cfa %rsp, 8
; CHECK-NEXT:    retq
;
; X32ABI-LABEL: clobber_baseptr_argptr:
; X32ABI:       # %bb.0: # %entry
; X32ABI-NEXT:    pushq %rbp
; X32ABI-NEXT:    .cfi_def_cfa_offset 16
; X32ABI-NEXT:    .cfi_offset %rbp, -16
; X32ABI-NEXT:    movl %esp, %ebp
; X32ABI-NEXT:    .cfi_def_cfa_register %rbp
; X32ABI-NEXT:    pushq %rbx
; X32ABI-NEXT:    andl $-128, %esp
; X32ABI-NEXT:    subl $256, %esp # imm = 0x100
; X32ABI-NEXT:    movaps %xmm15, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X32ABI-NEXT:    movaps %xmm14, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X32ABI-NEXT:    movaps %xmm13, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X32ABI-NEXT:    movaps %xmm12, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X32ABI-NEXT:    movaps %xmm11, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X32ABI-NEXT:    movaps %xmm10, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X32ABI-NEXT:    movaps %xmm9, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X32ABI-NEXT:    movaps %xmm8, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X32ABI-NEXT:    movl %esp, %ebx
; X32ABI-NEXT:    .cfi_offset %rbx, -24
; X32ABI-NEXT:    .cfi_offset %xmm8, -160
; X32ABI-NEXT:    .cfi_offset %xmm9, -144
; X32ABI-NEXT:    .cfi_offset %xmm10, -128
; X32ABI-NEXT:    .cfi_offset %xmm11, -112
; X32ABI-NEXT:    .cfi_offset %xmm12, -96
; X32ABI-NEXT:    .cfi_offset %xmm13, -80
; X32ABI-NEXT:    .cfi_offset %xmm14, -64
; X32ABI-NEXT:    .cfi_offset %xmm15, -48
; X32ABI-NEXT:    movl 16(%ebp), %r14d
; X32ABI-NEXT:    callq helper@PLT
; X32ABI-NEXT:    # kill: def $eax killed $eax def $rax
; X32ABI-NEXT:    leal 31(,%rax,4), %eax
; X32ABI-NEXT:    andl $-32, %eax
; X32ABI-NEXT:    movl %esp, %ecx
; X32ABI-NEXT:    movl %ecx, %edx
; X32ABI-NEXT:    subl %eax, %edx
; X32ABI-NEXT:    negl %eax
; X32ABI-NEXT:    movl %edx, %esp
; X32ABI-NEXT:    pushq %rbx
; X32ABI-NEXT:    subl $24, %esp
; X32ABI-NEXT:    movl $405, %ebx # imm = 0x195
; X32ABI-NEXT:    #APP
; X32ABI-NEXT:    nop
; X32ABI-NEXT:    #NO_APP
; X32ABI-NEXT:    #APP
; X32ABI-NEXT:    nop
; X32ABI-NEXT:    #NO_APP
; X32ABI-NEXT:    addl $24, %esp
; X32ABI-NEXT:    popq %rbx
; X32ABI-NEXT:    movl $8, %edx
; X32ABI-NEXT:    #APP
; X32ABI-NEXT:    movl %edx, (%ebx)
; X32ABI-NEXT:    #NO_APP
; X32ABI-NEXT:    movl %r14d, (%ecx,%eax)
; X32ABI-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm8 # 16-byte Reload
; X32ABI-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm9 # 16-byte Reload
; X32ABI-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm10 # 16-byte Reload
; X32ABI-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm11 # 16-byte Reload
; X32ABI-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm12 # 16-byte Reload
; X32ABI-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm13 # 16-byte Reload
; X32ABI-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm14 # 16-byte Reload
; X32ABI-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm15 # 16-byte Reload
; X32ABI-NEXT:    leal -8(%ebp), %esp
; X32ABI-NEXT:    popq %rbx
; X32ABI-NEXT:    popq %rbp
; X32ABI-NEXT:    .cfi_def_cfa %rsp, 8
; X32ABI-NEXT:    retq
entry:
  %k = call i32 @helper()
  %a = alloca i32, align 128
  %b = alloca i32, i32 %k, align 4
  ; clobber base pointer register
  tail call void asm sideeffect "nop", "{bx}"(i32 405)
  ; clobber argument pointer register
  tail call void asm sideeffect "nop", "~{bx},~{r10},~{r11}"()
  call void asm sideeffect "movl $0, $1", "r,*m"(i32 8, ptr elementtype(i32) %a)
  store i32 %param12, ptr %b, align 4
  ret void
}

; pr62625
define void @vmw_host_printf(ptr %fmt, ...) nounwind {
; CHECK-LABEL: vmw_host_printf:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %r10
; CHECK-NEXT:    andq $-16, %rsp
; CHECK-NEXT:    pushq -8(%r10)
; CHECK-NEXT:    pushq %rbp
; CHECK-NEXT:    movq %rsp, %rbp
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $200, %rsp
; CHECK-NEXT:    movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movq %rsi, -184(%rbp)
; CHECK-NEXT:    movq %rdx, -176(%rbp)
; CHECK-NEXT:    movq %rcx, -168(%rbp)
; CHECK-NEXT:    movq %r8, -160(%rbp)
; CHECK-NEXT:    movq %r9, -152(%rbp)
; CHECK-NEXT:    testb %al, %al
; CHECK-NEXT:    je .LBB3_2
; CHECK-NEXT:  # %bb.1: # %entry
; CHECK-NEXT:    movaps %xmm0, -144(%rbp)
; CHECK-NEXT:    movaps %xmm1, -128(%rbp)
; CHECK-NEXT:    movaps %xmm2, -112(%rbp)
; CHECK-NEXT:    movaps %xmm3, -96(%rbp)
; CHECK-NEXT:    movaps %xmm4, -80(%rbp)
; CHECK-NEXT:    movaps %xmm5, -64(%rbp)
; CHECK-NEXT:    movaps %xmm6, -48(%rbp)
; CHECK-NEXT:    movaps %xmm7, -32(%rbp)
; CHECK-NEXT:  .LBB3_2: # %entry
; CHECK-NEXT:    leaq -192(%rbp), %rax
; CHECK-NEXT:    movq %rax, (%rax)
; CHECK-NEXT:    leaq (%r10), %rax
; CHECK-NEXT:    movq %rax, (%rax)
; CHECK-NEXT:    movl $48, (%rax)
; CHECK-NEXT:    movl $8, (%rax)
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    xorl %ebx, %ebx
; CHECK-NEXT:    xorl %ecx, %ecx
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
; CHECK-NEXT:    leaq -8(%rbp), %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    popq %rbp
; CHECK-NEXT:    leaq -8(%r10), %rsp
; CHECK-NEXT:    retq
;
; X32ABI-LABEL: vmw_host_printf:
; X32ABI:       # %bb.0: # %entry
; X32ABI-NEXT:    pushq %rbp
; X32ABI-NEXT:    movl %esp, %ebp
; X32ABI-NEXT:    pushq %rbx
; X32ABI-NEXT:    andl $-16, %esp
; X32ABI-NEXT:    subl $208, %esp
; X32ABI-NEXT:    movl %esp, %ebx
; X32ABI-NEXT:    movq %rsi, 24(%ebx)
; X32ABI-NEXT:    movq %rdx, 32(%ebx)
; X32ABI-NEXT:    movq %rcx, 40(%ebx)
; X32ABI-NEXT:    movq %r8, 48(%ebx)
; X32ABI-NEXT:    movq %r9, 56(%ebx)
; X32ABI-NEXT:    testb %al, %al
; X32ABI-NEXT:    je .LBB3_2
; X32ABI-NEXT:  # %bb.1: # %entry
; X32ABI-NEXT:    movaps %xmm0, 64(%ebx)
; X32ABI-NEXT:    movaps %xmm1, 80(%ebx)
; X32ABI-NEXT:    movaps %xmm2, 96(%ebx)
; X32ABI-NEXT:    movaps %xmm3, 112(%ebx)
; X32ABI-NEXT:    movaps %xmm4, 128(%ebx)
; X32ABI-NEXT:    movaps %xmm5, 144(%ebx)
; X32ABI-NEXT:    movaps %xmm6, 160(%ebx)
; X32ABI-NEXT:    movaps %xmm7, 176(%ebx)
; X32ABI-NEXT:  .LBB3_2: # %entry
; X32ABI-NEXT:    leal 16(%rbx), %eax
; X32ABI-NEXT:    movl %eax, (%eax)
; X32ABI-NEXT:    leal 16(%rbp), %eax
; X32ABI-NEXT:    movl %eax, (%eax)
; X32ABI-NEXT:    movl $48, (%eax)
; X32ABI-NEXT:    movl $8, (%eax)
; X32ABI-NEXT:    xorl %eax, %eax
; X32ABI-NEXT:    pushq %rbx
; X32ABI-NEXT:    subl $24, %esp
; X32ABI-NEXT:    xorl %ebx, %ebx
; X32ABI-NEXT:    xorl %ecx, %ecx
; X32ABI-NEXT:    #APP
; X32ABI-NEXT:    #NO_APP
; X32ABI-NEXT:    addl $24, %esp
; X32ABI-NEXT:    popq %rbx
; X32ABI-NEXT:    leal -8(%ebp), %esp
; X32ABI-NEXT:    popq %rbx
; X32ABI-NEXT:    popq %rbp
; X32ABI-NEXT:    retq
entry:
  %0 = alloca i8, i64 poison, align 8
  call void @llvm.va_start(ptr nonnull poison)
  %1 = call { i64, i64, i64, i64, i64, i64 } asm sideeffect "", "={ax},={bx},={cx},={dx},={si},={di},{ax},{bx},{cx},{dx},{si},{di},~{memory},~{dirflag},~{fpsr},~{flags}"(i32 0, i32 0, i32 0, i16 undef, i64 undef, i64 undef)
  ret void
}

declare void @llvm.va_start(ptr)

attributes #0 = {"frame-pointer"="all"}
!llvm.module.flags = !{!0}
!0 = !{i32 2, !"override-stack-alignment", i32 32}