llvm/test/CodeGen/X86/clobber_base_ptr.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s | FileCheck %s

target datalayout = "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:32-n8:16:32-a:0:32-S32"
target triple = "i386-pc-windows-gnu"

; This function uses esi as the base pointer, and the inline asm clobbers esi,
; so esi must be saved on the stack (addressed via esp) before the inline asm
; and restored after it.
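;
; A rough C-level equivalent of @clobber_bp, shown purely for illustration; the
; variable names and the use of __builtin_alloca_with_align are assumptions,
; not necessarily the source this IR was generated from:
;
;   int clobber_bp(void) {
;     volatile int size = 4;
;     int g;
;     void *dst_out, *src_out;
;     int cnt_out;
;     /* A dynamically sized, 16-byte-aligned alloca forces the compiler to
;        use esi as a base pointer in addition to the ebp frame pointer. */
;     char *var_array = __builtin_alloca_with_align(size, 128);
;     *(int *)var_array = 1;
;     /* The asm clobbers esi through its "=S" output, so esi has to be saved
;        and restored around it. */
;     __asm__ volatile("rep movsb"
;                      : "=D"(dst_out), "=S"(src_out), "=c"(cnt_out)
;                      : "0"(&g), "1"(var_array), "2"(4)
;                      : "memory");
;     return g;
;   }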

define i32 @clobber_bp() {
; CHECK-LABEL: clobber_bp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushl %ebp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    .cfi_offset %ebp, -8
; CHECK-NEXT:    movl %esp, %ebp
; CHECK-NEXT:    .cfi_def_cfa_register %ebp
; CHECK-NEXT:    pushl %edi
; CHECK-NEXT:    pushl %esi
; CHECK-NEXT:    andl $-16, %esp
; CHECK-NEXT:    subl $16, %esp
; CHECK-NEXT:    movl %esp, %esi
; CHECK-NEXT:    .cfi_offset %esi, -16
; CHECK-NEXT:    .cfi_offset %edi, -12
; CHECK-NEXT:    movl $4, 12(%esi)
; CHECK-NEXT:    movl 12(%esi), %eax
; CHECK-NEXT:    addl $3, %eax
; CHECK-NEXT:    andl $-4, %eax
; CHECK-NEXT:    calll __alloca
; CHECK-NEXT:    movl %esp, %eax
; CHECK-NEXT:    andl $-16, %eax
; CHECK-NEXT:    movl %eax, %esp
; CHECK-NEXT:    movl $1, (%eax)
; CHECK-NEXT:    leal 8(%esi), %edi
; CHECK-NEXT:    movl $4, %ecx
; CHECK-NEXT:    pushl %esi
; CHECK-NEXT:    movl %eax, %esi
; CHECK-NEXT:    #APP
; CHECK-NEXT:    rep movsb (%esi), %es:(%edi)
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    popl %esi
; CHECK-NEXT:    movl 8(%esi), %eax
; CHECK-NEXT:    leal -8(%ebp), %esp
; CHECK-NEXT:    popl %esi
; CHECK-NEXT:    popl %edi
; CHECK-NEXT:    popl %ebp
; CHECK-NEXT:    retl
entry:
  %size = alloca i32, align 4
  %g = alloca i32, align 4
  store volatile i32 4, ptr %size, align 4
  %len = load volatile i32, ptr %size, align 4
  %var_array = alloca i8, i32 %len, align 16
  store i32 1, ptr %var_array, align 16
  %nil = call { ptr, ptr, i32 } asm "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %g, ptr %var_array, i32 4)
  %retval = load i32, ptr %g, align 4
  ret i32 %retval
}

; This function has the same code, except the inline asm also clobbers the
; frame pointer.
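;
; A rough C-level sketch of just the difference (hypothetical, same made-up
; names as above): the asm statement additionally lists ebp as a clobber, so
; the compiler must also save and restore the frame pointer around it.
;
;   __asm__ volatile("rep movsb"
;                    : "=D"(dst_out), "=S"(src_out), "=c"(cnt_out)
;                    : "0"(&g), "1"(var_array), "2"(4)
;                    : "memory", "ebp");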

define i32 @clobber_bpfp() {
; CHECK-LABEL: clobber_bpfp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushl %ebp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    .cfi_offset %ebp, -8
; CHECK-NEXT:    movl %esp, %ebp
; CHECK-NEXT:    .cfi_def_cfa_register %ebp
; CHECK-NEXT:    pushl %edi
; CHECK-NEXT:    pushl %esi
; CHECK-NEXT:    andl $-16, %esp
; CHECK-NEXT:    subl $16, %esp
; CHECK-NEXT:    movl %esp, %esi
; CHECK-NEXT:    .cfi_offset %esi, -16
; CHECK-NEXT:    .cfi_offset %edi, -12
; CHECK-NEXT:    movl $4, 12(%esi)
; CHECK-NEXT:    movl 12(%esi), %eax
; CHECK-NEXT:    addl $3, %eax
; CHECK-NEXT:    andl $-4, %eax
; CHECK-NEXT:    calll __alloca
; CHECK-NEXT:    movl %esp, %eax
; CHECK-NEXT:    andl $-16, %eax
; CHECK-NEXT:    movl %eax, %esp
; CHECK-NEXT:    movl $1, (%eax)
; CHECK-NEXT:    leal 8(%esi), %edi
; CHECK-NEXT:    movl $4, %ecx
; CHECK-NEXT:    pushl %ebp
; CHECK-NEXT:    pushl %esi
; CHECK-NEXT:    .cfi_remember_state
; CHECK-NEXT:    .cfi_escape 0x0f, 0x06, 0x74, 0x04, 0x06, 0x11, 0x08, 0x22 #
; CHECK-NEXT:    movl %eax, %esi
; CHECK-NEXT:    #APP
; CHECK-NEXT:    rep movsb (%esi), %es:(%edi)
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    popl %esi
; CHECK-NEXT:    popl %ebp
; CHECK-NEXT:    .cfi_restore_state
; CHECK-NEXT:    movl 8(%esi), %eax
; CHECK-NEXT:    leal -8(%ebp), %esp
; CHECK-NEXT:    popl %esi
; CHECK-NEXT:    popl %edi
; CHECK-NEXT:    popl %ebp
; CHECK-NEXT:    retl
entry:
  %size = alloca i32, align 4
  %g = alloca i32, align 4
  store volatile i32 4, ptr %size, align 4
  %len = load volatile i32, ptr %size, align 4
  %var_array = alloca i8, i32 %len, align 16
  store i32 1, ptr %var_array, align 16
  %nil = call { ptr, ptr, i32 } asm "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags},~{ebp}"(ptr %g, ptr %var_array, i32 4)
  %retval = load i32, ptr %g, align 4
  ret i32 %retval
}