
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple i686-pc-linux < %s | FileCheck %s --check-prefix=CHECK-32
; RUN: llc -mtriple x86_64-pc-linux < %s | FileCheck %s --check-prefix=CHECK-64

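; atomicrmw usub_cond subtracts %val from the value in memory only when the
; subtraction does not wrap (old >= val, unsigned); otherwise the memory is
; left unchanged. With no native instruction, it is expanded into a
; compare-exchange loop: compute old - val, select the old value back when
; the subtraction borrows, and retry the cmpxchg until it succeeds.
; i8: the 32-bit lowering keeps the trial value in %ah and branches on the
; borrow; the 64-bit lowering zero-extends and selects with cmovb.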
define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
; CHECK-32-LABEL: atomicrmw_usub_cond_i8:
; CHECK-32:       # %bb.0:
; CHECK-32-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; CHECK-32-NEXT:    movzbl (%edx), %eax
; CHECK-32-NEXT:    jmp .LBB0_1
; CHECK-32-NEXT:    .p2align 4
; CHECK-32-NEXT:  .LBB0_3: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB0_1 Depth=1
; CHECK-32-NEXT:    lock cmpxchgb %ah, (%edx)
; CHECK-32-NEXT:    je .LBB0_4
; CHECK-32-NEXT:  .LBB0_1: # %atomicrmw.start
; CHECK-32-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-32-NEXT:    movb %al, %ah
; CHECK-32-NEXT:    subb %cl, %ah
; CHECK-32-NEXT:    jae .LBB0_3
; CHECK-32-NEXT:  # %bb.2: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB0_1 Depth=1
; CHECK-32-NEXT:    movb %al, %ah
; CHECK-32-NEXT:    jmp .LBB0_3
; CHECK-32-NEXT:  .LBB0_4: # %atomicrmw.end
; CHECK-32-NEXT:    retl
;
; CHECK-64-LABEL: atomicrmw_usub_cond_i8:
; CHECK-64:       # %bb.0:
; CHECK-64-NEXT:    movzbl (%rdi), %eax
; CHECK-64-NEXT:    .p2align 4
; CHECK-64-NEXT:  .LBB0_1: # %atomicrmw.start
; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-64-NEXT:    movzbl %al, %ecx
; CHECK-64-NEXT:    subb %sil, %al
; CHECK-64-NEXT:    movzbl %al, %edx
; CHECK-64-NEXT:    cmovbl %ecx, %edx
; CHECK-64-NEXT:    movl %ecx, %eax
; CHECK-64-NEXT:    lock cmpxchgb %dl, (%rdi)
; CHECK-64-NEXT:    jne .LBB0_1
; CHECK-64-NEXT:  # %bb.2: # %atomicrmw.end
; CHECK-64-NEXT:    retq
  %result = atomicrmw usub_cond ptr %ptr, i8 %val seq_cst
  ret i8 %result
}

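; i16: same expansion; the "# kill" annotations track %ax living as a
; sub-register of %eax around the 16-bit cmpxchgw.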
define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
; CHECK-32-LABEL: atomicrmw_usub_cond_i16:
; CHECK-32:       # %bb.0:
; CHECK-32-NEXT:    pushl %esi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
; CHECK-32-NEXT:    .cfi_offset %esi, -8
; CHECK-32-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; CHECK-32-NEXT:    movzwl (%edx), %eax
; CHECK-32-NEXT:    jmp .LBB1_1
; CHECK-32-NEXT:    .p2align 4
; CHECK-32-NEXT:  .LBB1_3: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB1_1 Depth=1
; CHECK-32-NEXT:    # kill: def $ax killed $ax killed $eax
; CHECK-32-NEXT:    lock cmpxchgw %si, (%edx)
; CHECK-32-NEXT:    # kill: def $ax killed $ax def $eax
; CHECK-32-NEXT:    je .LBB1_4
; CHECK-32-NEXT:  .LBB1_1: # %atomicrmw.start
; CHECK-32-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-32-NEXT:    movl %eax, %esi
; CHECK-32-NEXT:    subw %cx, %si
; CHECK-32-NEXT:    jae .LBB1_3
; CHECK-32-NEXT:  # %bb.2: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB1_1 Depth=1
; CHECK-32-NEXT:    movl %eax, %esi
; CHECK-32-NEXT:    jmp .LBB1_3
; CHECK-32-NEXT:  .LBB1_4: # %atomicrmw.end
; CHECK-32-NEXT:    # kill: def $ax killed $ax killed $eax
; CHECK-32-NEXT:    popl %esi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 4
; CHECK-32-NEXT:    retl
;
; CHECK-64-LABEL: atomicrmw_usub_cond_i16:
; CHECK-64:       # %bb.0:
; CHECK-64-NEXT:    movzwl (%rdi), %eax
; CHECK-64-NEXT:    .p2align 4
; CHECK-64-NEXT:  .LBB1_1: # %atomicrmw.start
; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-64-NEXT:    movl %eax, %ecx
; CHECK-64-NEXT:    subw %si, %cx
; CHECK-64-NEXT:    cmovbl %eax, %ecx
; CHECK-64-NEXT:    # kill: def $ax killed $ax killed $eax
; CHECK-64-NEXT:    lock cmpxchgw %cx, (%rdi)
; CHECK-64-NEXT:    # kill: def $ax killed $ax def $eax
; CHECK-64-NEXT:    jne .LBB1_1
; CHECK-64-NEXT:  # %bb.2: # %atomicrmw.end
; CHECK-64-NEXT:    # kill: def $ax killed $ax killed $eax
; CHECK-64-NEXT:    retq
  %result = atomicrmw usub_cond ptr %ptr, i16 %val seq_cst
  ret i16 %result
}

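; i32: the loop operates on full-width registers, so no sub-register
; bookkeeping is needed.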
define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
; CHECK-32-LABEL: atomicrmw_usub_cond_i32:
; CHECK-32:       # %bb.0:
; CHECK-32-NEXT:    pushl %esi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
; CHECK-32-NEXT:    .cfi_offset %esi, -8
; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; CHECK-32-NEXT:    movl (%edx), %eax
; CHECK-32-NEXT:    jmp .LBB2_1
; CHECK-32-NEXT:    .p2align 4
; CHECK-32-NEXT:  .LBB2_3: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB2_1 Depth=1
; CHECK-32-NEXT:    lock cmpxchgl %esi, (%edx)
; CHECK-32-NEXT:    je .LBB2_4
; CHECK-32-NEXT:  .LBB2_1: # %atomicrmw.start
; CHECK-32-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-32-NEXT:    movl %eax, %esi
; CHECK-32-NEXT:    subl %ecx, %esi
; CHECK-32-NEXT:    jae .LBB2_3
; CHECK-32-NEXT:  # %bb.2: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB2_1 Depth=1
; CHECK-32-NEXT:    movl %eax, %esi
; CHECK-32-NEXT:    jmp .LBB2_3
; CHECK-32-NEXT:  .LBB2_4: # %atomicrmw.end
; CHECK-32-NEXT:    popl %esi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 4
; CHECK-32-NEXT:    retl
;
; CHECK-64-LABEL: atomicrmw_usub_cond_i32:
; CHECK-64:       # %bb.0:
; CHECK-64-NEXT:    movl (%rdi), %eax
; CHECK-64-NEXT:    .p2align 4
; CHECK-64-NEXT:  .LBB2_1: # %atomicrmw.start
; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-64-NEXT:    movl %eax, %ecx
; CHECK-64-NEXT:    subl %esi, %ecx
; CHECK-64-NEXT:    cmovbl %eax, %ecx
; CHECK-64-NEXT:    lock cmpxchgl %ecx, (%rdi)
; CHECK-64-NEXT:    jne .LBB2_1
; CHECK-64-NEXT:  # %bb.2: # %atomicrmw.end
; CHECK-64-NEXT:    retq
  %result = atomicrmw usub_cond ptr %ptr, i32 %val seq_cst
  ret i32 %result
}

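; i64: the 32-bit target has no 64-bit GPR cmpxchg, so the loop uses
; lock cmpxchg8b with a sub/sbb pair for the 64-bit trial subtraction;
; x86-64 uses a plain cmpxchgq.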
define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
; CHECK-32-LABEL: atomicrmw_usub_cond_i64:
; CHECK-32:       # %bb.0:
; CHECK-32-NEXT:    pushl %ebp
; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
; CHECK-32-NEXT:    pushl %ebx
; CHECK-32-NEXT:    .cfi_def_cfa_offset 12
; CHECK-32-NEXT:    pushl %edi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-32-NEXT:    pushl %esi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 20
; CHECK-32-NEXT:    .cfi_offset %esi, -20
; CHECK-32-NEXT:    .cfi_offset %edi, -16
; CHECK-32-NEXT:    .cfi_offset %ebx, -12
; CHECK-32-NEXT:    .cfi_offset %ebp, -8
; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edi
; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; CHECK-32-NEXT:    movl (%ebp), %eax
; CHECK-32-NEXT:    movl 4(%ebp), %edx
; CHECK-32-NEXT:    jmp .LBB3_1
; CHECK-32-NEXT:    .p2align 4
; CHECK-32-NEXT:  .LBB3_3: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB3_1 Depth=1
; CHECK-32-NEXT:    lock cmpxchg8b (%ebp)
; CHECK-32-NEXT:    je .LBB3_4
; CHECK-32-NEXT:  .LBB3_1: # %atomicrmw.start
; CHECK-32-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-32-NEXT:    movl %eax, %ebx
; CHECK-32-NEXT:    subl %edi, %ebx
; CHECK-32-NEXT:    movl %edx, %ecx
; CHECK-32-NEXT:    sbbl %esi, %ecx
; CHECK-32-NEXT:    jae .LBB3_3
; CHECK-32-NEXT:  # %bb.2: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB3_1 Depth=1
; CHECK-32-NEXT:    movl %edx, %ecx
; CHECK-32-NEXT:    movl %eax, %ebx
; CHECK-32-NEXT:    jmp .LBB3_3
; CHECK-32-NEXT:  .LBB3_4: # %atomicrmw.end
; CHECK-32-NEXT:    popl %esi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-32-NEXT:    popl %edi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 12
; CHECK-32-NEXT:    popl %ebx
; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
; CHECK-32-NEXT:    popl %ebp
; CHECK-32-NEXT:    .cfi_def_cfa_offset 4
; CHECK-32-NEXT:    retl
;
; CHECK-64-LABEL: atomicrmw_usub_cond_i64:
; CHECK-64:       # %bb.0:
; CHECK-64-NEXT:    movq (%rdi), %rax
; CHECK-64-NEXT:    .p2align 4
; CHECK-64-NEXT:  .LBB3_1: # %atomicrmw.start
; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-64-NEXT:    movq %rax, %rcx
; CHECK-64-NEXT:    subq %rsi, %rcx
; CHECK-64-NEXT:    cmovbq %rax, %rcx
; CHECK-64-NEXT:    lock cmpxchgq %rcx, (%rdi)
; CHECK-64-NEXT:    jne .LBB3_1
; CHECK-64-NEXT:  # %bb.2: # %atomicrmw.end
; CHECK-64-NEXT:    retq
  %result = atomicrmw usub_cond ptr %ptr, i64 %val seq_cst
  ret i64 %result
}

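; atomicrmw usub_sat is the saturating form: old - val when old >= val,
; clamped to 0 on unsigned underflow. The expansion mirrors usub_cond but
; substitutes a zero (materialized with xor) for the result when the
; subtraction borrows.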
define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
; CHECK-32-LABEL: atomicrmw_usub_sat_i8:
; CHECK-32:       # %bb.0:
; CHECK-32-NEXT:    pushl %ebx
; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
; CHECK-32-NEXT:    .cfi_offset %ebx, -8
; CHECK-32-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; CHECK-32-NEXT:    movzbl (%edx), %eax
; CHECK-32-NEXT:    jmp .LBB4_1
; CHECK-32-NEXT:    .p2align 4
; CHECK-32-NEXT:  .LBB4_3: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB4_1 Depth=1
; CHECK-32-NEXT:    lock cmpxchgb %bl, (%edx)
; CHECK-32-NEXT:    je .LBB4_4
; CHECK-32-NEXT:  .LBB4_1: # %atomicrmw.start
; CHECK-32-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-32-NEXT:    movl %eax, %ebx
; CHECK-32-NEXT:    subb %cl, %bl
; CHECK-32-NEXT:    jae .LBB4_3
; CHECK-32-NEXT:  # %bb.2: # in Loop: Header=BB4_1 Depth=1
; CHECK-32-NEXT:    xorl %ebx, %ebx
; CHECK-32-NEXT:    jmp .LBB4_3
; CHECK-32-NEXT:  .LBB4_4: # %atomicrmw.end
; CHECK-32-NEXT:    popl %ebx
; CHECK-32-NEXT:    .cfi_def_cfa_offset 4
; CHECK-32-NEXT:    retl
;
; CHECK-64-LABEL: atomicrmw_usub_sat_i8:
; CHECK-64:       # %bb.0:
; CHECK-64-NEXT:    movzbl (%rdi), %eax
; CHECK-64-NEXT:    xorl %ecx, %ecx
; CHECK-64-NEXT:    .p2align 4
; CHECK-64-NEXT:  .LBB4_1: # %atomicrmw.start
; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-64-NEXT:    movl %eax, %edx
; CHECK-64-NEXT:    subb %sil, %dl
; CHECK-64-NEXT:    movzbl %dl, %edx
; CHECK-64-NEXT:    cmovbl %ecx, %edx
; CHECK-64-NEXT:    lock cmpxchgb %dl, (%rdi)
; CHECK-64-NEXT:    jne .LBB4_1
; CHECK-64-NEXT:  # %bb.2: # %atomicrmw.end
; CHECK-64-NEXT:    retq
  %result = atomicrmw usub_sat ptr %ptr, i8 %val seq_cst
  ret i8 %result
}

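; i16: the 32-bit loop branches to pick either the sub result or the
; pre-zeroed %esi; the 64-bit loop cmovs from a zeroed %ecx.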
define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
; CHECK-32-LABEL: atomicrmw_usub_sat_i16:
; CHECK-32:       # %bb.0:
; CHECK-32-NEXT:    pushl %edi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
; CHECK-32-NEXT:    pushl %esi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 12
; CHECK-32-NEXT:    .cfi_offset %esi, -12
; CHECK-32-NEXT:    .cfi_offset %edi, -8
; CHECK-32-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; CHECK-32-NEXT:    movzwl (%edx), %eax
; CHECK-32-NEXT:    jmp .LBB5_1
; CHECK-32-NEXT:    .p2align 4
; CHECK-32-NEXT:  .LBB5_3: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB5_1 Depth=1
; CHECK-32-NEXT:    lock cmpxchgw %si, (%edx)
; CHECK-32-NEXT:    je .LBB5_4
; CHECK-32-NEXT:  .LBB5_1: # %atomicrmw.start
; CHECK-32-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-32-NEXT:    xorl %esi, %esi
; CHECK-32-NEXT:    movl %eax, %edi
; CHECK-32-NEXT:    subw %cx, %di
; CHECK-32-NEXT:    jb .LBB5_3
; CHECK-32-NEXT:  # %bb.2: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB5_1 Depth=1
; CHECK-32-NEXT:    movl %edi, %esi
; CHECK-32-NEXT:    jmp .LBB5_3
; CHECK-32-NEXT:  .LBB5_4: # %atomicrmw.end
; CHECK-32-NEXT:    popl %esi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
; CHECK-32-NEXT:    popl %edi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 4
; CHECK-32-NEXT:    retl
;
; CHECK-64-LABEL: atomicrmw_usub_sat_i16:
; CHECK-64:       # %bb.0:
; CHECK-64-NEXT:    movzwl (%rdi), %eax
; CHECK-64-NEXT:    xorl %ecx, %ecx
; CHECK-64-NEXT:    .p2align 4
; CHECK-64-NEXT:  .LBB5_1: # %atomicrmw.start
; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-64-NEXT:    movl %eax, %edx
; CHECK-64-NEXT:    subw %si, %dx
; CHECK-64-NEXT:    cmovbl %ecx, %edx
; CHECK-64-NEXT:    lock cmpxchgw %dx, (%rdi)
; CHECK-64-NEXT:    jne .LBB5_1
; CHECK-64-NEXT:  # %bb.2: # %atomicrmw.end
; CHECK-64-NEXT:    retq
  %result = atomicrmw usub_sat ptr %ptr, i16 %val seq_cst
  ret i16 %result
}

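; i32: same zero-clamp pattern with full-width registers.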
define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
; CHECK-32-LABEL: atomicrmw_usub_sat_i32:
; CHECK-32:       # %bb.0:
; CHECK-32-NEXT:    pushl %edi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
; CHECK-32-NEXT:    pushl %esi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 12
; CHECK-32-NEXT:    .cfi_offset %esi, -12
; CHECK-32-NEXT:    .cfi_offset %edi, -8
; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; CHECK-32-NEXT:    movl (%edx), %eax
; CHECK-32-NEXT:    jmp .LBB6_1
; CHECK-32-NEXT:    .p2align 4
; CHECK-32-NEXT:  .LBB6_3: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB6_1 Depth=1
; CHECK-32-NEXT:    lock cmpxchgl %esi, (%edx)
; CHECK-32-NEXT:    je .LBB6_4
; CHECK-32-NEXT:  .LBB6_1: # %atomicrmw.start
; CHECK-32-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-32-NEXT:    xorl %esi, %esi
; CHECK-32-NEXT:    movl %eax, %edi
; CHECK-32-NEXT:    subl %ecx, %edi
; CHECK-32-NEXT:    jb .LBB6_3
; CHECK-32-NEXT:  # %bb.2: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB6_1 Depth=1
; CHECK-32-NEXT:    movl %edi, %esi
; CHECK-32-NEXT:    jmp .LBB6_3
; CHECK-32-NEXT:  .LBB6_4: # %atomicrmw.end
; CHECK-32-NEXT:    popl %esi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
; CHECK-32-NEXT:    popl %edi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 4
; CHECK-32-NEXT:    retl
;
; CHECK-64-LABEL: atomicrmw_usub_sat_i32:
; CHECK-64:       # %bb.0:
; CHECK-64-NEXT:    movl (%rdi), %eax
; CHECK-64-NEXT:    xorl %ecx, %ecx
; CHECK-64-NEXT:    .p2align 4
; CHECK-64-NEXT:  .LBB6_1: # %atomicrmw.start
; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-64-NEXT:    movl %eax, %edx
; CHECK-64-NEXT:    subl %esi, %edx
; CHECK-64-NEXT:    cmovbl %ecx, %edx
; CHECK-64-NEXT:    lock cmpxchgl %edx, (%rdi)
; CHECK-64-NEXT:    jne .LBB6_1
; CHECK-64-NEXT:  # %bb.2: # %atomicrmw.end
; CHECK-64-NEXT:    retq
  %result = atomicrmw usub_sat ptr %ptr, i32 %val seq_cst
  ret i32 %result
}

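; i64: the 32-bit lowering again goes through lock cmpxchg8b, with sbb
; propagating the borrow into the high half before the clamp to zero.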
define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
; CHECK-32-LABEL: atomicrmw_usub_sat_i64:
; CHECK-32:       # %bb.0:
; CHECK-32-NEXT:    pushl %ebp
; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
; CHECK-32-NEXT:    pushl %ebx
; CHECK-32-NEXT:    .cfi_def_cfa_offset 12
; CHECK-32-NEXT:    pushl %edi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-32-NEXT:    pushl %esi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 20
; CHECK-32-NEXT:    .cfi_offset %esi, -20
; CHECK-32-NEXT:    .cfi_offset %edi, -16
; CHECK-32-NEXT:    .cfi_offset %ebx, -12
; CHECK-32-NEXT:    .cfi_offset %ebp, -8
; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; CHECK-32-NEXT:    movl (%ebp), %esi
; CHECK-32-NEXT:    movl 4(%ebp), %edi
; CHECK-32-NEXT:    jmp .LBB7_1
; CHECK-32-NEXT:    .p2align 4
; CHECK-32-NEXT:  .LBB7_3: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB7_1 Depth=1
; CHECK-32-NEXT:    movl %esi, %eax
; CHECK-32-NEXT:    movl %edi, %edx
; CHECK-32-NEXT:    lock cmpxchg8b (%ebp)
; CHECK-32-NEXT:    movl %eax, %esi
; CHECK-32-NEXT:    movl %edx, %edi
; CHECK-32-NEXT:    je .LBB7_4
; CHECK-32-NEXT:  .LBB7_1: # %atomicrmw.start
; CHECK-32-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-32-NEXT:    xorl %ecx, %ecx
; CHECK-32-NEXT:    movl %esi, %eax
; CHECK-32-NEXT:    subl {{[0-9]+}}(%esp), %eax
; CHECK-32-NEXT:    movl %edi, %edx
; CHECK-32-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
; CHECK-32-NEXT:    movl $0, %ebx
; CHECK-32-NEXT:    jb .LBB7_3
; CHECK-32-NEXT:  # %bb.2: # %atomicrmw.start
; CHECK-32-NEXT:    # in Loop: Header=BB7_1 Depth=1
; CHECK-32-NEXT:    movl %edx, %ecx
; CHECK-32-NEXT:    movl %eax, %ebx
; CHECK-32-NEXT:    jmp .LBB7_3
; CHECK-32-NEXT:  .LBB7_4: # %atomicrmw.end
; CHECK-32-NEXT:    movl %esi, %eax
; CHECK-32-NEXT:    movl %edi, %edx
; CHECK-32-NEXT:    popl %esi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-32-NEXT:    popl %edi
; CHECK-32-NEXT:    .cfi_def_cfa_offset 12
; CHECK-32-NEXT:    popl %ebx
; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
; CHECK-32-NEXT:    popl %ebp
; CHECK-32-NEXT:    .cfi_def_cfa_offset 4
; CHECK-32-NEXT:    retl
;
; CHECK-64-LABEL: atomicrmw_usub_sat_i64:
; CHECK-64:       # %bb.0:
; CHECK-64-NEXT:    movq (%rdi), %rax
; CHECK-64-NEXT:    xorl %ecx, %ecx
; CHECK-64-NEXT:    .p2align 4
; CHECK-64-NEXT:  .LBB7_1: # %atomicrmw.start
; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-64-NEXT:    movq %rax, %rdx
; CHECK-64-NEXT:    subq %rsi, %rdx
; CHECK-64-NEXT:    cmovbq %rcx, %rdx
; CHECK-64-NEXT:    lock cmpxchgq %rdx, (%rdi)
; CHECK-64-NEXT:    jne .LBB7_1
; CHECK-64-NEXT:  # %bb.2: # %atomicrmw.end
; CHECK-64-NEXT:    retq
  %result = atomicrmw usub_sat ptr %ptr, i64 %val seq_cst
  ret i64 %result
}