llvm/test/CodeGen/X86/apx/flags-copy-lowering.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+nf | FileCheck %s

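; With +nf (APX no-flags), the add between the flag-producing mull and the
; flag-consuming cmovo is selected in its {nf} form, so EFLAGS does not have
; to be saved and restored around it.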
define i32 @flag_copy_1(i32 %x, i32 %y, ptr %pz) nounwind {
; CHECK-LABEL: flag_copy_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq %rdx, %rcx
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    mull %esi
; CHECK-NEXT:    movl (%rcx), %ecx
; CHECK-NEXT:    {nf} addl %eax, %ecx
; CHECK-NEXT:    cmovol %ecx, %eax
; CHECK-NEXT:    retq
  %o = tail call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
  %v1 = extractvalue { i32, i1 } %o, 1
  %v2 = extractvalue { i32, i1 } %o, 0
  %z = load i32, ptr %pz
  %a = add i32 %v2, %z
  %r = select i1 %v1, i32 %a, i32 %v2
  ret i32 %r
}

declare <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128>, <2 x i128>)

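; The sar/xor that build the saturation value are selected as {nf} forms so
; the overflow flag produced by the sub/sbb pair survives to the cmovo/cmovno.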
define <2 x i128> @flag_copy_2(<2 x i128> %x, <2 x i128> %y) nounwind {
; CHECK-LABEL: flag_copy_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq %rdi, %rax
; CHECK-NEXT:    subq {{[0-9]+}}(%rsp), %rcx
; CHECK-NEXT:    sbbq {{[0-9]+}}(%rsp), %r8
; CHECK-NEXT:    movq %r8, %rdi
; CHECK-NEXT:    {nf} sarq $63, %rdi
; CHECK-NEXT:    cmovoq %rdi, %rcx
; CHECK-NEXT:    movabsq $-9223372036854775808, %r9 # imm = 0x8000000000000000
; CHECK-NEXT:    {nf} xorq %r9, %rdi
; CHECK-NEXT:    cmovnoq %r8, %rdi
; CHECK-NEXT:    subq {{[0-9]+}}(%rsp), %rsi
; CHECK-NEXT:    sbbq {{[0-9]+}}(%rsp), %rdx
; CHECK-NEXT:    movq %rdx, %r8
; CHECK-NEXT:    {nf} sarq $63, %r8
; CHECK-NEXT:    cmovoq %r8, %rsi
; CHECK-NEXT:    {nf} xorq %r9, %r8
; CHECK-NEXT:    cmovnoq %rdx, %r8
; CHECK-NEXT:    movq %rcx, 16(%rax)
; CHECK-NEXT:    movq %rsi, (%rax)
; CHECK-NEXT:    movq %rdi, 24(%rax)
; CHECK-NEXT:    movq %r8, 8(%rax)
; CHECK-NEXT:    retq
  %z = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %x, <2 x i128> %y)
  ret <2 x i128> %z
}

; TODO: Remove the 2nd cmpl by using NF imul.
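; The select in %bb3 needs the flags of the entry-block compare, but the imull
; in %bb1 clobbers EFLAGS, so the compare is currently repeated before the cmov.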
define void @flag_copy_3(i32 %x, i32 %y, ptr %pa, ptr %pb, ptr %pc) nounwind {
; CHECK-LABEL: flag_copy_3:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    # kill: def $esi killed $esi def $rsi
; CHECK-NEXT:    cmpl $2, %edi
; CHECK-NEXT:    jl .LBB2_2
; CHECK-NEXT:  # %bb.1: # %bb1
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    imull %esi, %eax
; CHECK-NEXT:    movl %eax, (%rdx)
; CHECK-NEXT:    jmp .LBB2_3
; CHECK-NEXT:  .LBB2_2: # %bb2
; CHECK-NEXT:    leal -2(%rsi), %eax
; CHECK-NEXT:    movl %eax, (%rcx)
; CHECK-NEXT:  .LBB2_3: # %bb3
; CHECK-NEXT:    cmpl $2, %edi
; CHECK-NEXT:    cmovgel %edi, %esi
; CHECK-NEXT:    movl %esi, (%r8)
; CHECK-NEXT:    retq
entry:
  %cmp = icmp sgt i32 %x, 1
  br i1 %cmp, label %bb1, label %bb2

bb1:
  %add = mul nuw nsw i32 %x, %y
  store i32 %add, ptr %pa
  br label %bb3

bb2:
  %sub = sub nuw nsw i32 %y, 2
  store i32 %sub, ptr %pb
  br label %bb3

bb3:
  %s = select i1 %cmp, i32 %x, i32 %y
  store i32 %s, ptr %pc
  ret void
}