; llvm/test/CodeGen/X86/funnel-shift-logic-fold.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64
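; Verify that a bitwise logic op (and/or/xor) of two funnel shifts with the
; same shift amount is hoisted through the funnel shift:
;   logic (fsh X0, X1, S), (fsh Y0, Y1, S) --> fsh (logic X0, Y0), (logic X1, Y1), S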

declare i64 @llvm.fshl.i64(i64, i64, i64) nounwind readnone
declare i64 @llvm.fshr.i64(i64, i64, i64) nounwind readnone

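; or (fshl %a, %b, %s), (fshl %c, %d, %s) --> fshl (or %a, %c), (or %b, %d), %s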
define i64 @hoist_fshl_from_or(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
; X64-LABEL: hoist_fshl_from_or:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    orq %rcx, %rsi
; X64-NEXT:    orq %rdx, %rax
; X64-NEXT:    movl %r8d, %ecx
; X64-NEXT:    shldq %cl, %rsi, %rax
; X64-NEXT:    retq
  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %s)
  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 %s)
  %res = or i64 %fshl.0, %fshl.1
  ret i64 %res
}

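; and (fshl %a, %b, %s), (fshl %c, %d, %s) --> fshl (and %a, %c), (and %b, %d), %s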
define i64 @hoist_fshl_from_and(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
; X64-LABEL: hoist_fshl_from_and:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    andq %rcx, %rsi
; X64-NEXT:    andq %rdx, %rax
; X64-NEXT:    movl %r8d, %ecx
; X64-NEXT:    shldq %cl, %rsi, %rax
; X64-NEXT:    retq
  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %s)
  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 %s)
  %res = and i64 %fshl.0, %fshl.1
  ret i64 %res
}

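; xor (fshl %a, %b, %s), (fshl %c, %d, %s) --> fshl (xor %a, %c), (xor %b, %d), %s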
define i64 @hoist_fshl_from_xor(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
; X64-LABEL: hoist_fshl_from_xor:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    xorq %rcx, %rsi
; X64-NEXT:    xorq %rdx, %rax
; X64-NEXT:    movl %r8d, %ecx
; X64-NEXT:    shldq %cl, %rsi, %rax
; X64-NEXT:    retq
  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %s)
  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 %s)
  %res = xor i64 %fshl.0, %fshl.1
  ret i64 %res
}

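; Negative test - the two fshl calls use different shift amounts, so the or is
; not hoisted and both funnel shifts are emitted.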
define i64 @fshl_or_with_different_shift_value(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; X64-LABEL: fshl_or_with_different_shift_value:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdx, %rax
; X64-NEXT:    shldq $12, %rsi, %rdi
; X64-NEXT:    shldq $13, %rcx, %rax
; X64-NEXT:    orq %rdi, %rax
; X64-NEXT:    retq
  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 12)
  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 13)
  %res = or i64 %fshl.0, %fshl.1
  ret i64 %res
}

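; The hoist also fires when both shift amounts are the same constant.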
define i64 @hoist_fshl_from_or_const_shift(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; X64-LABEL: hoist_fshl_from_or_const_shift:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    orq %rcx, %rsi
; X64-NEXT:    orq %rdx, %rax
; X64-NEXT:    shldq $15, %rsi, %rax
; X64-NEXT:    retq
  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 15)
  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 15)
  %res = or i64 %fshl.0, %fshl.1
  ret i64 %res
}

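; or (fshr %a, %b, %s), (fshr %c, %d, %s) --> fshr (or %a, %c), (or %b, %d), %s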
define i64 @hoist_fshr_from_or(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
; X64-LABEL: hoist_fshr_from_or:
; X64:       # %bb.0:
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    orq %rdx, %rdi
; X64-NEXT:    orq %rcx, %rax
; X64-NEXT:    movl %r8d, %ecx
; X64-NEXT:    shrdq %cl, %rdi, %rax
; X64-NEXT:    retq
  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %s)
  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 %s)
  %res = or i64 %fshr.0, %fshr.1
  ret i64 %res
}

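; and (fshr %a, %b, %s), (fshr %c, %d, %s) --> fshr (and %a, %c), (and %b, %d), %s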
define i64 @hoist_fshr_from_and(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
; X64-LABEL: hoist_fshr_from_and:
; X64:       # %bb.0:
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    andq %rdx, %rdi
; X64-NEXT:    andq %rcx, %rax
; X64-NEXT:    movl %r8d, %ecx
; X64-NEXT:    shrdq %cl, %rdi, %rax
; X64-NEXT:    retq
  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %s)
  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 %s)
  %res = and i64 %fshr.0, %fshr.1
  ret i64 %res
}

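; xor (fshr %a, %b, %s), (fshr %c, %d, %s) --> fshr (xor %a, %c), (xor %b, %d), %s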
define i64 @hoist_fshr_from_xor(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
; X64-LABEL: hoist_fshr_from_xor:
; X64:       # %bb.0:
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    xorq %rdx, %rdi
; X64-NEXT:    xorq %rcx, %rax
; X64-NEXT:    movl %r8d, %ecx
; X64-NEXT:    shrdq %cl, %rdi, %rax
; X64-NEXT:    retq
  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %s)
  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 %s)
  %res = xor i64 %fshr.0, %fshr.1
  ret i64 %res
}

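; Negative test - the two fshr calls use different shift amounts, so the or is
; not hoisted; each fshr is lowered separately as shld by (64 - amount).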
define i64 @fshr_or_with_different_shift_value(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; X64-LABEL: fshr_or_with_different_shift_value:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdx, %rax
; X64-NEXT:    shldq $52, %rsi, %rdi
; X64-NEXT:    shldq $51, %rcx, %rax
; X64-NEXT:    orq %rdi, %rax
; X64-NEXT:    retq
  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 12)
  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 13)
  %res = or i64 %fshr.0, %fshr.1
  ret i64 %res
}

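; fshr (or %a, %c), (or %b, %d), 15 is lowered as shld by 49; only the low 15
; bits of (or %a, %c) are demanded, so the or of %a and %c can be done in 32 bits.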
define i64 @hoist_fshr_from_or_const_shift(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; X64-LABEL: hoist_fshr_from_or_const_shift:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    orq %rcx, %rsi
; X64-NEXT:    orl %edx, %eax
; X64-NEXT:    shldq $49, %rsi, %rax
; X64-NEXT:    retq
  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 15)
  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 15)
  %res = or i64 %fshr.0, %fshr.1
  ret i64 %res
}