llvm/test/CodeGen/X86/icmp-pow2-logic-npow2.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=X86
; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=X64

declare i64 @llvm.abs.i64(i64, i1)
declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)
declare i32 @llvm.abs.i32(i32, i1)
declare i16 @llvm.abs.i16(i16, i1)
declare i8 @llvm.abs.i8(i8, i1)

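; These tests exercise folding an icmp against +/- the same power-of-2
; constant (and the equivalent abs forms) into a single add + masked test:
;   (X == C) | (X == -C)  ->  ((X + C) & ~(2*C)) == 0   when C is a power of 2
; For C = 32 below, (X + 32) & -65 is 0 iff X + 32 is 0 or 64, i.e. X is
; +/-32, so the two compares become one add $32 + test $-65.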
define i1 @eq_pow_or(i32 %0) nounwind {
; X86-LABEL: eq_pow_or:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl $32, %eax
; X86-NEXT:    testl $-65, %eax
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: eq_pow_or:
; X64:       # %bb.0:
; X64-NEXT:    addl $32, %edi
; X64-NEXT:    testl $-65, %edi
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %2 = icmp eq i32 %0, 32
  %3 = icmp eq i32 %0, -32
  %4 = or i1 %2, %3
  ret i1 %4
}

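; Same fold for the "and of ne" form: (X != 16) & (X != -16) becomes
; add $16 + test $-33 (mask ~32) with setne.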
define i1 @ne_pow_and(i8 %0) nounwind {
; X86-LABEL: ne_pow_and:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addb $16, %al
; X86-NEXT:    testb $-33, %al
; X86-NEXT:    setne %al
; X86-NEXT:    retl
;
; X64-LABEL: ne_pow_and:
; X64:       # %bb.0:
; X64-NEXT:    addb $16, %dil
; X64-NEXT:    testb $-33, %dil
; X64-NEXT:    setne %al
; X64-NEXT:    retq
  %2 = icmp ne i8 %0, 16
  %3 = icmp ne i8 %0, -16
  %4 = and i1 %2, %3
  ret i1 %4
}

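; Negative test: 16 and -32 are not +/- the same constant, so the fold does
; not apply and both compares remain.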
define i1 @eq_pow_mismatch_or(i32 %0) nounwind {
; X86-LABEL: eq_pow_mismatch_or:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    cmpl $16, %eax
; X86-NEXT:    sete %cl
; X86-NEXT:    cmpl $-32, %eax
; X86-NEXT:    sete %al
; X86-NEXT:    orb %cl, %al
; X86-NEXT:    retl
;
; X64-LABEL: eq_pow_mismatch_or:
; X64:       # %bb.0:
; X64-NEXT:    cmpl $16, %edi
; X64-NEXT:    sete %cl
; X64-NEXT:    cmpl $-32, %edi
; X64-NEXT:    sete %al
; X64-NEXT:    orb %cl, %al
; X64-NEXT:    retq
  %2 = icmp eq i32 %0, 16
  %3 = icmp eq i32 %0, -32
  %4 = or i1 %2, %3
  ret i1 %4
}

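; Negative test: +/-17 is not a power of 2, so the fold does not apply.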
define i1 @ne_non_pow_and(i8 %0) nounwind {
; X86-LABEL: ne_non_pow_and:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    cmpb $17, %al
; X86-NEXT:    setne %cl
; X86-NEXT:    cmpb $-17, %al
; X86-NEXT:    setne %al
; X86-NEXT:    andb %cl, %al
; X86-NEXT:    retl
;
; X64-LABEL: ne_non_pow_and:
; X64:       # %bb.0:
; X64-NEXT:    cmpb $17, %dil
; X64-NEXT:    setne %cl
; X64-NEXT:    cmpb $-17, %dil
; X64-NEXT:    setne %al
; X64-NEXT:    andb %cl, %al
; X64-NEXT:    retq
  %2 = icmp ne i8 %0, 17
  %3 = icmp ne i8 %0, -17
  %4 = and i1 %2, %3
  ret i1 %4
}

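; Negative test: "or of ne" is the wrong logic op for the fold. Note that
; (X != 32) | (X != -32) is always true; it is lowered as xor/or/setne rather
; than folded to a constant.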
define i1 @ne_pow_or(i32 %0) nounwind {
; X86-LABEL: ne_pow_or:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    xorl $32, %ecx
; X86-NEXT:    xorl $-32, %eax
; X86-NEXT:    orl %ecx, %eax
; X86-NEXT:    setne %al
; X86-NEXT:    retl
;
; X64-LABEL: ne_pow_or:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    xorl $32, %eax
; X64-NEXT:    xorl $-32, %edi
; X64-NEXT:    orl %eax, %edi
; X64-NEXT:    setne %al
; X64-NEXT:    retq
  %2 = icmp ne i32 %0, 32
  %3 = icmp ne i32 %0, -32
  %4 = or i1 %2, %3
  ret i1 %4
}

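; Negative test: "and of eq" is the wrong logic op for the fold. Likewise
; (X == 16) & (X == -16) is always false but is lowered as xor/or/sete.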
define i1 @eq_pow_and(i8 %0) nounwind {
; X86-LABEL: eq_pow_and:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    xorb $16, %cl
; X86-NEXT:    xorb $-16, %al
; X86-NEXT:    orb %cl, %al
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: eq_pow_and:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    xorb $16, %al
; X64-NEXT:    xorb $-16, %dil
; X64-NEXT:    orb %al, %dil
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %2 = icmp eq i8 %0, 16
  %3 = icmp eq i8 %0, -16
  %4 = and i1 %2, %3
  ret i1 %4
}

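; abs(X) == 4 is the same predicate as (X == 4) | (X == -4), so it folds to
; add $4 + test $-9 (mask ~8).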
define i1 @abs_eq_pow2(i32 %0) nounwind {
; X86-LABEL: abs_eq_pow2:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl $4, %eax
; X86-NEXT:    testl $-9, %eax
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: abs_eq_pow2:
; X64:       # %bb.0:
; X64-NEXT:    addl $4, %edi
; X64-NEXT:    testl $-9, %edi
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %2 = tail call i32 @llvm.abs.i32(i32 %0, i1 true)
  %3 = icmp eq i32 %2, 4
  ret i1 %3
}

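; Same fold on i64: abs(X) != 2 becomes add $2 + test $-5. On i686 the 64-bit
; add is split into add/adc feeding the masked or/setne.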
define i1 @abs_ne_pow2(i64 %0) nounwind {
; X86-LABEL: abs_ne_pow2:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    addl $2, %eax
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    andl $-5, %eax
; X86-NEXT:    orl %ecx, %eax
; X86-NEXT:    setne %al
; X86-NEXT:    retl
;
; X64-LABEL: abs_ne_pow2:
; X64:       # %bb.0:
; X64-NEXT:    addq $2, %rdi
; X64-NEXT:    testq $-5, %rdi
; X64-NEXT:    setne %al
; X64-NEXT:    retq
  %2 = tail call i64 @llvm.abs.i64(i64 %0, i1 true)
  %3 = icmp ne i64 %2, 2
  ret i1 %3
}

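; Negative test: -8192 (0xE000 as i16) is not a power of 2, so abs is actually
; computed (neg + cmov on x86-64) and compared.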
define i1 @abs_ne_nonpow2(i16 %0) nounwind {
; X86-LABEL: abs_ne_nonpow2:
; X86:       # %bb.0:
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movswl %ax, %ecx
; X86-NEXT:    sarl $15, %ecx
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    subl %ecx, %eax
; X86-NEXT:    movzwl %ax, %eax
; X86-NEXT:    cmpl $57344, %eax # imm = 0xE000
; X86-NEXT:    setne %al
; X86-NEXT:    retl
;
; X64-LABEL: abs_ne_nonpow2:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    negw %ax
; X64-NEXT:    cmovsw %di, %ax
; X64-NEXT:    movzwl %ax, %eax
; X64-NEXT:    cmpl $57344, %eax # imm = 0xE000
; X64-NEXT:    setne %al
; X64-NEXT:    retq
  %2 = tail call i16 @llvm.abs.i16(i16 %0, i1 true)
  %3 = icmp ne i16 %2, -8192
  ret i1 %3
}

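; Vector case: the scalar add+test fold is not applied. i686 scalarizes the
; abs + compare; x86-64 computes abs with SSE2 and does pcmpeqd against a
; splat-of-8 constant, inverting the result for the ne.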
define <2 x i1> @abs_ne_vec(<2 x i64> %0) nounwind {
; X86-LABEL: abs_ne_vec:
; X86:       # %bb.0:
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl %ecx, %esi
; X86-NEXT:    sarl $31, %esi
; X86-NEXT:    xorl %esi, %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    xorl %esi, %edx
; X86-NEXT:    subl %esi, %edx
; X86-NEXT:    sbbl %esi, %ecx
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    sarl $31, %esi
; X86-NEXT:    xorl %esi, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    xorl %esi, %edi
; X86-NEXT:    subl %esi, %edi
; X86-NEXT:    sbbl %esi, %eax
; X86-NEXT:    xorl $8, %edi
; X86-NEXT:    orl %eax, %edi
; X86-NEXT:    setne %al
; X86-NEXT:    xorl $8, %edx
; X86-NEXT:    orl %ecx, %edx
; X86-NEXT:    setne %dl
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl
;
; X64-LABEL: abs_ne_vec:
; X64:       # %bb.0:
; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X64-NEXT:    psrad $31, %xmm1
; X64-NEXT:    pxor %xmm1, %xmm0
; X64-NEXT:    psubq %xmm1, %xmm0
; X64-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; X64-NEXT:    pand %xmm1, %xmm0
; X64-NEXT:    pcmpeqd %xmm1, %xmm1
; X64-NEXT:    pxor %xmm1, %xmm0
; X64-NEXT:    retq
  %2 = tail call <2 x i64> @llvm.abs.v2i64(<2 x i64> %0, i1 true)
  %3 = icmp ne <2 x i64> %2, <i64 8, i64 8>
  ret <2 x i1> %3
}