llvm/test/CodeGen/X86/freeze-unary.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,X64
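
; Check that freeze does not block folds across unary ops: since none of these
; ops create poison from a non-poison operand, freeze(op(x)) can be pushed
; through to op(freeze(x)), and the surrounding nodes combine as usual.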

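; The two sexts merge into a single sign-extension (one movsbl) once the
; freeze is pushed through.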
define i32 @freeze_sext(i8 %a0) nounwind {
; X86-LABEL: freeze_sext:
; X86:       # %bb.0:
; X86-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LABEL: freeze_sext:
; X64:       # %bb.0:
; X64-NEXT:    movsbl %dil, %eax
; X64-NEXT:    retq
  %x = sext i8 %a0 to i16
  %y = freeze i16 %x
  %z = sext i16 %y to i32
  ret i32 %z
}

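; Same fold for vectors: a single sign-extension (pmovsxbd with SSE4.2).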
define <4 x i32> @freeze_sext_vec(<4 x i8> %a0) nounwind {
; X86-LABEL: freeze_sext_vec:
; X86:       # %bb.0:
; X86-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; X86-NEXT:    psrad $24, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: freeze_sext_vec:
; X64:       # %bb.0:
; X64-NEXT:    pmovsxbd %xmm0, %xmm0
; X64-NEXT:    retq
  %x = sext <4 x i8> %a0 to <4 x i16>
  %y = freeze <4 x i16> %x
  %z = sext <4 x i16> %y to <4 x i32>
  ret <4 x i32> %z
}

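; zext folds the same way: a single movzbl.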
define i32 @freeze_zext(i8 %a0) nounwind {
; X86-LABEL: freeze_zext:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LABEL: freeze_zext:
; X64:       # %bb.0:
; X64-NEXT:    movzbl %dil, %eax
; X64-NEXT:    retq
  %x = zext i8 %a0 to i16
  %y = freeze i16 %x
  %z = zext i16 %y to i32
  ret i32 %z
}

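; Vector zext: one pmovzxwq with SSE4.2, zero-unpacks with SSE2.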
define <2 x i64> @freeze_zext_vec(<2 x i16> %a0) nounwind {
; X86-LABEL: freeze_zext_vec:
; X86:       # %bb.0:
; X86-NEXT:    pxor %xmm1, %xmm1
; X86-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT:    retl
;
; X64-LABEL: freeze_zext_vec:
; X64:       # %bb.0:
; X64-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; X64-NEXT:    retq
  %x = zext <2 x i16> %a0 to <2 x i32>
  %y = freeze <2 x i32> %x
  %z = zext <2 x i32> %y to <2 x i64>
  ret <2 x i64> %z
}

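; bswap is an involution, so bswap(freeze(bswap(x))) reduces to x.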
define i32 @freeze_bswap(i32 %a0) nounwind {
; X86-LABEL: freeze_bswap:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LABEL: freeze_bswap:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    retq
  %x = call i32 @llvm.bswap.i32(i32 %a0)
  %y = freeze i32 %x
  %z = call i32 @llvm.bswap.i32(i32 %y)
  ret i32 %z
}
declare i32 @llvm.bswap.i32(i32)

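; The vector bswap pair folds away entirely; only the return remains.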
define <4 x i32> @freeze_bswap_vec(<4 x i32> %a0) nounwind {
; CHECK-LABEL: freeze_bswap_vec:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret{{[l|q]}}
  %x = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %a0)
  %y = freeze <4 x i32> %x
  %z = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %y)
  ret <4 x i32> %z
}
declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)

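; bitreverse is also an involution; the pair cancels to a plain move.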
define i32 @freeze_bitreverse(i32 %a0) nounwind {
; X86-LABEL: freeze_bitreverse:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LABEL: freeze_bitreverse:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    retq
  %x = call i32 @llvm.bitreverse.i32(i32 %a0)
  %y = freeze i32 %x
  %z = call i32 @llvm.bitreverse.i32(i32 %y)
  ret i32 %z
}
declare i32 @llvm.bitreverse.i32(i32)

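; The vector bitreverse pair likewise folds away entirely.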
define <4 x i32> @freeze_bitreverse_vec(<4 x i32> %a0) nounwind {
; CHECK-LABEL: freeze_bitreverse_vec:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret{{[l|q]}}
  %x = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %a0)
  %y = freeze <4 x i32> %x
  %z = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %y)
  ret <4 x i32> %z
}
declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>)

; split parity pattern: and(freeze(ctpop(x)), 1) should still lower to a
; parity-flag test (setnp).
define i8 @freeze_ctpop(i8 %a0) nounwind {
; X86-LABEL: freeze_ctpop:
; X86:       # %bb.0:
; X86-NEXT:    cmpb $0, {{[0-9]+}}(%esp)
; X86-NEXT:    setnp %al
; X86-NEXT:    retl
;
; X64-LABEL: freeze_ctpop:
; X64:       # %bb.0:
; X64-NEXT:    testb %dil, %dil
; X64-NEXT:    setnp %al
; X64-NEXT:    retq
  %x = call i8 @llvm.ctpop.i8(i8 %a0)
  %y = freeze i8 %x
  %z = and i8 %y, 1
  ret i8 %z
}
declare i8 @llvm.ctpop.i8(i8)

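; There is no vector parity flag, so the full ctpop expansion followed by the
; low-bit mask is expected.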
define <16 x i8> @freeze_ctpop_vec(<16 x i8> %a0) nounwind {
; X86-LABEL: freeze_ctpop_vec:
; X86:       # %bb.0:
; X86-NEXT:    movdqa %xmm0, %xmm1
; X86-NEXT:    psrlw $1, %xmm1
; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-NEXT:    psubb %xmm1, %xmm0
; X86-NEXT:    movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X86-NEXT:    movdqa %xmm0, %xmm2
; X86-NEXT:    pand %xmm1, %xmm2
; X86-NEXT:    psrlw $2, %xmm0
; X86-NEXT:    pand %xmm1, %xmm0
; X86-NEXT:    paddb %xmm2, %xmm0
; X86-NEXT:    movdqa %xmm0, %xmm1
; X86-NEXT:    psrlw $4, %xmm1
; X86-NEXT:    paddb %xmm1, %xmm0
; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: freeze_ctpop_vec:
; X64:       # %bb.0:
; X64-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X64-NEXT:    movdqa %xmm0, %xmm3
; X64-NEXT:    pand %xmm2, %xmm3
; X64-NEXT:    movdqa {{.*#+}} xmm1 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; X64-NEXT:    movdqa %xmm1, %xmm4
; X64-NEXT:    pshufb %xmm3, %xmm4
; X64-NEXT:    psrlw $4, %xmm0
; X64-NEXT:    pand %xmm2, %xmm0
; X64-NEXT:    pshufb %xmm0, %xmm1
; X64-NEXT:    paddb %xmm4, %xmm1
; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT:    movdqa %xmm1, %xmm0
; X64-NEXT:    retq
  %x = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a0)
  %y = freeze <16 x i8> %x
  %z = and <16 x i8> %y, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  ret <16 x i8> %z
}
declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>)

; parity knownbits pattern: the known bits of and(ctpop(x), 1) must survive
; the freeze so that the second 'and' folds away.
define i8 @freeze_parity(i8 %a0) nounwind {
; X86-LABEL: freeze_parity:
; X86:       # %bb.0:
; X86-NEXT:    cmpb $0, {{[0-9]+}}(%esp)
; X86-NEXT:    setnp %al
; X86-NEXT:    retl
;
; X64-LABEL: freeze_parity:
; X64:       # %bb.0:
; X64-NEXT:    testb %dil, %dil
; X64-NEXT:    setnp %al
; X64-NEXT:    retq
  %x = call i8 @llvm.ctpop.i8(i8 %a0)
  %y = and i8 %x, 1
  %z = freeze i8 %y
  %w = and i8 %z, 1
  ret i8 %w
}

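; Vector version: full ctpop expansion; the second 'and' is folded into the
; final mask.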
define <16 x i8> @freeze_parity_vec(<16 x i8> %a0) nounwind {
; X86-LABEL: freeze_parity_vec:
; X86:       # %bb.0:
; X86-NEXT:    movdqa %xmm0, %xmm1
; X86-NEXT:    psrlw $1, %xmm1
; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-NEXT:    psubb %xmm1, %xmm0
; X86-NEXT:    movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X86-NEXT:    movdqa %xmm0, %xmm2
; X86-NEXT:    pand %xmm1, %xmm2
; X86-NEXT:    psrlw $2, %xmm0
; X86-NEXT:    pand %xmm1, %xmm0
; X86-NEXT:    paddb %xmm2, %xmm0
; X86-NEXT:    movdqa %xmm0, %xmm1
; X86-NEXT:    psrlw $4, %xmm1
; X86-NEXT:    paddb %xmm1, %xmm0
; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: freeze_parity_vec:
; X64:       # %bb.0:
; X64-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X64-NEXT:    movdqa %xmm0, %xmm3
; X64-NEXT:    pand %xmm2, %xmm3
; X64-NEXT:    movdqa {{.*#+}} xmm1 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; X64-NEXT:    movdqa %xmm1, %xmm4
; X64-NEXT:    pshufb %xmm3, %xmm4
; X64-NEXT:    psrlw $4, %xmm0
; X64-NEXT:    pand %xmm2, %xmm0
; X64-NEXT:    pshufb %xmm0, %xmm1
; X64-NEXT:    paddb %xmm4, %xmm1
; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT:    movdqa %xmm1, %xmm0
; X64-NEXT:    retq
  %x = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a0)
  %y = and <16 x i8> %x, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %z = freeze <16 x i8> %y
  %w = and <16 x i8> %z, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  ret <16 x i8> %w
}