; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s -check-prefix=X64-O0
; RUN: llc -O0 -mtriple=i686-unknown < %s | FileCheck %s -check-prefix=X86-O0
; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s -check-prefix=X64
; RUN: llc -mtriple=i686-unknown < %s | FileCheck %s -check-prefix=X86
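;
; Reduced test for a 64-bit shift whose value and amount both come from
; 16-bit globals: the shifted value is (var_22 ^ var_27) ^ var_27, with
; each operand zero-extended and the result sign-extended to i64; the
; shift amount is var_27 - 16610, zero-extended to i64. The truncated
; result is stored through an undef pointer. Codegen is checked at -O0
; and at the default optimization level for both x86-64 and i686.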
@var_22 = external dso_local global i16, align 2
@var_27 = external dso_local global i16, align 2

define void @foo() {
; X64-O0-LABEL: foo:
; X64-O0: # %bb.0: # %bb
; X64-O0-NEXT: movzwl var_22, %eax
; X64-O0-NEXT: movzwl var_27, %ecx
; X64-O0-NEXT: xorl %ecx, %eax
; X64-O0-NEXT: movzwl var_27, %ecx
; X64-O0-NEXT: xorl %ecx, %eax
; X64-O0-NEXT: cltq
; X64-O0-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-O0-NEXT: movzwl var_22, %eax
; X64-O0-NEXT: movzwl var_27, %ecx
; X64-O0-NEXT: xorl %ecx, %eax
; X64-O0-NEXT: movzwl var_27, %ecx
; X64-O0-NEXT: xorl %ecx, %eax
; X64-O0-NEXT: cltq
; X64-O0-NEXT: movzwl var_27, %ecx
; X64-O0-NEXT: subl $16610, %ecx # imm = 0x40E2
; X64-O0-NEXT: movl %ecx, %ecx
; X64-O0-NEXT: # kill: def $rcx killed $ecx
; X64-O0-NEXT: # kill: def $cl killed $rcx
; X64-O0-NEXT: sarq %cl, %rax
; X64-O0-NEXT: movb %al, %cl
; X64-O0-NEXT: # implicit-def: $rax
; X64-O0-NEXT: movb %cl, (%rax)
; X64-O0-NEXT: retq
;
; X86-O0-LABEL: foo:
; X86-O0: # %bb.0: # %bb
; X86-O0-NEXT: pushl %ebp
; X86-O0-NEXT: .cfi_def_cfa_offset 8
; X86-O0-NEXT: .cfi_offset %ebp, -8
; X86-O0-NEXT: movl %esp, %ebp
; X86-O0-NEXT: .cfi_def_cfa_register %ebp
; X86-O0-NEXT: andl $-8, %esp
; X86-O0-NEXT: subl $24, %esp
; X86-O0-NEXT: movzwl var_22, %eax
; X86-O0-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-O0-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-O0-NEXT: movzwl var_22, %edx
; X86-O0-NEXT: movb var_27, %cl
; X86-O0-NEXT: addb $30, %cl
; X86-O0-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-O0-NEXT: xorl %eax, %eax
; X86-O0-NEXT: shrdl %cl, %eax, %edx
; X86-O0-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Reload
; X86-O0-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-O0-NEXT: testb $32, %cl
; X86-O0-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-O0-NEXT: jne .LBB0_2
; X86-O0-NEXT: # %bb.1: # %bb
; X86-O0-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-O0-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-O0-NEXT: .LBB0_2: # %bb
; X86-O0-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-O0-NEXT: movb %al, %cl
; X86-O0-NEXT: # implicit-def: $eax
; X86-O0-NEXT: movb %cl, (%eax)
; X86-O0-NEXT: movl %ebp, %esp
; X86-O0-NEXT: popl %ebp
; X86-O0-NEXT: .cfi_def_cfa %esp, 4
; X86-O0-NEXT: retl
;
; X64-LABEL: foo:
; X64: # %bb.0: # %bb
; X64-NEXT: movzbl var_27(%rip), %ecx
; X64-NEXT: movzwl var_22(%rip), %eax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-NEXT: addb $30, %cl
; X64-NEXT: shrq %cl, %rax
; X64-NEXT: movb %al, (%rax)
; X64-NEXT: retq
;
; X86-LABEL: foo:
; X86: # %bb.0: # %bb
; X86-NEXT: pushl %ebp
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %ebp, -8
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: .cfi_def_cfa_register %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
; X86-NEXT: movzbl var_27, %ecx
; X86-NEXT: movzwl var_22, %eax
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: addb $30, %cl
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: shrdl %cl, %edx, %eax
; X86-NEXT: testb $32, %cl
; X86-NEXT: jne .LBB0_2
; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: movl %eax, %edx
; X86-NEXT: .LBB0_2: # %bb
; X86-NEXT: movb %dl, (%eax)
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: .cfi_def_cfa %esp, 4
; X86-NEXT: retl
bb:
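; i64 spill slot; the first evaluation below is stored into it.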
%tmp = alloca i64, align 8
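; First evaluation of (var_22 ^ var_27) ^ var_27, widened via zext per
; operand, sign-extended to i64, and stored to the slot.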
%tmp1 = load i16, ptr @var_22, align 2
%tmp2 = zext i16 %tmp1 to i32
%tmp3 = load i16, ptr @var_27, align 2
%tmp4 = zext i16 %tmp3 to i32
%tmp5 = xor i32 %tmp2, %tmp4
%tmp6 = load i16, ptr @var_27, align 2
%tmp7 = zext i16 %tmp6 to i32
%tmp8 = xor i32 %tmp5, %tmp7
%tmp9 = sext i32 %tmp8 to i64
store i64 %tmp9, ptr %tmp, align 8
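; The same expression is recomputed to produce the value being shifted.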
%tmp10 = load i16, ptr @var_22, align 2
%tmp11 = zext i16 %tmp10 to i32
%tmp12 = load i16, ptr @var_27, align 2
%tmp13 = zext i16 %tmp12 to i32
%tmp14 = xor i32 %tmp11, %tmp13
%tmp15 = load i16, ptr @var_27, align 2
%tmp16 = zext i16 %tmp15 to i32
%tmp17 = xor i32 %tmp14, %tmp16
%tmp18 = sext i32 %tmp17 to i64
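; Shift amount: var_27 - 16610, zero-extended to i64.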
%tmp19 = load i16, ptr @var_27, align 2
%tmp20 = zext i16 %tmp19 to i32
%tmp21 = sub nsw i32 %tmp20, 16610
%tmp22 = zext i32 %tmp21 to i64
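; Arithmetic right shift, truncated to i8 and stored through an undef
; pointer.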
%tmp23 = ashr i64 %tmp18, %tmp22
%tmp24 = trunc i64 %tmp23 to i8
store i8 %tmp24, ptr undef, align 1
ret void
}