llvm/test/CodeGen/X86/load-local-v3i129.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no_x86_scrub_sp
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=FAST-SHLD
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+slow-shld | FileCheck %s --check-prefix=SLOW-SHLD
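; Check the i64-based lowering of a load/insertelement/store round trip on a
; <3 x i129> stack slot, where element 1 is overwritten with 2^128-1. The
; second RUN line adds -mattr=+slow-shld to cover the lowering used when
; shld/shrd funnel shifts are considered slow.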

define void @_start() nounwind {
; FAST-SHLD-LABEL: _start:
; FAST-SHLD:       # %bb.0: # %Entry
; FAST-SHLD-NEXT:    movq -40(%rsp), %rax
; FAST-SHLD-NEXT:    movq -32(%rsp), %rcx
; FAST-SHLD-NEXT:    movq %rcx, %rdx
; FAST-SHLD-NEXT:    shlq $62, %rdx
; FAST-SHLD-NEXT:    shrq $2, %rcx
; FAST-SHLD-NEXT:    shldq $2, %rdx, %rcx
; FAST-SHLD-NEXT:    andq $-4, %rax
; FAST-SHLD-NEXT:    incq %rax
; FAST-SHLD-NEXT:    movq %rax, -40(%rsp)
; FAST-SHLD-NEXT:    movq %rcx, -32(%rsp)
; FAST-SHLD-NEXT:    orq $-2, -56(%rsp)
; FAST-SHLD-NEXT:    movq $-1, -48(%rsp)
; FAST-SHLD-NEXT:    retq
;
; SLOW-SHLD-LABEL: _start:
; SLOW-SHLD:       # %bb.0: # %Entry
; SLOW-SHLD-NEXT:    movq -40(%rsp), %rax
; SLOW-SHLD-NEXT:    andq $-4, %rax
; SLOW-SHLD-NEXT:    incq %rax
; SLOW-SHLD-NEXT:    movq %rax, -40(%rsp)
; SLOW-SHLD-NEXT:    orq $-2, -56(%rsp)
; SLOW-SHLD-NEXT:    movq $-1, -48(%rsp)
; SLOW-SHLD-NEXT:    retq
Entry:
  %y = alloca <3 x i129>, align 16
  %L = load <3 x i129>, ptr %y
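  ; 340282366920938463463374607431768211455 == 2^128 - 1 (low 128 bits all set)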
  %I1 = insertelement <3 x i129> %L, i129 340282366920938463463374607431768211455, i32 1
  store <3 x i129> %I1, ptr %y
  ret void
}