; llvm/test/Transforms/InstCombine/scalable-cast-of-alloc.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
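; Each alloca below is accessed through loads/stores of a different vector
; type (fixed <-> scalable, or two distinct scalable types). A scalable
; vector's size is a runtime multiple of vscale, so no compile-time size
; relationship with the alloca can be assumed; the checks verify that the
; alloca and its accesses are left untouched.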

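; Fixed-length array alloca ([16 x i32]) accessed as a scalable vector; check
; that the alloca and the volatile <vscale x 4 x i32> accesses are kept as-is.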
define void @fixed_array16i32_to_scalable4i32(ptr %out) {
; CHECK-LABEL: @fixed_array16i32_to_scalable4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca [16 x i32], align 16
; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca [16 x i32], align 16
  store volatile <vscale x 4 x i32> zeroinitializer, ptr %tmp, align 16
  %reload = load volatile <vscale x 4 x i32>, ptr %tmp, align 16
  store <vscale x 4 x i32> %reload, ptr %out, align 16
  ret void
}

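; Scalable vector alloca (<vscale x 4 x i32>) accessed as a fixed-length
; <16 x i32>; check that nothing changes.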
define void @scalable4i32_to_fixed16i32(ptr %out) {
; CHECK-LABEL: @scalable4i32_to_fixed16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 4 x i32>, align 16
; CHECK-NEXT:    store volatile <16 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 4 x i32>, align 16
  store volatile <16 x i32> zeroinitializer, ptr %tmp, align 16
  %reload = load volatile <16 x i32>, ptr %tmp, align 16
  store <16 x i32> %reload, ptr %out, align 16
  ret void
}

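; Fixed-length vector alloca (<16 x i32>) accessed as <vscale x 4 x i32>;
; check that the alloca and the volatile accesses are kept as-is.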
define void @fixed16i32_to_scalable4i32(ptr %out) {
; CHECK-LABEL: @fixed16i32_to_scalable4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <16 x i32>, align 16
; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <16 x i32>, align 16
  store volatile <vscale x 4 x i32> zeroinitializer, ptr %tmp, align 16
  %reload = load volatile <vscale x 4 x i32>, ptr %tmp, align 16
  store <vscale x 4 x i32> %reload, ptr %out, align 16
  ret void
}

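; Scalable alloca (<vscale x 16 x i32>) accessed as a fixed-length <16 x i32>;
; check that nothing changes.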
define void @scalable16i32_to_fixed16i32(ptr %out) {
; CHECK-LABEL: @scalable16i32_to_fixed16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 16
; CHECK-NEXT:    store volatile <16 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 16 x i32>, align 16
  store volatile <16 x i32> zeroinitializer, ptr %tmp, align 16
  %reload = load volatile <16 x i32>, ptr %tmp, align 16
  store <16 x i32> %reload, ptr %out, align 16
  ret void
}

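; Scalable alloca accessed as a narrower scalable type
; (<vscale x 32 x i32> alloca, <vscale x 16 x i32> accesses); kept as-is.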
define void @scalable32i32_to_scalable16i32(ptr %out) {
; CHECK-LABEL: @scalable32i32_to_scalable16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i32>, align 16
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i32>, align 16
  store volatile <vscale x 16 x i32> zeroinitializer, ptr %tmp, align 16
  %reload = load volatile <vscale x 16 x i32>, ptr %tmp, align 16
  store <vscale x 16 x i32> %reload, ptr %out, align 16
  ret void
}

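; Scalable alloca and scalable access with the same size per vscale but a
; different element type (<vscale x 32 x i16> vs <vscale x 16 x i32>); kept as-is.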
define void @scalable32i16_to_scalable16i32(ptr %out) {
; CHECK-LABEL: @scalable32i16_to_scalable16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 16
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i16>, align 16
  store volatile <vscale x 16 x i32> zeroinitializer, ptr %tmp, align 16
  %reload = load volatile <vscale x 16 x i32>, ptr %tmp, align 16
  store <vscale x 16 x i32> %reload, ptr %out, align 16
  ret void
}

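; Same as above, but the alloca is also reloaded with its declared
; <vscale x 32 x i16> type; check that both accesses are preserved.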
define void @scalable32i16_to_scalable16i32_multiuse(ptr %out, ptr %out2) {
; CHECK-LABEL: @scalable32i16_to_scalable16i32_multiuse(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 16
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT:    [[RELOAD2:%.*]] = load volatile <vscale x 32 x i16>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <vscale x 32 x i16> [[RELOAD2]], ptr [[OUT2:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i16>, align 16
  store volatile <vscale x 16 x i32> zeroinitializer, ptr %tmp, align 16
  %reload = load volatile <vscale x 16 x i32>, ptr %tmp, align 16
  store <vscale x 16 x i32> %reload, ptr %out, align 16
  %reload2 = load volatile <vscale x 32 x i16>, ptr %tmp, align 16
  store <vscale x 32 x i16> %reload2, ptr %out2, align 16
  ret void
}