llvm/test/Transforms/InstCombine/vscale_gep.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=instcombine < %s | FileCheck %s

; This test verifies we do not crash on the assertion `CastInst::castIsValid(opc, C, Ty) && "Invalid constantexpr cast!"`.
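; With a scalable vector of indices the result is a <vscale x 2 x ptr>; the GEP is expected to be
; left untouched (see the CHECK lines below) rather than folded through a constant-expression cast,
; which is what previously triggered the assertion.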
define <vscale x 2 x ptr> @gep_index_type_is_scalable(ptr %p) {
; CHECK-LABEL: @gep_index_type_is_scalable(
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[P:%.*]], <vscale x 2 x i64> undef
; CHECK-NEXT:    ret <vscale x 2 x ptr> [[GEP]]
;
  %gep = getelementptr i8, ptr %p, <vscale x 2 x i64> undef
  ret <vscale x 2 x ptr> %gep
}

; This test verifies the code path for "GEP.getNumIndices() == 1".
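; A GEP of <vscale x 4 x i32> with a single index of 1 is expected to be rewritten as an i8 GEP
; with a runtime byte offset of vscale * 16 (the allocation size of <vscale x 4 x i32>),
; computed as llvm.vscale.i64() shifted left by 4.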
define ptr @gep_num_of_indices_1(ptr %p) {
; CHECK-LABEL: @gep_num_of_indices_1(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[TMP1]], 4
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP2]]
; CHECK-NEXT:    ret ptr [[GEP]]
;
  %gep = getelementptr <vscale x 4 x i32>, ptr %p, i64 1
  ret ptr %gep
}

; This test verifies the code path for "GEP.getNumOperands() == 2".
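; As above, the stride of one <vscale x 16 x i8> is vscale * 16 bytes, so the GEP is rewritten
; as an i8 GEP with that runtime byte offset while both stores keep their natural alignment of 16.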
define void @gep_bitcast(ptr %p) {
; CHECK-LABEL: @gep_bitcast(
; CHECK-NEXT:    store <vscale x 16 x i8> zeroinitializer, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[TMP1]], 4
; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]]
; CHECK-NEXT:    store <vscale x 16 x i8> zeroinitializer, ptr [[GEP2]], align 16
; CHECK-NEXT:    ret void
;
  store <vscale x 16 x i8> zeroinitializer, ptr %p
  %gep2 = getelementptr <vscale x 16 x i8>, ptr %p, i64 1
  store <vscale x 16 x i8> zeroinitializer, ptr %gep2
  ret void
}

; The following tests cover the case where the underlying GEP pointer is an alloca.
; This test verifies that 'inbounds' is added when a constant offset can be accumulated at compile time.
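; Indices (0, 2) address the third i32 element, a fixed offset of 8 bytes regardless of vscale,
; so the folded i8 GEP carries 'inbounds'.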
define i32 @gep_alloca_inbounds_vscale_zero() {
; CHECK-LABEL: @gep_alloca_inbounds_vscale_zero(
; CHECK-NEXT:    [[A:%.*]] = alloca <vscale x 4 x i32>, align 16
; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 8
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[TMP]], align 4
; CHECK-NEXT:    ret i32 [[LOAD]]
;
  %a = alloca <vscale x 4 x i32>
  %tmp = getelementptr <vscale x 4 x i32>, ptr %a, i32 0, i32 2
  %load = load i32, ptr %tmp
  ret i32 %load
}

; This test verifies that 'inbounds' is not added when a constant offset cannot be determined at compile time.
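; Indices (1, 2) give an offset of vscale * 16 + 8 bytes. The low four bits of vscale * 16 are known
; to be zero, so the addition of 8 is emitted as 'or disjoint'; because the total offset is not a
; compile-time constant, no 'inbounds' is added.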
define i32 @gep_alloca_inbounds_vscale_nonzero() {
; CHECK-LABEL: @gep_alloca_inbounds_vscale_nonzero(
; CHECK-NEXT:    [[A:%.*]] = alloca <vscale x 4 x i32>, align 16
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[TMP1]], 4
; CHECK-NEXT:    [[TMP_OFFS:%.*]] = or disjoint i64 [[TMP2]], 8
; CHECK-NEXT:    [[TMP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP_OFFS]]
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[TMP]], align 4
; CHECK-NEXT:    ret i32 [[LOAD]]
;
  %a = alloca <vscale x 4 x i32>
  %tmp = getelementptr <vscale x 4 x i32>, ptr %a, i32 1, i32 2
  %load = load i32, ptr %tmp
  ret i32 %load
}