llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=slp-vectorizer,instcombine -mtriple=aarch64--linux-gnu < %s | FileCheck %s

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64"

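; reduce_allstrided: eight elements of %x and %y loaded at 0..7 * %stride, multiplied
; pairwise and summed; per the CHECK lines this stays fully scalar after SLP+instcombine.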
define i16 @reduce_allstrided(ptr nocapture noundef readonly %x, ptr nocapture noundef readonly %y, i32 noundef %stride) {
; CHECK-LABEL: @reduce_allstrided(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[X:%.*]], align 2
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[STRIDE:%.*]] to i64
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[ARRAYIDX1]], align 2
; CHECK-NEXT:    [[MUL2:%.*]] = shl nsw i32 [[STRIDE]], 1
; CHECK-NEXT:    [[IDXPROM3:%.*]] = sext i32 [[MUL2]] to i64
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM3]]
; CHECK-NEXT:    [[TMP2:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
; CHECK-NEXT:    [[MUL5:%.*]] = mul nsw i32 [[STRIDE]], 3
; CHECK-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[MUL5]] to i64
; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM6]]
; CHECK-NEXT:    [[TMP3:%.*]] = load i16, ptr [[ARRAYIDX7]], align 2
; CHECK-NEXT:    [[MUL8:%.*]] = shl nsw i32 [[STRIDE]], 2
; CHECK-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[MUL8]] to i64
; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM9]]
; CHECK-NEXT:    [[TMP4:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
; CHECK-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[STRIDE]], 5
; CHECK-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[MUL11]] to i64
; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM12]]
; CHECK-NEXT:    [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX13]], align 2
; CHECK-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[STRIDE]], 6
; CHECK-NEXT:    [[IDXPROM15:%.*]] = sext i32 [[MUL14]] to i64
; CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM15]]
; CHECK-NEXT:    [[TMP6:%.*]] = load i16, ptr [[ARRAYIDX16]], align 2
; CHECK-NEXT:    [[MUL17:%.*]] = mul nsw i32 [[STRIDE]], 7
; CHECK-NEXT:    [[IDXPROM18:%.*]] = sext i32 [[MUL17]] to i64
; CHECK-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM18]]
; CHECK-NEXT:    [[TMP7:%.*]] = load i16, ptr [[ARRAYIDX19]], align 2
; CHECK-NEXT:    [[TMP8:%.*]] = load i16, ptr [[Y:%.*]], align 2
; CHECK-NEXT:    [[ARRAYIDX23:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX23]], align 2
; CHECK-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM3]]
; CHECK-NEXT:    [[TMP10:%.*]] = load i16, ptr [[ARRAYIDX26]], align 2
; CHECK-NEXT:    [[ARRAYIDX29:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM6]]
; CHECK-NEXT:    [[TMP11:%.*]] = load i16, ptr [[ARRAYIDX29]], align 2
; CHECK-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM9]]
; CHECK-NEXT:    [[TMP12:%.*]] = load i16, ptr [[ARRAYIDX32]], align 2
; CHECK-NEXT:    [[ARRAYIDX35:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM12]]
; CHECK-NEXT:    [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX35]], align 2
; CHECK-NEXT:    [[ARRAYIDX38:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM15]]
; CHECK-NEXT:    [[TMP14:%.*]] = load i16, ptr [[ARRAYIDX38]], align 2
; CHECK-NEXT:    [[ARRAYIDX41:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM18]]
; CHECK-NEXT:    [[TMP15:%.*]] = load i16, ptr [[ARRAYIDX41]], align 2
; CHECK-NEXT:    [[MUL43:%.*]] = mul i16 [[TMP8]], [[TMP0]]
; CHECK-NEXT:    [[MUL48:%.*]] = mul i16 [[TMP9]], [[TMP1]]
; CHECK-NEXT:    [[ADD49:%.*]] = add i16 [[MUL48]], [[MUL43]]
; CHECK-NEXT:    [[MUL54:%.*]] = mul i16 [[TMP10]], [[TMP2]]
; CHECK-NEXT:    [[ADD55:%.*]] = add i16 [[ADD49]], [[MUL54]]
; CHECK-NEXT:    [[MUL60:%.*]] = mul i16 [[TMP11]], [[TMP3]]
; CHECK-NEXT:    [[ADD61:%.*]] = add i16 [[ADD55]], [[MUL60]]
; CHECK-NEXT:    [[MUL66:%.*]] = mul i16 [[TMP12]], [[TMP4]]
; CHECK-NEXT:    [[ADD67:%.*]] = add i16 [[ADD61]], [[MUL66]]
; CHECK-NEXT:    [[MUL72:%.*]] = mul i16 [[TMP13]], [[TMP5]]
; CHECK-NEXT:    [[ADD73:%.*]] = add i16 [[ADD67]], [[MUL72]]
; CHECK-NEXT:    [[MUL78:%.*]] = mul i16 [[TMP14]], [[TMP6]]
; CHECK-NEXT:    [[ADD79:%.*]] = add i16 [[ADD73]], [[MUL78]]
; CHECK-NEXT:    [[MUL84:%.*]] = mul i16 [[TMP15]], [[TMP7]]
; CHECK-NEXT:    [[ADD85:%.*]] = add i16 [[ADD79]], [[MUL84]]
; CHECK-NEXT:    ret i16 [[ADD85]]
;
entry:
  %0 = load i16, ptr %x, align 2
  %idxprom = sext i32 %stride to i64
  %arrayidx1 = getelementptr inbounds i16, ptr %x, i64 %idxprom
  %1 = load i16, ptr %arrayidx1, align 2
  %mul2 = shl nsw i32 %stride, 1
  %idxprom3 = sext i32 %mul2 to i64
  %arrayidx4 = getelementptr inbounds i16, ptr %x, i64 %idxprom3
  %2 = load i16, ptr %arrayidx4, align 2
  %mul5 = mul nsw i32 %stride, 3
  %idxprom6 = sext i32 %mul5 to i64
  %arrayidx7 = getelementptr inbounds i16, ptr %x, i64 %idxprom6
  %3 = load i16, ptr %arrayidx7, align 2
  %mul8 = shl nsw i32 %stride, 2
  %idxprom9 = sext i32 %mul8 to i64
  %arrayidx10 = getelementptr inbounds i16, ptr %x, i64 %idxprom9
  %4 = load i16, ptr %arrayidx10, align 2
  %mul11 = mul nsw i32 %stride, 5
  %idxprom12 = sext i32 %mul11 to i64
  %arrayidx13 = getelementptr inbounds i16, ptr %x, i64 %idxprom12
  %5 = load i16, ptr %arrayidx13, align 2
  %mul14 = mul nsw i32 %stride, 6
  %idxprom15 = sext i32 %mul14 to i64
  %arrayidx16 = getelementptr inbounds i16, ptr %x, i64 %idxprom15
  %6 = load i16, ptr %arrayidx16, align 2
  %mul17 = mul nsw i32 %stride, 7
  %idxprom18 = sext i32 %mul17 to i64
  %arrayidx19 = getelementptr inbounds i16, ptr %x, i64 %idxprom18
  %7 = load i16, ptr %arrayidx19, align 2
  %8 = load i16, ptr %y, align 2
  %arrayidx23 = getelementptr inbounds i16, ptr %y, i64 %idxprom
  %9 = load i16, ptr %arrayidx23, align 2
  %arrayidx26 = getelementptr inbounds i16, ptr %y, i64 %idxprom3
  %10 = load i16, ptr %arrayidx26, align 2
  %arrayidx29 = getelementptr inbounds i16, ptr %y, i64 %idxprom6
  %11 = load i16, ptr %arrayidx29, align 2
  %arrayidx32 = getelementptr inbounds i16, ptr %y, i64 %idxprom9
  %12 = load i16, ptr %arrayidx32, align 2
  %arrayidx35 = getelementptr inbounds i16, ptr %y, i64 %idxprom12
  %13 = load i16, ptr %arrayidx35, align 2
  %arrayidx38 = getelementptr inbounds i16, ptr %y, i64 %idxprom15
  %14 = load i16, ptr %arrayidx38, align 2
  %arrayidx41 = getelementptr inbounds i16, ptr %y, i64 %idxprom18
  %15 = load i16, ptr %arrayidx41, align 2
  %mul43 = mul i16 %8, %0
  %mul48 = mul i16 %9, %1
  %add49 = add i16 %mul48, %mul43
  %mul54 = mul i16 %10, %2
  %add55 = add i16 %add49, %mul54
  %mul60 = mul i16 %11, %3
  %add61 = add i16 %add55, %mul60
  %mul66 = mul i16 %12, %4
  %add67 = add i16 %add61, %mul66
  %mul72 = mul i16 %13, %5
  %add73 = add i16 %add67, %mul72
  %mul78 = mul i16 %14, %6
  %add79 = add i16 %add73, %mul78
  %mul84 = mul i16 %15, %7
  %add85 = add i16 %add79, %mul84
  ret i16 %add85
}

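; reduce_blockstrided2: four 2-element blocks (offsets 0, %stride, 2*%stride, 3*%stride)
; from %x and %y feed a mul/add reduction; the CHECK lines show it remains scalar.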
define i16 @reduce_blockstrided2(ptr nocapture noundef readonly %x, ptr nocapture noundef readonly %y, i32 noundef %stride) {
; CHECK-LABEL: @reduce_blockstrided2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[X:%.*]], align 2
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 2
; CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[ARRAYIDX1]], align 2
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[STRIDE:%.*]] to i64
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP2:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2
; CHECK-NEXT:    [[ADD3:%.*]] = add nsw i32 [[STRIDE]], 1
; CHECK-NEXT:    [[IDXPROM4:%.*]] = sext i32 [[ADD3]] to i64
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM4]]
; CHECK-NEXT:    [[TMP3:%.*]] = load i16, ptr [[ARRAYIDX5]], align 2
; CHECK-NEXT:    [[MUL:%.*]] = shl nsw i32 [[STRIDE]], 1
; CHECK-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM7]]
; CHECK-NEXT:    [[TMP4:%.*]] = load i16, ptr [[ARRAYIDX8]], align 2
; CHECK-NEXT:    [[ADD10:%.*]] = or disjoint i32 [[MUL]], 1
; CHECK-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[ADD10]] to i64
; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM11]]
; CHECK-NEXT:    [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX12]], align 2
; CHECK-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[STRIDE]], 3
; CHECK-NEXT:    [[IDXPROM15:%.*]] = sext i32 [[MUL13]] to i64
; CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM15]]
; CHECK-NEXT:    [[TMP6:%.*]] = load i16, ptr [[ARRAYIDX16]], align 2
; CHECK-NEXT:    [[ADD18:%.*]] = add nsw i32 [[MUL13]], 1
; CHECK-NEXT:    [[IDXPROM19:%.*]] = sext i32 [[ADD18]] to i64
; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM19]]
; CHECK-NEXT:    [[TMP7:%.*]] = load i16, ptr [[ARRAYIDX20]], align 2
; CHECK-NEXT:    [[TMP8:%.*]] = load i16, ptr [[Y:%.*]], align 2
; CHECK-NEXT:    [[ARRAYIDX24:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX24]], align 2
; CHECK-NEXT:    [[ARRAYIDX28:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM7]]
; CHECK-NEXT:    [[TMP10:%.*]] = load i16, ptr [[ARRAYIDX28]], align 2
; CHECK-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM15]]
; CHECK-NEXT:    [[TMP11:%.*]] = load i16, ptr [[ARRAYIDX32]], align 2
; CHECK-NEXT:    [[ARRAYIDX33:%.*]] = getelementptr inbounds i8, ptr [[Y]], i64 2
; CHECK-NEXT:    [[TMP12:%.*]] = load i16, ptr [[ARRAYIDX33]], align 2
; CHECK-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM4]]
; CHECK-NEXT:    [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX36]], align 2
; CHECK-NEXT:    [[ARRAYIDX40:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM11]]
; CHECK-NEXT:    [[TMP14:%.*]] = load i16, ptr [[ARRAYIDX40]], align 2
; CHECK-NEXT:    [[ARRAYIDX44:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM19]]
; CHECK-NEXT:    [[TMP15:%.*]] = load i16, ptr [[ARRAYIDX44]], align 2
; CHECK-NEXT:    [[MUL46:%.*]] = mul i16 [[TMP8]], [[TMP0]]
; CHECK-NEXT:    [[MUL52:%.*]] = mul i16 [[TMP12]], [[TMP1]]
; CHECK-NEXT:    [[MUL58:%.*]] = mul i16 [[TMP9]], [[TMP2]]
; CHECK-NEXT:    [[MUL64:%.*]] = mul i16 [[TMP13]], [[TMP3]]
; CHECK-NEXT:    [[MUL70:%.*]] = mul i16 [[TMP10]], [[TMP4]]
; CHECK-NEXT:    [[MUL76:%.*]] = mul i16 [[TMP14]], [[TMP5]]
; CHECK-NEXT:    [[MUL82:%.*]] = mul i16 [[TMP11]], [[TMP6]]
; CHECK-NEXT:    [[MUL88:%.*]] = mul i16 [[TMP15]], [[TMP7]]
; CHECK-NEXT:    [[ADD53:%.*]] = add i16 [[MUL58]], [[MUL46]]
; CHECK-NEXT:    [[ADD59:%.*]] = add i16 [[ADD53]], [[MUL70]]
; CHECK-NEXT:    [[ADD65:%.*]] = add i16 [[ADD59]], [[MUL82]]
; CHECK-NEXT:    [[ADD71:%.*]] = add i16 [[ADD65]], [[MUL52]]
; CHECK-NEXT:    [[ADD77:%.*]] = add i16 [[ADD71]], [[MUL64]]
; CHECK-NEXT:    [[ADD83:%.*]] = add i16 [[ADD77]], [[MUL76]]
; CHECK-NEXT:    [[ADD89:%.*]] = add i16 [[ADD83]], [[MUL88]]
; CHECK-NEXT:    ret i16 [[ADD89]]
;
entry:
  %0 = load i16, ptr %x, align 2
  %arrayidx1 = getelementptr inbounds i16, ptr %x, i64 1
  %1 = load i16, ptr %arrayidx1, align 2
  %idxprom = sext i32 %stride to i64
  %arrayidx2 = getelementptr inbounds i16, ptr %x, i64 %idxprom
  %2 = load i16, ptr %arrayidx2, align 2
  %add3 = add nsw i32 %stride, 1
  %idxprom4 = sext i32 %add3 to i64
  %arrayidx5 = getelementptr inbounds i16, ptr %x, i64 %idxprom4
  %3 = load i16, ptr %arrayidx5, align 2
  %mul = shl nsw i32 %stride, 1
  %idxprom7 = sext i32 %mul to i64
  %arrayidx8 = getelementptr inbounds i16, ptr %x, i64 %idxprom7
  %4 = load i16, ptr %arrayidx8, align 2
  %add10 = or disjoint i32 %mul, 1
  %idxprom11 = sext i32 %add10 to i64
  %arrayidx12 = getelementptr inbounds i16, ptr %x, i64 %idxprom11
  %5 = load i16, ptr %arrayidx12, align 2
  %mul13 = mul nsw i32 %stride, 3
  %idxprom15 = sext i32 %mul13 to i64
  %arrayidx16 = getelementptr inbounds i16, ptr %x, i64 %idxprom15
  %6 = load i16, ptr %arrayidx16, align 2
  %add18 = add nsw i32 %mul13, 1
  %idxprom19 = sext i32 %add18 to i64
  %arrayidx20 = getelementptr inbounds i16, ptr %x, i64 %idxprom19
  %7 = load i16, ptr %arrayidx20, align 2
  %8 = load i16, ptr %y, align 2
  %arrayidx24 = getelementptr inbounds i16, ptr %y, i64 %idxprom
  %9 = load i16, ptr %arrayidx24, align 2
  %arrayidx28 = getelementptr inbounds i16, ptr %y, i64 %idxprom7
  %10 = load i16, ptr %arrayidx28, align 2
  %arrayidx32 = getelementptr inbounds i16, ptr %y, i64 %idxprom15
  %11 = load i16, ptr %arrayidx32, align 2
  %arrayidx33 = getelementptr inbounds i16, ptr %y, i64 1
  %12 = load i16, ptr %arrayidx33, align 2
  %arrayidx36 = getelementptr inbounds i16, ptr %y, i64 %idxprom4
  %13 = load i16, ptr %arrayidx36, align 2
  %arrayidx40 = getelementptr inbounds i16, ptr %y, i64 %idxprom11
  %14 = load i16, ptr %arrayidx40, align 2
  %arrayidx44 = getelementptr inbounds i16, ptr %y, i64 %idxprom19
  %15 = load i16, ptr %arrayidx44, align 2
  %mul46 = mul i16 %8, %0
  %mul52 = mul i16 %12, %1
  %mul58 = mul i16 %9, %2
  %mul64 = mul i16 %13, %3
  %mul70 = mul i16 %10, %4
  %mul76 = mul i16 %14, %5
  %mul82 = mul i16 %11, %6
  %mul88 = mul i16 %15, %7
  %add53 = add i16 %mul58, %mul46
  %add59 = add i16 %add53, %mul70
  %add65 = add i16 %add59, %mul82
  %add71 = add i16 %add65, %mul52
  %add77 = add i16 %add71, %mul64
  %add83 = add i16 %add77, %mul76
  %add89 = add i16 %add83, %mul88
  ret i16 %add89
}

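; reduce_blockstrided3: two 3-element blocks (offsets 0 and %stride) from %x and %y;
; the CHECK lines show the reduction is not vectorized.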
define i16 @reduce_blockstrided3(ptr nocapture noundef readonly %x, ptr nocapture noundef readonly %y, i32 noundef %stride) {
; CHECK-LABEL: @reduce_blockstrided3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[L0:%.*]] = load i16, ptr [[X:%.*]], align 2
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 2
; CHECK-NEXT:    [[L1:%.*]] = load i16, ptr [[ARRAYIDX1]], align 2
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 4
; CHECK-NEXT:    [[L2:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[STRIDE:%.*]] to i64
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[L4:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
; CHECK-NEXT:    [[ADD5:%.*]] = add nsw i32 [[STRIDE]], 1
; CHECK-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[ADD5]] to i64
; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM6]]
; CHECK-NEXT:    [[L5:%.*]] = load i16, ptr [[ARRAYIDX7]], align 2
; CHECK-NEXT:    [[ADD8:%.*]] = add nsw i32 [[STRIDE]], 2
; CHECK-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[ADD8]] to i64
; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM9]]
; CHECK-NEXT:    [[L6:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
; CHECK-NEXT:    [[L8:%.*]] = load i16, ptr [[Y:%.*]], align 2
; CHECK-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds i8, ptr [[Y]], i64 2
; CHECK-NEXT:    [[L9:%.*]] = load i16, ptr [[ARRAYIDX15]], align 2
; CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i8, ptr [[Y]], i64 4
; CHECK-NEXT:    [[L10:%.*]] = load i16, ptr [[ARRAYIDX16]], align 2
; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[L12:%.*]] = load i16, ptr [[ARRAYIDX20]], align 2
; CHECK-NEXT:    [[ARRAYIDX23:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM6]]
; CHECK-NEXT:    [[L13:%.*]] = load i16, ptr [[ARRAYIDX23]], align 2
; CHECK-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, ptr [[Y]], i64 [[IDXPROM9]]
; CHECK-NEXT:    [[L14:%.*]] = load i16, ptr [[ARRAYIDX26]], align 2
; CHECK-NEXT:    [[MUL:%.*]] = mul i16 [[L8]], [[L0]]
; CHECK-NEXT:    [[MUL36:%.*]] = mul i16 [[L9]], [[L1]]
; CHECK-NEXT:    [[ADD37:%.*]] = add i16 [[MUL36]], [[MUL]]
; CHECK-NEXT:    [[MUL48:%.*]] = mul i16 [[L10]], [[L2]]
; CHECK-NEXT:    [[ADD49:%.*]] = add i16 [[ADD37]], [[MUL48]]
; CHECK-NEXT:    [[MUL54:%.*]] = mul i16 [[L13]], [[L5]]
; CHECK-NEXT:    [[ADD55:%.*]] = add i16 [[ADD49]], [[MUL54]]
; CHECK-NEXT:    [[MUL60:%.*]] = mul i16 [[L12]], [[L4]]
; CHECK-NEXT:    [[ADD61:%.*]] = add i16 [[ADD55]], [[MUL60]]
; CHECK-NEXT:    [[MUL72:%.*]] = mul i16 [[L14]], [[L6]]
; CHECK-NEXT:    [[ADD73:%.*]] = add i16 [[ADD61]], [[MUL72]]
; CHECK-NEXT:    ret i16 [[ADD73]]
;
entry:
  %l0 = load i16, ptr %x, align 2
  %arrayidx1 = getelementptr inbounds i16, ptr %x, i64 1
  %l1 = load i16, ptr %arrayidx1, align 2
  %arrayidx2 = getelementptr inbounds i16, ptr %x, i64 2
  %l2 = load i16, ptr %arrayidx2, align 2
  %idxprom = sext i32 %stride to i64
  %arrayidx4 = getelementptr inbounds i16, ptr %x, i64 %idxprom
  %l4 = load i16, ptr %arrayidx4, align 2
  %add5 = add nsw i32 %stride, 1
  %idxprom6 = sext i32 %add5 to i64
  %arrayidx7 = getelementptr inbounds i16, ptr %x, i64 %idxprom6
  %l5 = load i16, ptr %arrayidx7, align 2
  %add8 = add nsw i32 %stride, 2
  %idxprom9 = sext i32 %add8 to i64
  %arrayidx10 = getelementptr inbounds i16, ptr %x, i64 %idxprom9
  %l6 = load i16, ptr %arrayidx10, align 2
  %add11 = add nsw i32 %stride, 3
  %idxprom12 = sext i32 %add11 to i64
  %l8 = load i16, ptr %y, align 2
  %arrayidx15 = getelementptr inbounds i16, ptr %y, i64 1
  %l9 = load i16, ptr %arrayidx15, align 2
  %arrayidx16 = getelementptr inbounds i16, ptr %y, i64 2
  %l10 = load i16, ptr %arrayidx16, align 2
  %arrayidx20 = getelementptr inbounds i16, ptr %y, i64 %idxprom
  %l12 = load i16, ptr %arrayidx20, align 2
  %arrayidx23 = getelementptr inbounds i16, ptr %y, i64 %idxprom6
  %l13 = load i16, ptr %arrayidx23, align 2
  %arrayidx26 = getelementptr inbounds i16, ptr %y, i64 %idxprom9
  %l14 = load i16, ptr %arrayidx26, align 2
  %mul = mul i16 %l8, %l0
  %mul36 = mul i16 %l9, %l1
  %add37 = add i16 %mul36, %mul
  %mul48 = mul i16 %l10, %l2
  %add49 = add i16 %add37, %mul48
  %mul54 = mul i16 %l13, %l5
  %add55 = add i16 %add49, %mul54
  %mul60 = mul i16 %l12, %l4
  %add61 = add i16 %add55, %mul60
  %mul72 = mul i16 %l14, %l6
  %add73 = add i16 %add61, %mul72
  ret i16 %add73
}

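; reduce_blockstrided4: two 4-element blocks (offsets 0 and %stride) from %x and %y;
; per the CHECK lines these become <4 x i16> loads feeding @llvm.vector.reduce.add.v8i16.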
define i16 @reduce_blockstrided4(ptr nocapture noundef readonly %x, ptr nocapture noundef readonly %y, i32 noundef %stride) {
; CHECK-LABEL: @reduce_blockstrided4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[STRIDE:%.*]] to i64
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[X]], align 2
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 2
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i16>, ptr [[Y]], align 2
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i16>, ptr [[ARRAYIDX20]], align 2
; CHECK-NEXT:    [[TMP4:%.*]] = mul <4 x i16> [[TMP2]], [[TMP0]]
; CHECK-NEXT:    [[TMP5:%.*]] = mul <4 x i16> [[TMP3]], [[TMP1]]
; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i16> [[TMP4]], <4 x i16> [[TMP5]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP7:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[TMP6]])
; CHECK-NEXT:    ret i16 [[TMP7]]
;
entry:
  %0 = load i16, ptr %x, align 2
  %arrayidx1 = getelementptr inbounds i16, ptr %x, i64 1
  %1 = load i16, ptr %arrayidx1, align 2
  %arrayidx2 = getelementptr inbounds i16, ptr %x, i64 2
  %2 = load i16, ptr %arrayidx2, align 2
  %arrayidx3 = getelementptr inbounds i16, ptr %x, i64 3
  %3 = load i16, ptr %arrayidx3, align 2
  %idxprom = sext i32 %stride to i64
  %arrayidx4 = getelementptr inbounds i16, ptr %x, i64 %idxprom
  %4 = load i16, ptr %arrayidx4, align 2
  %add5 = add nsw i32 %stride, 1
  %idxprom6 = sext i32 %add5 to i64
  %arrayidx7 = getelementptr inbounds i16, ptr %x, i64 %idxprom6
  %5 = load i16, ptr %arrayidx7, align 2
  %add8 = add nsw i32 %stride, 2
  %idxprom9 = sext i32 %add8 to i64
  %arrayidx10 = getelementptr inbounds i16, ptr %x, i64 %idxprom9
  %6 = load i16, ptr %arrayidx10, align 2
  %add11 = add nsw i32 %stride, 3
  %idxprom12 = sext i32 %add11 to i64
  %arrayidx13 = getelementptr inbounds i16, ptr %x, i64 %idxprom12
  %7 = load i16, ptr %arrayidx13, align 2
  %8 = load i16, ptr %y, align 2
  %arrayidx15 = getelementptr inbounds i16, ptr %y, i64 1
  %9 = load i16, ptr %arrayidx15, align 2
  %arrayidx16 = getelementptr inbounds i16, ptr %y, i64 2
  %10 = load i16, ptr %arrayidx16, align 2
  %arrayidx17 = getelementptr inbounds i16, ptr %y, i64 3
  %11 = load i16, ptr %arrayidx17, align 2
  %arrayidx20 = getelementptr inbounds i16, ptr %y, i64 %idxprom
  %12 = load i16, ptr %arrayidx20, align 2
  %arrayidx23 = getelementptr inbounds i16, ptr %y, i64 %idxprom6
  %13 = load i16, ptr %arrayidx23, align 2
  %arrayidx26 = getelementptr inbounds i16, ptr %y, i64 %idxprom9
  %14 = load i16, ptr %arrayidx26, align 2
  %arrayidx29 = getelementptr inbounds i16, ptr %y, i64 %idxprom12
  %15 = load i16, ptr %arrayidx29, align 2
  %mul = mul i16 %8, %0
  %mul36 = mul i16 %9, %1
  %add37 = add i16 %mul36, %mul
  %mul42 = mul i16 %11, %3
  %add43 = add i16 %add37, %mul42
  %mul48 = mul i16 %10, %2
  %add49 = add i16 %add43, %mul48
  %mul54 = mul i16 %13, %5
  %add55 = add i16 %add49, %mul54
  %mul60 = mul i16 %12, %4
  %add61 = add i16 %add55, %mul60
  %mul66 = mul i16 %15, %7
  %add67 = add i16 %add61, %mul66
  %mul72 = mul i16 %14, %6
  %add73 = add i16 %add67, %mul72
  ret i16 %add73
}

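; reduce_blockstrided4x4: 4x4 byte blocks from %p1/%p2 and from the next rows at %off1/%off2;
; the CHECK lines show <4 x i8> loads gathered into <16 x i8>, widened to <16 x i32> and
; reduced with @llvm.vector.reduce.add.v16i32.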
define i32 @reduce_blockstrided4x4(ptr nocapture noundef readonly %p1, i32 noundef %off1, ptr nocapture noundef readonly %p2, i32 noundef %off2) {
; CHECK-LABEL: @reduce_blockstrided4x4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[OFF1:%.*]] to i64
; CHECK-NEXT:    [[IDX_EXT63:%.*]] = sext i32 [[OFF2:%.*]] to i64
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, ptr [[P1:%.*]], i64 4
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i8, ptr [[P2:%.*]], i64 4
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IDX_EXT]]
; CHECK-NEXT:    [[ADD_PTR64:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IDX_EXT63]]
; CHECK-NEXT:    [[ARRAYIDX3_1:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR]], i64 4
; CHECK-NEXT:    [[ARRAYIDX5_1:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR64]], i64 4
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr [[P1]], align 1
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[P2]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5]], align 1
; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i8>, ptr [[ADD_PTR]], align 1
; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i8>, ptr [[ADD_PTR64]], align 1
; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i8> [[TMP0]], <4 x i8> [[TMP4]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP6]], <16 x i8> [[TMP7]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x i8> [[TMP5]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <16 x i8> [[TMP8]], <16 x i8> [[TMP9]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
; CHECK-NEXT:    [[TMP11:%.*]] = zext <16 x i8> [[TMP10]] to <16 x i32>
; CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_1]], align 1
; CHECK-NEXT:    [[TMP13:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_1]], align 1
; CHECK-NEXT:    [[TMP14:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> [[TMP12]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP15:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP16:%.*]] = shufflevector <16 x i8> [[TMP14]], <16 x i8> [[TMP15]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP17:%.*]] = shufflevector <4 x i8> [[TMP13]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP18:%.*]] = shufflevector <16 x i8> [[TMP16]], <16 x i8> [[TMP17]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
; CHECK-NEXT:    [[TMP19:%.*]] = zext <16 x i8> [[TMP18]] to <16 x i32>
; CHECK-NEXT:    [[TMP20:%.*]] = mul nuw nsw <16 x i32> [[TMP11]], [[TMP19]]
; CHECK-NEXT:    [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP20]])
; CHECK-NEXT:    ret i32 [[TMP21]]
;
entry:
  %idx.ext = sext i32 %off1 to i64
  %idx.ext63 = sext i32 %off2 to i64

  %0 = load i8, ptr %p1, align 1
  %conv = zext i8 %0 to i32
  %1 = load i8, ptr %p2, align 1
  %conv2 = zext i8 %1 to i32
  %arrayidx3 = getelementptr inbounds i8, ptr %p1, i64 4
  %2 = load i8, ptr %arrayidx3, align 1
  %conv4 = zext i8 %2 to i32
  %arrayidx5 = getelementptr inbounds i8, ptr %p2, i64 4
  %3 = load i8, ptr %arrayidx5, align 1
  %conv6 = zext i8 %3 to i32
  %arrayidx8 = getelementptr inbounds i8, ptr %p1, i64 1
  %4 = load i8, ptr %arrayidx8, align 1
  %conv9 = zext i8 %4 to i32
  %arrayidx10 = getelementptr inbounds i8, ptr %p2, i64 1
  %5 = load i8, ptr %arrayidx10, align 1
  %conv11 = zext i8 %5 to i32
  %arrayidx13 = getelementptr inbounds i8, ptr %p1, i64 5
  %6 = load i8, ptr %arrayidx13, align 1
  %conv14 = zext i8 %6 to i32
  %arrayidx15 = getelementptr inbounds i8, ptr %p2, i64 5
  %7 = load i8, ptr %arrayidx15, align 1
  %conv16 = zext i8 %7 to i32
  %arrayidx20 = getelementptr inbounds i8, ptr %p1, i64 2
  %8 = load i8, ptr %arrayidx20, align 1
  %conv21 = zext i8 %8 to i32
  %arrayidx22 = getelementptr inbounds i8, ptr %p2, i64 2
  %9 = load i8, ptr %arrayidx22, align 1
  %conv23 = zext i8 %9 to i32
  %arrayidx25 = getelementptr inbounds i8, ptr %p1, i64 6
  %10 = load i8, ptr %arrayidx25, align 1
  %conv26 = zext i8 %10 to i32
  %arrayidx27 = getelementptr inbounds i8, ptr %p2, i64 6
  %11 = load i8, ptr %arrayidx27, align 1
  %conv28 = zext i8 %11 to i32
  %arrayidx32 = getelementptr inbounds i8, ptr %p1, i64 3
  %12 = load i8, ptr %arrayidx32, align 1
  %conv33 = zext i8 %12 to i32
  %arrayidx34 = getelementptr inbounds i8, ptr %p2, i64 3
  %13 = load i8, ptr %arrayidx34, align 1
  %conv35 = zext i8 %13 to i32
  %arrayidx37 = getelementptr inbounds i8, ptr %p1, i64 7
  %14 = load i8, ptr %arrayidx37, align 1
  %conv38 = zext i8 %14 to i32
  %arrayidx39 = getelementptr inbounds i8, ptr %p2, i64 7
  %15 = load i8, ptr %arrayidx39, align 1
  %conv40 = zext i8 %15 to i32
  %add.ptr = getelementptr inbounds i8, ptr %p1, i64 %idx.ext
  %16 = load i8, ptr %add.ptr, align 1
  %conv.1 = zext i8 %16 to i32
  %add.ptr64 = getelementptr inbounds i8, ptr %p2, i64 %idx.ext63
  %17 = load i8, ptr %add.ptr64, align 1
  %conv2.1 = zext i8 %17 to i32
  %arrayidx3.1 = getelementptr inbounds i8, ptr %add.ptr, i64 4
  %18 = load i8, ptr %arrayidx3.1, align 1
  %conv4.1 = zext i8 %18 to i32
  %arrayidx5.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 4
  %19 = load i8, ptr %arrayidx5.1, align 1
  %conv6.1 = zext i8 %19 to i32
  %arrayidx8.1 = getelementptr inbounds i8, ptr %add.ptr, i64 1
  %20 = load i8, ptr %arrayidx8.1, align 1
  %conv9.1 = zext i8 %20 to i32
  %arrayidx10.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 1
  %21 = load i8, ptr %arrayidx10.1, align 1
  %conv11.1 = zext i8 %21 to i32
  %arrayidx13.1 = getelementptr inbounds i8, ptr %add.ptr, i64 5
  %22 = load i8, ptr %arrayidx13.1, align 1
  %conv14.1 = zext i8 %22 to i32
  %arrayidx15.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 5
  %23 = load i8, ptr %arrayidx15.1, align 1
  %conv16.1 = zext i8 %23 to i32
  %arrayidx20.1 = getelementptr inbounds i8, ptr %add.ptr, i64 2
  %24 = load i8, ptr %arrayidx20.1, align 1
  %conv21.1 = zext i8 %24 to i32
  %arrayidx22.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 2
  %25 = load i8, ptr %arrayidx22.1, align 1
  %conv23.1 = zext i8 %25 to i32
  %arrayidx25.1 = getelementptr inbounds i8, ptr %add.ptr, i64 6
  %26 = load i8, ptr %arrayidx25.1, align 1
  %conv26.1 = zext i8 %26 to i32
  %arrayidx27.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 6
  %27 = load i8, ptr %arrayidx27.1, align 1
  %conv28.1 = zext i8 %27 to i32
  %arrayidx32.1 = getelementptr inbounds i8, ptr %add.ptr, i64 3
  %28 = load i8, ptr %arrayidx32.1, align 1
  %conv33.1 = zext i8 %28 to i32
  %arrayidx34.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 3
  %29 = load i8, ptr %arrayidx34.1, align 1
  %conv35.1 = zext i8 %29 to i32
  %arrayidx37.1 = getelementptr inbounds i8, ptr %add.ptr, i64 7
  %30 = load i8, ptr %arrayidx37.1, align 1
  %conv38.1 = zext i8 %30 to i32
  %arrayidx39.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 7
  %31 = load i8, ptr %arrayidx39.1, align 1
  %conv40.1 = zext i8 %31 to i32
  %add.ptr.1 = getelementptr inbounds i8, ptr %add.ptr, i64 %idx.ext
  %32 = load i8, ptr %add.ptr.1, align 1
  %conv.2 = zext i8 %32 to i32
  %add.ptr64.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 %idx.ext63
  %33 = load i8, ptr %add.ptr64.1, align 1
  %conv2.2 = zext i8 %33 to i32
  %arrayidx3.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 4
  %34 = load i8, ptr %arrayidx3.2, align 1
  %conv4.2 = zext i8 %34 to i32
  %arrayidx5.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 4
  %35 = load i8, ptr %arrayidx5.2, align 1
  %conv6.2 = zext i8 %35 to i32
  %arrayidx8.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 1
  %36 = load i8, ptr %arrayidx8.2, align 1
  %conv9.2 = zext i8 %36 to i32
  %arrayidx10.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 1
  %37 = load i8, ptr %arrayidx10.2, align 1
  %conv11.2 = zext i8 %37 to i32
  %arrayidx13.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 5
  %38 = load i8, ptr %arrayidx13.2, align 1
  %conv14.2 = zext i8 %38 to i32
  %arrayidx15.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 5
  %39 = load i8, ptr %arrayidx15.2, align 1
  %conv16.2 = zext i8 %39 to i32
  %arrayidx20.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 2
  %40 = load i8, ptr %arrayidx20.2, align 1
  %conv21.2 = zext i8 %40 to i32
  %arrayidx22.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 2
  %41 = load i8, ptr %arrayidx22.2, align 1
  %conv23.2 = zext i8 %41 to i32
  %arrayidx25.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 6
  %42 = load i8, ptr %arrayidx25.2, align 1
  %conv26.2 = zext i8 %42 to i32
  %arrayidx27.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 6
  %43 = load i8, ptr %arrayidx27.2, align 1
  %conv28.2 = zext i8 %43 to i32
  %arrayidx32.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 3
  %44 = load i8, ptr %arrayidx32.2, align 1
  %conv33.2 = zext i8 %44 to i32
  %arrayidx34.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 3
  %45 = load i8, ptr %arrayidx34.2, align 1
  %conv35.2 = zext i8 %45 to i32
  %arrayidx37.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 7
  %46 = load i8, ptr %arrayidx37.2, align 1
  %conv38.2 = zext i8 %46 to i32
  %arrayidx39.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 7
  %47 = load i8, ptr %arrayidx39.2, align 1
  %conv40.2 = zext i8 %47 to i32
  %add.ptr.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 %idx.ext
  %48 = load i8, ptr %add.ptr.2, align 1
  %conv.3 = zext i8 %48 to i32
  %add.ptr64.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 %idx.ext63
  %49 = load i8, ptr %add.ptr64.2, align 1
  %conv2.3 = zext i8 %49 to i32
  %arrayidx3.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 4
  %50 = load i8, ptr %arrayidx3.3, align 1
  %conv4.3 = zext i8 %50 to i32
  %arrayidx5.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 4
  %51 = load i8, ptr %arrayidx5.3, align 1
  %conv6.3 = zext i8 %51 to i32
  %arrayidx8.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 1
  %52 = load i8, ptr %arrayidx8.3, align 1
  %conv9.3 = zext i8 %52 to i32
  %arrayidx10.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 1
  %53 = load i8, ptr %arrayidx10.3, align 1
  %conv11.3 = zext i8 %53 to i32
  %arrayidx13.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 5
  %54 = load i8, ptr %arrayidx13.3, align 1
  %conv14.3 = zext i8 %54 to i32
  %arrayidx15.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 5
  %55 = load i8, ptr %arrayidx15.3, align 1
  %conv16.3 = zext i8 %55 to i32
  %arrayidx20.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 2
  %56 = load i8, ptr %arrayidx20.3, align 1
  %conv21.3 = zext i8 %56 to i32
  %arrayidx22.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 2
  %57 = load i8, ptr %arrayidx22.3, align 1
  %conv23.3 = zext i8 %57 to i32
  %arrayidx25.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 6
  %58 = load i8, ptr %arrayidx25.3, align 1
  %conv26.3 = zext i8 %58 to i32
  %arrayidx27.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 6
  %59 = load i8, ptr %arrayidx27.3, align 1
  %conv28.3 = zext i8 %59 to i32
  %arrayidx32.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 3
  %60 = load i8, ptr %arrayidx32.3, align 1
  %conv33.3 = zext i8 %60 to i32
  %arrayidx34.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 3
  %61 = load i8, ptr %arrayidx34.3, align 1
  %conv35.3 = zext i8 %61 to i32
  %arrayidx37.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 7
  %62 = load i8, ptr %arrayidx37.3, align 1
  %conv38.3 = zext i8 %62 to i32
  %arrayidx39.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 7
  %63 = load i8, ptr %arrayidx39.3, align 1
  %conv40.3 = zext i8 %63 to i32

  %m1 = mul i32 %conv, %conv4
  %m2 = mul i32 %conv9, %conv14
  %m3 = mul i32 %conv21, %conv26
  %m4 = mul i32 %conv33, %conv38
  %m8 = mul i32 %conv2, %conv6
  %m7 = mul i32 %conv11, %conv16
  %m6 = mul i32 %conv23, %conv28
  %m5 = mul i32 %conv35, %conv40
  %m9 = mul i32 %conv.1, %conv4.1
  %m10 = mul i32 %conv9.1, %conv14.1
  %m11 = mul i32 %conv21.1, %conv26.1
  %m12 = mul i32 %conv33.1, %conv38.1
  %m16 = mul i32 %conv2.1, %conv6.1
  %m15 = mul i32 %conv11.1, %conv16.1
  %m14 = mul i32 %conv23.1, %conv28.1
  %m13 = mul i32 %conv35.1, %conv40.1

  %a2 = add i32 %m1, %m2
  %a3 = add i32 %a2, %m3
  %a4 = add i32 %a3, %m4
  %a5 = add i32 %a4, %m5
  %a6 = add i32 %a5, %m6
  %a7 = add i32 %a6, %m7
  %a8 = add i32 %a7, %m8
  %a9 = add i32 %a8, %m9
  %a10 = add i32 %a9, %m10
  %a11 = add i32 %a10, %m11
  %a12 = add i32 %a11, %m12
  %a13 = add i32 %a12, %m13
  %a14 = add i32 %a13, %m14
  %a15 = add i32 %a14, %m15
  %a16 = add i32 %a15, %m16
  ret i32 %a16
}

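; store_blockstrided3: strided blocks of i32s from %x and %y whose products are stored to %z
; in a partially reversed order; the CHECK lines show a mix of scalar stores plus <2 x i32>
; and <4 x i32> vector stores.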
define void @store_blockstrided3(ptr nocapture noundef readonly %x, ptr nocapture noundef readonly %y, ptr nocapture noundef writeonly %z, i32 noundef %stride) {
; CHECK-LABEL: @store_blockstrided3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i64 8
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[ADD4:%.*]] = add nsw i32 [[STRIDE:%.*]], 1
; CHECK-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[ADD4]] to i64
; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM5]]
; CHECK-NEXT:    [[MUL:%.*]] = shl nsw i32 [[STRIDE]], 1
; CHECK-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM11]]
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX12]], align 4
; CHECK-NEXT:    [[ADD14:%.*]] = or disjoint i32 [[MUL]], 1
; CHECK-NEXT:    [[IDXPROM15:%.*]] = sext i32 [[ADD14]] to i64
; CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM15]]
; CHECK-NEXT:    [[MUL21:%.*]] = mul nsw i32 [[STRIDE]], 3
; CHECK-NEXT:    [[IDXPROM23:%.*]] = sext i32 [[MUL21]] to i64
; CHECK-NEXT:    [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM23]]
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX24]], align 4
; CHECK-NEXT:    [[ADD26:%.*]] = add nsw i32 [[MUL21]], 1
; CHECK-NEXT:    [[IDXPROM27:%.*]] = sext i32 [[ADD26]] to i64
; CHECK-NEXT:    [[ARRAYIDX28:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM27]]
; CHECK-NEXT:    [[ARRAYIDX35:%.*]] = getelementptr inbounds i8, ptr [[Y:%.*]], i64 8
; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX35]], align 4
; CHECK-NEXT:    [[ARRAYIDX41:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 [[IDXPROM5]]
; CHECK-NEXT:    [[ARRAYIDX48:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 [[IDXPROM11]]
; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX48]], align 4
; CHECK-NEXT:    [[ARRAYIDX52:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 [[IDXPROM15]]
; CHECK-NEXT:    [[ARRAYIDX60:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 [[IDXPROM23]]
; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX60]], align 4
; CHECK-NEXT:    [[ARRAYIDX64:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 [[IDXPROM27]]
; CHECK-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds i8, ptr [[Z:%.*]], i64 4
; CHECK-NEXT:    [[MUL73:%.*]] = mul nsw i32 [[TMP3]], [[TMP0]]
; CHECK-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds i8, ptr [[Z]], i64 24
; CHECK-NEXT:    [[TMP6:%.*]] = load <2 x i32>, ptr [[X]], align 4
; CHECK-NEXT:    [[TMP7:%.*]] = load <2 x i32>, ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT:    [[TMP8:%.*]] = load <2 x i32>, ptr [[Y]], align 4
; CHECK-NEXT:    [[TMP9:%.*]] = load <2 x i32>, ptr [[ARRAYIDX41]], align 4
; CHECK-NEXT:    [[TMP10:%.*]] = mul nsw <2 x i32> [[TMP8]], [[TMP6]]
; CHECK-NEXT:    [[TMP11:%.*]] = mul nsw <2 x i32> [[TMP9]], [[TMP7]]
; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <2 x i32> [[TMP10]], <2 x i32> [[TMP11]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
; CHECK-NEXT:    [[MUL81:%.*]] = mul nsw i32 [[TMP4]], [[TMP1]]
; CHECK-NEXT:    [[ARRAYIDX82:%.*]] = getelementptr inbounds i8, ptr [[Z]], i64 32
; CHECK-NEXT:    [[TMP13:%.*]] = load <2 x i32>, ptr [[ARRAYIDX16]], align 4
; CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i32>, ptr [[ARRAYIDX52]], align 4
; CHECK-NEXT:    [[TMP15:%.*]] = mul nsw <2 x i32> [[TMP14]], [[TMP13]]
; CHECK-NEXT:    [[TMP16:%.*]] = shufflevector <2 x i32> [[TMP15]], <2 x i32> poison, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT:    [[MUL87:%.*]] = mul nsw i32 [[TMP5]], [[TMP2]]
; CHECK-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds i8, ptr [[Z]], i64 44
; CHECK-NEXT:    [[ARRAYIDX92:%.*]] = getelementptr inbounds i8, ptr [[Z]], i64 36
; CHECK-NEXT:    [[TMP17:%.*]] = load <2 x i32>, ptr [[ARRAYIDX28]], align 4
; CHECK-NEXT:    [[TMP18:%.*]] = load <2 x i32>, ptr [[ARRAYIDX64]], align 4
; CHECK-NEXT:    store i32 [[MUL73]], ptr [[Z]], align 4
; CHECK-NEXT:    store <4 x i32> [[TMP12]], ptr [[ARRAYIDX72]], align 4
; CHECK-NEXT:    store i32 [[MUL81]], ptr [[ARRAYIDX82]], align 4
; CHECK-NEXT:    store <2 x i32> [[TMP16]], ptr [[ARRAYIDX76]], align 4
; CHECK-NEXT:    store i32 [[MUL87]], ptr [[ARRAYIDX88]], align 4
; CHECK-NEXT:    [[TMP19:%.*]] = mul nsw <2 x i32> [[TMP18]], [[TMP17]]
; CHECK-NEXT:    [[TMP20:%.*]] = shufflevector <2 x i32> [[TMP19]], <2 x i32> poison, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT:    store <2 x i32> [[TMP20]], ptr [[ARRAYIDX92]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %0 = load i32, ptr %x, align 4
  %arrayidx1 = getelementptr inbounds i32, ptr %x, i64 1
  %1 = load i32, ptr %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, ptr %x, i64 2
  %2 = load i32, ptr %arrayidx2, align 4
  %add4 = add nsw i32 %stride, 1
  %idxprom5 = sext i32 %add4 to i64
  %arrayidx6 = getelementptr inbounds i32, ptr %x, i64 %idxprom5
  %3 = load i32, ptr %arrayidx6, align 4
  %add7 = add nsw i32 %stride, 2
  %idxprom8 = sext i32 %add7 to i64
  %arrayidx9 = getelementptr inbounds i32, ptr %x, i64 %idxprom8
  %4 = load i32, ptr %arrayidx9, align 4
  %mul = shl nsw i32 %stride, 1
  %idxprom11 = sext i32 %mul to i64
  %arrayidx12 = getelementptr inbounds i32, ptr %x, i64 %idxprom11
  %5 = load i32, ptr %arrayidx12, align 4
  %add14 = or disjoint i32 %mul, 1
  %idxprom15 = sext i32 %add14 to i64
  %arrayidx16 = getelementptr inbounds i32, ptr %x, i64 %idxprom15
  %6 = load i32, ptr %arrayidx16, align 4
  %add18 = add nsw i32 %mul, 2
  %idxprom19 = sext i32 %add18 to i64
  %arrayidx20 = getelementptr inbounds i32, ptr %x, i64 %idxprom19
  %7 = load i32, ptr %arrayidx20, align 4
  %mul21 = mul nsw i32 %stride, 3
  %idxprom23 = sext i32 %mul21 to i64
  %arrayidx24 = getelementptr inbounds i32, ptr %x, i64 %idxprom23
  %8 = load i32, ptr %arrayidx24, align 4
  %add26 = add nsw i32 %mul21, 1
  %idxprom27 = sext i32 %add26 to i64
  %arrayidx28 = getelementptr inbounds i32, ptr %x, i64 %idxprom27
  %9 = load i32, ptr %arrayidx28, align 4
  %add30 = add nsw i32 %mul21, 2
  %idxprom31 = sext i32 %add30 to i64
  %arrayidx32 = getelementptr inbounds i32, ptr %x, i64 %idxprom31
  %10 = load i32, ptr %arrayidx32, align 4
  %11 = load i32, ptr %y, align 4
  %arrayidx34 = getelementptr inbounds i32, ptr %y, i64 1
  %12 = load i32, ptr %arrayidx34, align 4
  %arrayidx35 = getelementptr inbounds i32, ptr %y, i64 2
  %13 = load i32, ptr %arrayidx35, align 4
  %arrayidx41 = getelementptr inbounds i32, ptr %y, i64 %idxprom5
  %14 = load i32, ptr %arrayidx41, align 4
  %arrayidx44 = getelementptr inbounds i32, ptr %y, i64 %idxprom8
  %15 = load i32, ptr %arrayidx44, align 4
  %arrayidx48 = getelementptr inbounds i32, ptr %y, i64 %idxprom11
  %16 = load i32, ptr %arrayidx48, align 4
  %arrayidx52 = getelementptr inbounds i32, ptr %y, i64 %idxprom15
  %17 = load i32, ptr %arrayidx52, align 4
  %arrayidx56 = getelementptr inbounds i32, ptr %y, i64 %idxprom19
  %18 = load i32, ptr %arrayidx56, align 4
  %arrayidx60 = getelementptr inbounds i32, ptr %y, i64 %idxprom23
  %19 = load i32, ptr %arrayidx60, align 4
  %arrayidx64 = getelementptr inbounds i32, ptr %y, i64 %idxprom27
  %20 = load i32, ptr %arrayidx64, align 4
  %arrayidx68 = getelementptr inbounds i32, ptr %y, i64 %idxprom31
  %21 = load i32, ptr %arrayidx68, align 4
  %mul69 = mul nsw i32 %11, %0
  %arrayidx70 = getelementptr inbounds i32, ptr %z, i64 2
  store i32 %mul69, ptr %arrayidx70, align 4
  %mul71 = mul nsw i32 %12, %1
  %arrayidx72 = getelementptr inbounds i32, ptr %z, i64 1
  store i32 %mul71, ptr %arrayidx72, align 4
  %mul73 = mul nsw i32 %13, %2
  store i32 %mul73, ptr %z, align 4
  %arrayidx76 = getelementptr inbounds i32, ptr %z, i64 6
  %mul77 = mul nsw i32 %14, %3
  %arrayidx78 = getelementptr inbounds i32, ptr %z, i64 4
  store i32 %mul77, ptr %arrayidx78, align 4
  %mul79 = mul nsw i32 %15, %4
  %arrayidx80 = getelementptr inbounds i32, ptr %z, i64 3
  store i32 %mul79, ptr %arrayidx80, align 4
  %mul81 = mul nsw i32 %16, %5
  %arrayidx82 = getelementptr inbounds i32, ptr %z, i64 8
  store i32 %mul81, ptr %arrayidx82, align 4
  %mul83 = mul nsw i32 %17, %6
  %arrayidx84 = getelementptr inbounds i32, ptr %z, i64 7
  store i32 %mul83, ptr %arrayidx84, align 4
  %mul85 = mul nsw i32 %18, %7
  store i32 %mul85, ptr %arrayidx76, align 4
  %mul87 = mul nsw i32 %19, %8
  %arrayidx88 = getelementptr inbounds i32, ptr %z, i64 11
  store i32 %mul87, ptr %arrayidx88, align 4
  %mul89 = mul nsw i32 %20, %9
  %arrayidx90 = getelementptr inbounds i32, ptr %z, i64 10
  store i32 %mul89, ptr %arrayidx90, align 4
  %mul91 = mul nsw i32 %21, %10
  %arrayidx92 = getelementptr inbounds i32, ptr %z, i64 9
  store i32 %mul91, ptr %arrayidx92, align 4
  ret void
}

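; store_blockstrided4: same block loads as @reduce_blockstrided4, but the eight products are
; stored to %dst0..+7; per the CHECK lines they become a single shuffled <8 x i16> store.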
define void @store_blockstrided4(ptr nocapture noundef readonly %x, ptr nocapture noundef readonly %y, i32 noundef %stride, ptr %dst0) {
; CHECK-LABEL: @store_blockstrided4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[STRIDE:%.*]] to i64
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[X]], align 2
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 2
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i16>, ptr [[Y]], align 2
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i16>, ptr [[ARRAYIDX20]], align 2
; CHECK-NEXT:    [[TMP4:%.*]] = mul <4 x i16> [[TMP2]], [[TMP0]]
; CHECK-NEXT:    [[TMP5:%.*]] = mul <4 x i16> [[TMP3]], [[TMP1]]
; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i16> [[TMP4]], <4 x i16> [[TMP5]], <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
; CHECK-NEXT:    store <8 x i16> [[TMP6]], ptr [[DST0:%.*]], align 2
; CHECK-NEXT:    ret void
;
entry:
  %0 = load i16, ptr %x, align 2
  %arrayidx1 = getelementptr inbounds i16, ptr %x, i64 1
  %1 = load i16, ptr %arrayidx1, align 2
  %arrayidx2 = getelementptr inbounds i16, ptr %x, i64 2
  %2 = load i16, ptr %arrayidx2, align 2
  %arrayidx3 = getelementptr inbounds i16, ptr %x, i64 3
  %3 = load i16, ptr %arrayidx3, align 2
  %idxprom = sext i32 %stride to i64
  %arrayidx4 = getelementptr inbounds i16, ptr %x, i64 %idxprom
  %4 = load i16, ptr %arrayidx4, align 2
  %add5 = add nsw i32 %stride, 1
  %idxprom6 = sext i32 %add5 to i64
  %arrayidx7 = getelementptr inbounds i16, ptr %x, i64 %idxprom6
  %5 = load i16, ptr %arrayidx7, align 2
  %add8 = add nsw i32 %stride, 2
  %idxprom9 = sext i32 %add8 to i64
  %arrayidx10 = getelementptr inbounds i16, ptr %x, i64 %idxprom9
  %6 = load i16, ptr %arrayidx10, align 2
  %add11 = add nsw i32 %stride, 3
  %idxprom12 = sext i32 %add11 to i64
  %arrayidx13 = getelementptr inbounds i16, ptr %x, i64 %idxprom12
  %7 = load i16, ptr %arrayidx13, align 2
  %8 = load i16, ptr %y, align 2
  %arrayidx15 = getelementptr inbounds i16, ptr %y, i64 1
  %9 = load i16, ptr %arrayidx15, align 2
  %arrayidx16 = getelementptr inbounds i16, ptr %y, i64 2
  %10 = load i16, ptr %arrayidx16, align 2
  %arrayidx17 = getelementptr inbounds i16, ptr %y, i64 3
  %11 = load i16, ptr %arrayidx17, align 2
  %arrayidx20 = getelementptr inbounds i16, ptr %y, i64 %idxprom
  %12 = load i16, ptr %arrayidx20, align 2
  %arrayidx23 = getelementptr inbounds i16, ptr %y, i64 %idxprom6
  %13 = load i16, ptr %arrayidx23, align 2
  %arrayidx26 = getelementptr inbounds i16, ptr %y, i64 %idxprom9
  %14 = load i16, ptr %arrayidx26, align 2
  %arrayidx29 = getelementptr inbounds i16, ptr %y, i64 %idxprom12
  %15 = load i16, ptr %arrayidx29, align 2
  %mul = mul i16 %8, %0
  %mul36 = mul i16 %9, %1
  %mul42 = mul i16 %11, %3
  %mul48 = mul i16 %10, %2
  %mul54 = mul i16 %13, %5
  %mul60 = mul i16 %12, %4
  %mul66 = mul i16 %15, %7
  %mul72 = mul i16 %14, %6
  %dst1 = getelementptr inbounds i16, ptr %dst0, i64 1
  %dst2 = getelementptr inbounds i16, ptr %dst0, i64 2
  %dst3 = getelementptr inbounds i16, ptr %dst0, i64 3
  %dst4 = getelementptr inbounds i16, ptr %dst0, i64 4
  %dst5 = getelementptr inbounds i16, ptr %dst0, i64 5
  %dst6 = getelementptr inbounds i16, ptr %dst0, i64 6
  %dst7 = getelementptr inbounds i16, ptr %dst0, i64 7
  store i16 %mul, ptr %dst0
  store i16 %mul36, ptr %dst1
  store i16 %mul42, ptr %dst2
  store i16 %mul48, ptr %dst3
  store i16 %mul54, ptr %dst4
  store i16 %mul60, ptr %dst5
  store i16 %mul66, ptr %dst6
  store i16 %mul72, ptr %dst7
  ret void
}

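; store_blockstrided4x4: 4x4 byte blocks from %p1/%p2 and the rows at %off1/%off2, with the
; i32 products stored contiguously from %dst0; the CHECK lines show <4 x i8> loads widened to
; <4 x i32>, multiplied and stored as four <4 x i32> vectors.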
define void @store_blockstrided4x4(ptr nocapture noundef readonly %p1, i32 noundef %off1, ptr nocapture noundef readonly %p2, i32 noundef %off2, ptr %dst0) {
; CHECK-LABEL: @store_blockstrided4x4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[OFF1:%.*]] to i64
; CHECK-NEXT:    [[IDX_EXT63:%.*]] = sext i32 [[OFF2:%.*]] to i64
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, ptr [[P1:%.*]], i64 4
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i8, ptr [[P2:%.*]], i64 4
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IDX_EXT]]
; CHECK-NEXT:    [[ADD_PTR64:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IDX_EXT63]]
; CHECK-NEXT:    [[ARRAYIDX3_1:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR]], i64 4
; CHECK-NEXT:    [[ARRAYIDX5_1:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR64]], i64 4
; CHECK-NEXT:    [[DST4:%.*]] = getelementptr inbounds i8, ptr [[DST0:%.*]], i64 16
; CHECK-NEXT:    [[DST8:%.*]] = getelementptr inbounds i8, ptr [[DST0]], i64 32
; CHECK-NEXT:    [[DST12:%.*]] = getelementptr inbounds i8, ptr [[DST0]], i64 48
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr [[P1]], align 1
; CHECK-NEXT:    [[TMP1:%.*]] = zext <4 x i8> [[TMP0]] to <4 x i32>
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT:    [[TMP3:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i32>
; CHECK-NEXT:    [[TMP4:%.*]] = mul nuw nsw <4 x i32> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i8>, ptr [[P2]], align 1
; CHECK-NEXT:    [[TMP6:%.*]] = zext <4 x i8> [[TMP5]] to <4 x i32>
; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5]], align 1
; CHECK-NEXT:    [[TMP8:%.*]] = zext <4 x i8> [[TMP7]] to <4 x i32>
; CHECK-NEXT:    [[TMP9:%.*]] = mul nuw nsw <4 x i32> [[TMP6]], [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i8>, ptr [[ADD_PTR]], align 1
; CHECK-NEXT:    [[TMP11:%.*]] = zext <4 x i8> [[TMP10]] to <4 x i32>
; CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_1]], align 1
; CHECK-NEXT:    [[TMP13:%.*]] = zext <4 x i8> [[TMP12]] to <4 x i32>
; CHECK-NEXT:    [[TMP14:%.*]] = mul nuw nsw <4 x i32> [[TMP11]], [[TMP13]]
; CHECK-NEXT:    [[TMP15:%.*]] = load <4 x i8>, ptr [[ADD_PTR64]], align 1
; CHECK-NEXT:    [[TMP16:%.*]] = zext <4 x i8> [[TMP15]] to <4 x i32>
; CHECK-NEXT:    [[TMP17:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_1]], align 1
; CHECK-NEXT:    [[TMP18:%.*]] = zext <4 x i8> [[TMP17]] to <4 x i32>
; CHECK-NEXT:    [[TMP19:%.*]] = mul nuw nsw <4 x i32> [[TMP16]], [[TMP18]]
; CHECK-NEXT:    store <4 x i32> [[TMP4]], ptr [[DST0]], align 4
; CHECK-NEXT:    store <4 x i32> [[TMP9]], ptr [[DST4]], align 4
; CHECK-NEXT:    store <4 x i32> [[TMP14]], ptr [[DST8]], align 4
; CHECK-NEXT:    store <4 x i32> [[TMP19]], ptr [[DST12]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %idx.ext = sext i32 %off1 to i64
  %idx.ext63 = sext i32 %off2 to i64

  %0 = load i8, ptr %p1, align 1
  %conv = zext i8 %0 to i32
  %1 = load i8, ptr %p2, align 1
  %conv2 = zext i8 %1 to i32
  %arrayidx3 = getelementptr inbounds i8, ptr %p1, i64 4
  %2 = load i8, ptr %arrayidx3, align 1
  %conv4 = zext i8 %2 to i32
  %arrayidx5 = getelementptr inbounds i8, ptr %p2, i64 4
  %3 = load i8, ptr %arrayidx5, align 1
  %conv6 = zext i8 %3 to i32
  %arrayidx8 = getelementptr inbounds i8, ptr %p1, i64 1
  %4 = load i8, ptr %arrayidx8, align 1
  %conv9 = zext i8 %4 to i32
  %arrayidx10 = getelementptr inbounds i8, ptr %p2, i64 1
  %5 = load i8, ptr %arrayidx10, align 1
  %conv11 = zext i8 %5 to i32
  %arrayidx13 = getelementptr inbounds i8, ptr %p1, i64 5
  %6 = load i8, ptr %arrayidx13, align 1
  %conv14 = zext i8 %6 to i32
  %arrayidx15 = getelementptr inbounds i8, ptr %p2, i64 5
  %7 = load i8, ptr %arrayidx15, align 1
  %conv16 = zext i8 %7 to i32
  %arrayidx20 = getelementptr inbounds i8, ptr %p1, i64 2
  %8 = load i8, ptr %arrayidx20, align 1
  %conv21 = zext i8 %8 to i32
  %arrayidx22 = getelementptr inbounds i8, ptr %p2, i64 2
  %9 = load i8, ptr %arrayidx22, align 1
  %conv23 = zext i8 %9 to i32
  %arrayidx25 = getelementptr inbounds i8, ptr %p1, i64 6
  %10 = load i8, ptr %arrayidx25, align 1
  %conv26 = zext i8 %10 to i32
  %arrayidx27 = getelementptr inbounds i8, ptr %p2, i64 6
  %11 = load i8, ptr %arrayidx27, align 1
  %conv28 = zext i8 %11 to i32
  %arrayidx32 = getelementptr inbounds i8, ptr %p1, i64 3
  %12 = load i8, ptr %arrayidx32, align 1
  %conv33 = zext i8 %12 to i32
  %arrayidx34 = getelementptr inbounds i8, ptr %p2, i64 3
  %13 = load i8, ptr %arrayidx34, align 1
  %conv35 = zext i8 %13 to i32
  %arrayidx37 = getelementptr inbounds i8, ptr %p1, i64 7
  %14 = load i8, ptr %arrayidx37, align 1
  %conv38 = zext i8 %14 to i32
  %arrayidx39 = getelementptr inbounds i8, ptr %p2, i64 7
  %15 = load i8, ptr %arrayidx39, align 1
  %conv40 = zext i8 %15 to i32
  %add.ptr = getelementptr inbounds i8, ptr %p1, i64 %idx.ext
  %16 = load i8, ptr %add.ptr, align 1
  %conv.1 = zext i8 %16 to i32
  %add.ptr64 = getelementptr inbounds i8, ptr %p2, i64 %idx.ext63
  %17 = load i8, ptr %add.ptr64, align 1
  %conv2.1 = zext i8 %17 to i32
  %arrayidx3.1 = getelementptr inbounds i8, ptr %add.ptr, i64 4
  %18 = load i8, ptr %arrayidx3.1, align 1
  %conv4.1 = zext i8 %18 to i32
  %arrayidx5.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 4
  %19 = load i8, ptr %arrayidx5.1, align 1
  %conv6.1 = zext i8 %19 to i32
  %arrayidx8.1 = getelementptr inbounds i8, ptr %add.ptr, i64 1
  %20 = load i8, ptr %arrayidx8.1, align 1
  %conv9.1 = zext i8 %20 to i32
  %arrayidx10.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 1
  %21 = load i8, ptr %arrayidx10.1, align 1
  %conv11.1 = zext i8 %21 to i32
  %arrayidx13.1 = getelementptr inbounds i8, ptr %add.ptr, i64 5
  %22 = load i8, ptr %arrayidx13.1, align 1
  %conv14.1 = zext i8 %22 to i32
  %arrayidx15.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 5
  %23 = load i8, ptr %arrayidx15.1, align 1
  %conv16.1 = zext i8 %23 to i32
  %arrayidx20.1 = getelementptr inbounds i8, ptr %add.ptr, i64 2
  %24 = load i8, ptr %arrayidx20.1, align 1
  %conv21.1 = zext i8 %24 to i32
  %arrayidx22.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 2
  %25 = load i8, ptr %arrayidx22.1, align 1
  %conv23.1 = zext i8 %25 to i32
  %arrayidx25.1 = getelementptr inbounds i8, ptr %add.ptr, i64 6
  %26 = load i8, ptr %arrayidx25.1, align 1
  %conv26.1 = zext i8 %26 to i32
  %arrayidx27.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 6
  %27 = load i8, ptr %arrayidx27.1, align 1
  %conv28.1 = zext i8 %27 to i32
  %arrayidx32.1 = getelementptr inbounds i8, ptr %add.ptr, i64 3
  %28 = load i8, ptr %arrayidx32.1, align 1
  %conv33.1 = zext i8 %28 to i32
  %arrayidx34.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 3
  %29 = load i8, ptr %arrayidx34.1, align 1
  %conv35.1 = zext i8 %29 to i32
  %arrayidx37.1 = getelementptr inbounds i8, ptr %add.ptr, i64 7
  %30 = load i8, ptr %arrayidx37.1, align 1
  %conv38.1 = zext i8 %30 to i32
  %arrayidx39.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 7
  %31 = load i8, ptr %arrayidx39.1, align 1
  %conv40.1 = zext i8 %31 to i32
  %add.ptr.1 = getelementptr inbounds i8, ptr %add.ptr, i64 %idx.ext
  %32 = load i8, ptr %add.ptr.1, align 1
  %conv.2 = zext i8 %32 to i32
  %add.ptr64.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 %idx.ext63
  %33 = load i8, ptr %add.ptr64.1, align 1
  %conv2.2 = zext i8 %33 to i32
  %arrayidx3.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 4
  %34 = load i8, ptr %arrayidx3.2, align 1
  %conv4.2 = zext i8 %34 to i32
  %arrayidx5.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 4
  %35 = load i8, ptr %arrayidx5.2, align 1
  %conv6.2 = zext i8 %35 to i32
  %arrayidx8.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 1
  %36 = load i8, ptr %arrayidx8.2, align 1
  %conv9.2 = zext i8 %36 to i32
  %arrayidx10.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 1
  %37 = load i8, ptr %arrayidx10.2, align 1
  %conv11.2 = zext i8 %37 to i32
  %arrayidx13.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 5
  %38 = load i8, ptr %arrayidx13.2, align 1
  %conv14.2 = zext i8 %38 to i32
  %arrayidx15.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 5
  %39 = load i8, ptr %arrayidx15.2, align 1
  %conv16.2 = zext i8 %39 to i32
  %arrayidx20.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 2
  %40 = load i8, ptr %arrayidx20.2, align 1
  %conv21.2 = zext i8 %40 to i32
  %arrayidx22.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 2
  %41 = load i8, ptr %arrayidx22.2, align 1
  %conv23.2 = zext i8 %41 to i32
  %arrayidx25.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 6
  %42 = load i8, ptr %arrayidx25.2, align 1
  %conv26.2 = zext i8 %42 to i32
  %arrayidx27.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 6
  %43 = load i8, ptr %arrayidx27.2, align 1
  %conv28.2 = zext i8 %43 to i32
  %arrayidx32.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 3
  %44 = load i8, ptr %arrayidx32.2, align 1
  %conv33.2 = zext i8 %44 to i32
  %arrayidx34.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 3
  %45 = load i8, ptr %arrayidx34.2, align 1
  %conv35.2 = zext i8 %45 to i32
  %arrayidx37.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 7
  %46 = load i8, ptr %arrayidx37.2, align 1
  %conv38.2 = zext i8 %46 to i32
  %arrayidx39.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 7
  %47 = load i8, ptr %arrayidx39.2, align 1
  %conv40.2 = zext i8 %47 to i32
  %add.ptr.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 %idx.ext
  %48 = load i8, ptr %add.ptr.2, align 1
  %conv.3 = zext i8 %48 to i32
  %add.ptr64.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 %idx.ext63
  %49 = load i8, ptr %add.ptr64.2, align 1
  %conv2.3 = zext i8 %49 to i32
  %arrayidx3.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 4
  %50 = load i8, ptr %arrayidx3.3, align 1
  %conv4.3 = zext i8 %50 to i32
  %arrayidx5.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 4
  %51 = load i8, ptr %arrayidx5.3, align 1
  %conv6.3 = zext i8 %51 to i32
  %arrayidx8.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 1
  %52 = load i8, ptr %arrayidx8.3, align 1
  %conv9.3 = zext i8 %52 to i32
  %arrayidx10.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 1
  %53 = load i8, ptr %arrayidx10.3, align 1
  %conv11.3 = zext i8 %53 to i32
  %arrayidx13.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 5
  %54 = load i8, ptr %arrayidx13.3, align 1
  %conv14.3 = zext i8 %54 to i32
  %arrayidx15.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 5
  %55 = load i8, ptr %arrayidx15.3, align 1
  %conv16.3 = zext i8 %55 to i32
  %arrayidx20.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 2
  %56 = load i8, ptr %arrayidx20.3, align 1
  %conv21.3 = zext i8 %56 to i32
  %arrayidx22.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 2
  %57 = load i8, ptr %arrayidx22.3, align 1
  %conv23.3 = zext i8 %57 to i32
  %arrayidx25.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 6
  %58 = load i8, ptr %arrayidx25.3, align 1
  %conv26.3 = zext i8 %58 to i32
  %arrayidx27.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 6
  %59 = load i8, ptr %arrayidx27.3, align 1
  %conv28.3 = zext i8 %59 to i32
  %arrayidx32.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 3
  %60 = load i8, ptr %arrayidx32.3, align 1
  %conv33.3 = zext i8 %60 to i32
  %arrayidx34.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 3
  %61 = load i8, ptr %arrayidx34.3, align 1
  %conv35.3 = zext i8 %61 to i32
  %arrayidx37.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 7
  %62 = load i8, ptr %arrayidx37.3, align 1
  %conv38.3 = zext i8 %62 to i32
  %arrayidx39.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 7
  %63 = load i8, ptr %arrayidx39.3, align 1
  %conv40.3 = zext i8 %63 to i32

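  ; The 16 pairwise products below are stored to 16 consecutive i32 slots of %dst0.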
  %m1 = mul i32 %conv, %conv4
  %m2 = mul i32 %conv9, %conv14
  %m3 = mul i32 %conv21, %conv26
  %m4 = mul i32 %conv33, %conv38
  %m5 = mul i32 %conv2, %conv6
  %m6 = mul i32 %conv11, %conv16
  %m7 = mul i32 %conv23, %conv28
  %m8 = mul i32 %conv35, %conv40
  %m9 = mul i32 %conv.1, %conv4.1
  %m10 = mul i32 %conv9.1, %conv14.1
  %m11 = mul i32 %conv21.1, %conv26.1
  %m12 = mul i32 %conv33.1, %conv38.1
  %m13 = mul i32 %conv2.1, %conv6.1
  %m14 = mul i32 %conv11.1, %conv16.1
  %m15 = mul i32 %conv23.1, %conv28.1
  %m16 = mul i32 %conv35.1, %conv40.1

  %dst1 = getelementptr inbounds i32, ptr %dst0, i64 1
  %dst2 = getelementptr inbounds i32, ptr %dst0, i64 2
  %dst3 = getelementptr inbounds i32, ptr %dst0, i64 3
  %dst4 = getelementptr inbounds i32, ptr %dst0, i64 4
  %dst5 = getelementptr inbounds i32, ptr %dst0, i64 5
  %dst6 = getelementptr inbounds i32, ptr %dst0, i64 6
  %dst7 = getelementptr inbounds i32, ptr %dst0, i64 7
  %dst8 = getelementptr inbounds i32, ptr %dst0, i64 8
  %dst9 = getelementptr inbounds i32, ptr %dst0, i64 9
  %dst10 = getelementptr inbounds i32, ptr %dst0, i64 10
  %dst11 = getelementptr inbounds i32, ptr %dst0, i64 11
  %dst12 = getelementptr inbounds i32, ptr %dst0, i64 12
  %dst13 = getelementptr inbounds i32, ptr %dst0, i64 13
  %dst14 = getelementptr inbounds i32, ptr %dst0, i64 14
  %dst15 = getelementptr inbounds i32, ptr %dst0, i64 15
  store i32 %m1, ptr %dst0
  store i32 %m2, ptr %dst1
  store i32 %m3, ptr %dst2
  store i32 %m4, ptr %dst3
  store i32 %m5, ptr %dst4
  store i32 %m6, ptr %dst5
  store i32 %m7, ptr %dst6
  store i32 %m8, ptr %dst7
  store i32 %m9, ptr %dst8
  store i32 %m10, ptr %dst9
  store i32 %m11, ptr %dst10
  store i32 %m12, ptr %dst11
  store i32 %m13, ptr %dst12
  store i32 %m14, ptr %dst13
  store i32 %m15, ptr %dst14
  store i32 %m16, ptr %dst15
  ret void
}

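; SATD-style kernel: for four rows of p1/p2 (advanced by st1/st2), take byte
; differences, pack pairs into the 16-bit halves of an i32, run add/sub
; butterflies within and across rows, fold in packed absolute values, and
; return the halved sum of the two 16-bit halves of the accumulator.
; The CHECK lines expect each half-row to be loaded as <4 x i8> and the whole
; transform to stay in <16 x i32> vectors ending in a vector.reduce.add.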
define dso_local i32 @full(ptr nocapture noundef readonly %p1, i32 noundef %st1, ptr nocapture noundef readonly %p2, i32 noundef %st2) {
; CHECK-LABEL: @full(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[ST1:%.*]] to i64
; CHECK-NEXT:    [[IDX_EXT63:%.*]] = sext i32 [[ST2:%.*]] to i64
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, ptr [[P1:%.*]], i64 4
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i8, ptr [[P2:%.*]], i64 4
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IDX_EXT]]
; CHECK-NEXT:    [[ADD_PTR64:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IDX_EXT63]]
; CHECK-NEXT:    [[ARRAYIDX3_1:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR]], i64 4
; CHECK-NEXT:    [[ARRAYIDX5_1:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR64]], i64 4
; CHECK-NEXT:    [[ADD_PTR_1:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR]], i64 [[IDX_EXT]]
; CHECK-NEXT:    [[ADD_PTR64_1:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR64]], i64 [[IDX_EXT63]]
; CHECK-NEXT:    [[ARRAYIDX3_2:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR_1]], i64 4
; CHECK-NEXT:    [[ARRAYIDX5_2:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR64_1]], i64 4
; CHECK-NEXT:    [[ADD_PTR_2:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR_1]], i64 [[IDX_EXT]]
; CHECK-NEXT:    [[ADD_PTR64_2:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR64_1]], i64 [[IDX_EXT63]]
; CHECK-NEXT:    [[ARRAYIDX3_3:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR_2]], i64 4
; CHECK-NEXT:    [[ARRAYIDX5_3:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR64_2]], i64 4
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr [[P1]], align 1
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[P2]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3]], align 1
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5]], align 1
; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i8>, ptr [[ADD_PTR]], align 1
; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i8>, ptr [[ADD_PTR64]], align 1
; CHECK-NEXT:    [[TMP6:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_1]], align 1
; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_1]], align 1
; CHECK-NEXT:    [[TMP8:%.*]] = load <4 x i8>, ptr [[ADD_PTR_1]], align 1
; CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i8>, ptr [[ADD_PTR64_1]], align 1
; CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_2]], align 1
; CHECK-NEXT:    [[TMP11:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_2]], align 1
; CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i8>, ptr [[ADD_PTR_2]], align 1
; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <4 x i8> [[TMP0]], <4 x i8> [[TMP4]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP14:%.*]] = shufflevector <4 x i8> [[TMP8]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP15:%.*]] = shufflevector <16 x i8> [[TMP13]], <16 x i8> [[TMP14]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP16:%.*]] = shufflevector <4 x i8> [[TMP12]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP17:%.*]] = shufflevector <16 x i8> [[TMP15]], <16 x i8> [[TMP16]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
; CHECK-NEXT:    [[TMP18:%.*]] = zext <16 x i8> [[TMP17]] to <16 x i32>
; CHECK-NEXT:    [[TMP19:%.*]] = load <4 x i8>, ptr [[ADD_PTR64_2]], align 1
; CHECK-NEXT:    [[TMP20:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> [[TMP5]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP21:%.*]] = shufflevector <4 x i8> [[TMP9]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP22:%.*]] = shufflevector <16 x i8> [[TMP20]], <16 x i8> [[TMP21]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP23:%.*]] = shufflevector <4 x i8> [[TMP19]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP24:%.*]] = shufflevector <16 x i8> [[TMP22]], <16 x i8> [[TMP23]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
; CHECK-NEXT:    [[TMP25:%.*]] = zext <16 x i8> [[TMP24]] to <16 x i32>
; CHECK-NEXT:    [[TMP26:%.*]] = sub nsw <16 x i32> [[TMP18]], [[TMP25]]
; CHECK-NEXT:    [[TMP27:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_3]], align 1
; CHECK-NEXT:    [[TMP28:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> [[TMP6]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP29:%.*]] = shufflevector <4 x i8> [[TMP10]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP30:%.*]] = shufflevector <16 x i8> [[TMP28]], <16 x i8> [[TMP29]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP31:%.*]] = shufflevector <4 x i8> [[TMP27]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP32:%.*]] = shufflevector <16 x i8> [[TMP30]], <16 x i8> [[TMP31]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
; CHECK-NEXT:    [[TMP33:%.*]] = zext <16 x i8> [[TMP32]] to <16 x i32>
; CHECK-NEXT:    [[TMP34:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_3]], align 1
; CHECK-NEXT:    [[TMP35:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> [[TMP7]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP36:%.*]] = shufflevector <4 x i8> [[TMP11]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP37:%.*]] = shufflevector <16 x i8> [[TMP35]], <16 x i8> [[TMP36]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP38:%.*]] = shufflevector <4 x i8> [[TMP34]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP39:%.*]] = shufflevector <16 x i8> [[TMP37]], <16 x i8> [[TMP38]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
; CHECK-NEXT:    [[TMP40:%.*]] = zext <16 x i8> [[TMP39]] to <16 x i32>
; CHECK-NEXT:    [[TMP41:%.*]] = sub nsw <16 x i32> [[TMP33]], [[TMP40]]
; CHECK-NEXT:    [[TMP42:%.*]] = shl nsw <16 x i32> [[TMP41]], <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
; CHECK-NEXT:    [[TMP43:%.*]] = add nsw <16 x i32> [[TMP42]], [[TMP26]]
; CHECK-NEXT:    [[TMP44:%.*]] = shufflevector <16 x i32> [[TMP43]], <16 x i32> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
; CHECK-NEXT:    [[TMP45:%.*]] = add nsw <16 x i32> [[TMP43]], [[TMP44]]
; CHECK-NEXT:    [[TMP46:%.*]] = sub nsw <16 x i32> [[TMP43]], [[TMP44]]
; CHECK-NEXT:    [[TMP47:%.*]] = shufflevector <16 x i32> [[TMP45]], <16 x i32> [[TMP46]], <16 x i32> <i32 11, i32 15, i32 7, i32 3, i32 26, i32 30, i32 22, i32 18, i32 9, i32 13, i32 5, i32 1, i32 24, i32 28, i32 20, i32 16>
; CHECK-NEXT:    [[TMP48:%.*]] = shufflevector <16 x i32> [[TMP47]], <16 x i32> poison, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP49:%.*]] = add nsw <16 x i32> [[TMP47]], [[TMP48]]
; CHECK-NEXT:    [[TMP50:%.*]] = sub nsw <16 x i32> [[TMP47]], [[TMP48]]
; CHECK-NEXT:    [[TMP51:%.*]] = shufflevector <16 x i32> [[TMP49]], <16 x i32> [[TMP50]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT:    [[TMP52:%.*]] = shufflevector <16 x i32> [[TMP51]], <16 x i32> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
; CHECK-NEXT:    [[TMP53:%.*]] = sub nsw <16 x i32> [[TMP51]], [[TMP52]]
; CHECK-NEXT:    [[TMP54:%.*]] = add nsw <16 x i32> [[TMP51]], [[TMP52]]
; CHECK-NEXT:    [[TMP55:%.*]] = shufflevector <16 x i32> [[TMP53]], <16 x i32> [[TMP54]], <16 x i32> <i32 0, i32 17, i32 18, i32 3, i32 4, i32 21, i32 22, i32 7, i32 8, i32 25, i32 26, i32 11, i32 12, i32 29, i32 30, i32 15>
; CHECK-NEXT:    [[TMP56:%.*]] = shufflevector <16 x i32> [[TMP55]], <16 x i32> poison, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
; CHECK-NEXT:    [[TMP57:%.*]] = add nsw <16 x i32> [[TMP55]], [[TMP56]]
; CHECK-NEXT:    [[TMP58:%.*]] = sub nsw <16 x i32> [[TMP55]], [[TMP56]]
; CHECK-NEXT:    [[TMP59:%.*]] = shufflevector <16 x i32> [[TMP57]], <16 x i32> [[TMP58]], <16 x i32> <i32 0, i32 1, i32 18, i32 19, i32 4, i32 5, i32 22, i32 23, i32 8, i32 9, i32 26, i32 27, i32 12, i32 13, i32 30, i32 31>
; CHECK-NEXT:    [[TMP60:%.*]] = lshr <16 x i32> [[TMP59]], <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
; CHECK-NEXT:    [[TMP61:%.*]] = and <16 x i32> [[TMP60]], <i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537>
; CHECK-NEXT:    [[TMP62:%.*]] = mul nuw <16 x i32> [[TMP61]], <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
; CHECK-NEXT:    [[TMP63:%.*]] = add <16 x i32> [[TMP62]], [[TMP59]]
; CHECK-NEXT:    [[TMP64:%.*]] = xor <16 x i32> [[TMP63]], [[TMP62]]
; CHECK-NEXT:    [[TMP65:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP64]])
; CHECK-NEXT:    [[CONV118:%.*]] = and i32 [[TMP65]], 65535
; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[TMP65]], 16
; CHECK-NEXT:    [[ADD119:%.*]] = add nuw nsw i32 [[CONV118]], [[SHR]]
; CHECK-NEXT:    [[SHR120:%.*]] = lshr i32 [[ADD119]], 1
; CHECK-NEXT:    ret i32 [[SHR120]]
;
entry:
  %idx.ext = sext i32 %st1 to i64
  %idx.ext63 = sext i32 %st2 to i64
  %0 = load i8, ptr %p1, align 1
  %conv = zext i8 %0 to i32
  %1 = load i8, ptr %p2, align 1
  %conv2 = zext i8 %1 to i32
  %sub = sub nsw i32 %conv, %conv2
  %arrayidx3 = getelementptr inbounds i8, ptr %p1, i64 4
  %2 = load i8, ptr %arrayidx3, align 1
  %conv4 = zext i8 %2 to i32
  %arrayidx5 = getelementptr inbounds i8, ptr %p2, i64 4
  %3 = load i8, ptr %arrayidx5, align 1
  %conv6 = zext i8 %3 to i32
  %sub7 = sub nsw i32 %conv4, %conv6
  %shl = shl nsw i32 %sub7, 16
  %add = add nsw i32 %shl, %sub
  %arrayidx8 = getelementptr inbounds i8, ptr %p1, i64 1
  %4 = load i8, ptr %arrayidx8, align 1
  %conv9 = zext i8 %4 to i32
  %arrayidx10 = getelementptr inbounds i8, ptr %p2, i64 1
  %5 = load i8, ptr %arrayidx10, align 1
  %conv11 = zext i8 %5 to i32
  %sub12 = sub nsw i32 %conv9, %conv11
  %arrayidx13 = getelementptr inbounds i8, ptr %p1, i64 5
  %6 = load i8, ptr %arrayidx13, align 1
  %conv14 = zext i8 %6 to i32
  %arrayidx15 = getelementptr inbounds i8, ptr %p2, i64 5
  %7 = load i8, ptr %arrayidx15, align 1
  %conv16 = zext i8 %7 to i32
  %sub17 = sub nsw i32 %conv14, %conv16
  %shl18 = shl nsw i32 %sub17, 16
  %add19 = add nsw i32 %shl18, %sub12
  %arrayidx20 = getelementptr inbounds i8, ptr %p1, i64 2
  %8 = load i8, ptr %arrayidx20, align 1
  %conv21 = zext i8 %8 to i32
  %arrayidx22 = getelementptr inbounds i8, ptr %p2, i64 2
  %9 = load i8, ptr %arrayidx22, align 1
  %conv23 = zext i8 %9 to i32
  %sub24 = sub nsw i32 %conv21, %conv23
  %arrayidx25 = getelementptr inbounds i8, ptr %p1, i64 6
  %10 = load i8, ptr %arrayidx25, align 1
  %conv26 = zext i8 %10 to i32
  %arrayidx27 = getelementptr inbounds i8, ptr %p2, i64 6
  %11 = load i8, ptr %arrayidx27, align 1
  %conv28 = zext i8 %11 to i32
  %sub29 = sub nsw i32 %conv26, %conv28
  %shl30 = shl nsw i32 %sub29, 16
  %add31 = add nsw i32 %shl30, %sub24
  %arrayidx32 = getelementptr inbounds i8, ptr %p1, i64 3
  %12 = load i8, ptr %arrayidx32, align 1
  %conv33 = zext i8 %12 to i32
  %arrayidx34 = getelementptr inbounds i8, ptr %p2, i64 3
  %13 = load i8, ptr %arrayidx34, align 1
  %conv35 = zext i8 %13 to i32
  %sub36 = sub nsw i32 %conv33, %conv35
  %arrayidx37 = getelementptr inbounds i8, ptr %p1, i64 7
  %14 = load i8, ptr %arrayidx37, align 1
  %conv38 = zext i8 %14 to i32
  %arrayidx39 = getelementptr inbounds i8, ptr %p2, i64 7
  %15 = load i8, ptr %arrayidx39, align 1
  %conv40 = zext i8 %15 to i32
  %sub41 = sub nsw i32 %conv38, %conv40
  %shl42 = shl nsw i32 %sub41, 16
  %add43 = add nsw i32 %shl42, %sub36
  %add44 = add nsw i32 %add19, %add
  %sub45 = sub nsw i32 %add, %add19
  %add46 = add nsw i32 %add43, %add31
  %sub47 = sub nsw i32 %add31, %add43
  %add48 = add nsw i32 %add46, %add44
  %sub51 = sub nsw i32 %add44, %add46
  %add55 = add nsw i32 %sub47, %sub45
  %sub59 = sub nsw i32 %sub45, %sub47
  %add.ptr = getelementptr inbounds i8, ptr %p1, i64 %idx.ext
  %add.ptr64 = getelementptr inbounds i8, ptr %p2, i64 %idx.ext63
  %16 = load i8, ptr %add.ptr, align 1
  %conv.1 = zext i8 %16 to i32
  %17 = load i8, ptr %add.ptr64, align 1
  %conv2.1 = zext i8 %17 to i32
  %sub.1 = sub nsw i32 %conv.1, %conv2.1
  %arrayidx3.1 = getelementptr inbounds i8, ptr %add.ptr, i64 4
  %18 = load i8, ptr %arrayidx3.1, align 1
  %conv4.1 = zext i8 %18 to i32
  %arrayidx5.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 4
  %19 = load i8, ptr %arrayidx5.1, align 1
  %conv6.1 = zext i8 %19 to i32
  %sub7.1 = sub nsw i32 %conv4.1, %conv6.1
  %shl.1 = shl nsw i32 %sub7.1, 16
  %add.1 = add nsw i32 %shl.1, %sub.1
  %arrayidx8.1 = getelementptr inbounds i8, ptr %add.ptr, i64 1
  %20 = load i8, ptr %arrayidx8.1, align 1
  %conv9.1 = zext i8 %20 to i32
  %arrayidx10.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 1
  %21 = load i8, ptr %arrayidx10.1, align 1
  %conv11.1 = zext i8 %21 to i32
  %sub12.1 = sub nsw i32 %conv9.1, %conv11.1
  %arrayidx13.1 = getelementptr inbounds i8, ptr %add.ptr, i64 5
  %22 = load i8, ptr %arrayidx13.1, align 1
  %conv14.1 = zext i8 %22 to i32
  %arrayidx15.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 5
  %23 = load i8, ptr %arrayidx15.1, align 1
  %conv16.1 = zext i8 %23 to i32
  %sub17.1 = sub nsw i32 %conv14.1, %conv16.1
  %shl18.1 = shl nsw i32 %sub17.1, 16
  %add19.1 = add nsw i32 %shl18.1, %sub12.1
  %arrayidx20.1 = getelementptr inbounds i8, ptr %add.ptr, i64 2
  %24 = load i8, ptr %arrayidx20.1, align 1
  %conv21.1 = zext i8 %24 to i32
  %arrayidx22.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 2
  %25 = load i8, ptr %arrayidx22.1, align 1
  %conv23.1 = zext i8 %25 to i32
  %sub24.1 = sub nsw i32 %conv21.1, %conv23.1
  %arrayidx25.1 = getelementptr inbounds i8, ptr %add.ptr, i64 6
  %26 = load i8, ptr %arrayidx25.1, align 1
  %conv26.1 = zext i8 %26 to i32
  %arrayidx27.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 6
  %27 = load i8, ptr %arrayidx27.1, align 1
  %conv28.1 = zext i8 %27 to i32
  %sub29.1 = sub nsw i32 %conv26.1, %conv28.1
  %shl30.1 = shl nsw i32 %sub29.1, 16
  %add31.1 = add nsw i32 %shl30.1, %sub24.1
  %arrayidx32.1 = getelementptr inbounds i8, ptr %add.ptr, i64 3
  %28 = load i8, ptr %arrayidx32.1, align 1
  %conv33.1 = zext i8 %28 to i32
  %arrayidx34.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 3
  %29 = load i8, ptr %arrayidx34.1, align 1
  %conv35.1 = zext i8 %29 to i32
  %sub36.1 = sub nsw i32 %conv33.1, %conv35.1
  %arrayidx37.1 = getelementptr inbounds i8, ptr %add.ptr, i64 7
  %30 = load i8, ptr %arrayidx37.1, align 1
  %conv38.1 = zext i8 %30 to i32
  %arrayidx39.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 7
  %31 = load i8, ptr %arrayidx39.1, align 1
  %conv40.1 = zext i8 %31 to i32
  %sub41.1 = sub nsw i32 %conv38.1, %conv40.1
  %shl42.1 = shl nsw i32 %sub41.1, 16
  %add43.1 = add nsw i32 %shl42.1, %sub36.1
  %add44.1 = add nsw i32 %add19.1, %add.1
  %sub45.1 = sub nsw i32 %add.1, %add19.1
  %add46.1 = add nsw i32 %add43.1, %add31.1
  %sub47.1 = sub nsw i32 %add31.1, %add43.1
  %add48.1 = add nsw i32 %add46.1, %add44.1
  %sub51.1 = sub nsw i32 %add44.1, %add46.1
  %add55.1 = add nsw i32 %sub47.1, %sub45.1
  %sub59.1 = sub nsw i32 %sub45.1, %sub47.1
  %add.ptr.1 = getelementptr inbounds i8, ptr %add.ptr, i64 %idx.ext
  %add.ptr64.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 %idx.ext63
  %32 = load i8, ptr %add.ptr.1, align 1
  %conv.2 = zext i8 %32 to i32
  %33 = load i8, ptr %add.ptr64.1, align 1
  %conv2.2 = zext i8 %33 to i32
  %sub.2 = sub nsw i32 %conv.2, %conv2.2
  %arrayidx3.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 4
  %34 = load i8, ptr %arrayidx3.2, align 1
  %conv4.2 = zext i8 %34 to i32
  %arrayidx5.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 4
  %35 = load i8, ptr %arrayidx5.2, align 1
  %conv6.2 = zext i8 %35 to i32
  %sub7.2 = sub nsw i32 %conv4.2, %conv6.2
  %shl.2 = shl nsw i32 %sub7.2, 16
  %add.2 = add nsw i32 %shl.2, %sub.2
  %arrayidx8.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 1
  %36 = load i8, ptr %arrayidx8.2, align 1
  %conv9.2 = zext i8 %36 to i32
  %arrayidx10.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 1
  %37 = load i8, ptr %arrayidx10.2, align 1
  %conv11.2 = zext i8 %37 to i32
  %sub12.2 = sub nsw i32 %conv9.2, %conv11.2
  %arrayidx13.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 5
  %38 = load i8, ptr %arrayidx13.2, align 1
  %conv14.2 = zext i8 %38 to i32
  %arrayidx15.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 5
  %39 = load i8, ptr %arrayidx15.2, align 1
  %conv16.2 = zext i8 %39 to i32
  %sub17.2 = sub nsw i32 %conv14.2, %conv16.2
  %shl18.2 = shl nsw i32 %sub17.2, 16
  %add19.2 = add nsw i32 %shl18.2, %sub12.2
  %arrayidx20.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 2
  %40 = load i8, ptr %arrayidx20.2, align 1
  %conv21.2 = zext i8 %40 to i32
  %arrayidx22.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 2
  %41 = load i8, ptr %arrayidx22.2, align 1
  %conv23.2 = zext i8 %41 to i32
  %sub24.2 = sub nsw i32 %conv21.2, %conv23.2
  %arrayidx25.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 6
  %42 = load i8, ptr %arrayidx25.2, align 1
  %conv26.2 = zext i8 %42 to i32
  %arrayidx27.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 6
  %43 = load i8, ptr %arrayidx27.2, align 1
  %conv28.2 = zext i8 %43 to i32
  %sub29.2 = sub nsw i32 %conv26.2, %conv28.2
  %shl30.2 = shl nsw i32 %sub29.2, 16
  %add31.2 = add nsw i32 %shl30.2, %sub24.2
  %arrayidx32.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 3
  %44 = load i8, ptr %arrayidx32.2, align 1
  %conv33.2 = zext i8 %44 to i32
  %arrayidx34.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 3
  %45 = load i8, ptr %arrayidx34.2, align 1
  %conv35.2 = zext i8 %45 to i32
  %sub36.2 = sub nsw i32 %conv33.2, %conv35.2
  %arrayidx37.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 7
  %46 = load i8, ptr %arrayidx37.2, align 1
  %conv38.2 = zext i8 %46 to i32
  %arrayidx39.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 7
  %47 = load i8, ptr %arrayidx39.2, align 1
  %conv40.2 = zext i8 %47 to i32
  %sub41.2 = sub nsw i32 %conv38.2, %conv40.2
  %shl42.2 = shl nsw i32 %sub41.2, 16
  %add43.2 = add nsw i32 %shl42.2, %sub36.2
  %add44.2 = add nsw i32 %add19.2, %add.2
  %sub45.2 = sub nsw i32 %add.2, %add19.2
  %add46.2 = add nsw i32 %add43.2, %add31.2
  %sub47.2 = sub nsw i32 %add31.2, %add43.2
  %add48.2 = add nsw i32 %add46.2, %add44.2
  %sub51.2 = sub nsw i32 %add44.2, %add46.2
  %add55.2 = add nsw i32 %sub47.2, %sub45.2
  %sub59.2 = sub nsw i32 %sub45.2, %sub47.2
  %add.ptr.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 %idx.ext
  %add.ptr64.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 %idx.ext63
  %48 = load i8, ptr %add.ptr.2, align 1
  %conv.3 = zext i8 %48 to i32
  %49 = load i8, ptr %add.ptr64.2, align 1
  %conv2.3 = zext i8 %49 to i32
  %sub.3 = sub nsw i32 %conv.3, %conv2.3
  %arrayidx3.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 4
  %50 = load i8, ptr %arrayidx3.3, align 1
  %conv4.3 = zext i8 %50 to i32
  %arrayidx5.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 4
  %51 = load i8, ptr %arrayidx5.3, align 1
  %conv6.3 = zext i8 %51 to i32
  %sub7.3 = sub nsw i32 %conv4.3, %conv6.3
  %shl.3 = shl nsw i32 %sub7.3, 16
  %add.3 = add nsw i32 %shl.3, %sub.3
  %arrayidx8.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 1
  %52 = load i8, ptr %arrayidx8.3, align 1
  %conv9.3 = zext i8 %52 to i32
  %arrayidx10.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 1
  %53 = load i8, ptr %arrayidx10.3, align 1
  %conv11.3 = zext i8 %53 to i32
  %sub12.3 = sub nsw i32 %conv9.3, %conv11.3
  %arrayidx13.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 5
  %54 = load i8, ptr %arrayidx13.3, align 1
  %conv14.3 = zext i8 %54 to i32
  %arrayidx15.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 5
  %55 = load i8, ptr %arrayidx15.3, align 1
  %conv16.3 = zext i8 %55 to i32
  %sub17.3 = sub nsw i32 %conv14.3, %conv16.3
  %shl18.3 = shl nsw i32 %sub17.3, 16
  %add19.3 = add nsw i32 %shl18.3, %sub12.3
  %arrayidx20.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 2
  %56 = load i8, ptr %arrayidx20.3, align 1
  %conv21.3 = zext i8 %56 to i32
  %arrayidx22.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 2
  %57 = load i8, ptr %arrayidx22.3, align 1
  %conv23.3 = zext i8 %57 to i32
  %sub24.3 = sub nsw i32 %conv21.3, %conv23.3
  %arrayidx25.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 6
  %58 = load i8, ptr %arrayidx25.3, align 1
  %conv26.3 = zext i8 %58 to i32
  %arrayidx27.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 6
  %59 = load i8, ptr %arrayidx27.3, align 1
  %conv28.3 = zext i8 %59 to i32
  %sub29.3 = sub nsw i32 %conv26.3, %conv28.3
  %shl30.3 = shl nsw i32 %sub29.3, 16
  %add31.3 = add nsw i32 %shl30.3, %sub24.3
  %arrayidx32.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 3
  %60 = load i8, ptr %arrayidx32.3, align 1
  %conv33.3 = zext i8 %60 to i32
  %arrayidx34.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 3
  %61 = load i8, ptr %arrayidx34.3, align 1
  %conv35.3 = zext i8 %61 to i32
  %sub36.3 = sub nsw i32 %conv33.3, %conv35.3
  %arrayidx37.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 7
  %62 = load i8, ptr %arrayidx37.3, align 1
  %conv38.3 = zext i8 %62 to i32
  %arrayidx39.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 7
  %63 = load i8, ptr %arrayidx39.3, align 1
  %conv40.3 = zext i8 %63 to i32
  %sub41.3 = sub nsw i32 %conv38.3, %conv40.3
  %shl42.3 = shl nsw i32 %sub41.3, 16
  %add43.3 = add nsw i32 %shl42.3, %sub36.3
  %add44.3 = add nsw i32 %add19.3, %add.3
  %sub45.3 = sub nsw i32 %add.3, %add19.3
  %add46.3 = add nsw i32 %add43.3, %add31.3
  %sub47.3 = sub nsw i32 %add31.3, %add43.3
  %add48.3 = add nsw i32 %add46.3, %add44.3
  %sub51.3 = sub nsw i32 %add44.3, %add46.3
  %add55.3 = add nsw i32 %sub47.3, %sub45.3
  %sub59.3 = sub nsw i32 %sub45.3, %sub47.3
  %add78 = add nsw i32 %add48.1, %add48
  %sub86 = sub nsw i32 %add48, %add48.1
  %add94 = add nsw i32 %add48.3, %add48.2
  %sub102 = sub nsw i32 %add48.2, %add48.3
  %add103 = add nsw i32 %add94, %add78
  %sub104 = sub nsw i32 %add78, %add94
  %add105 = add nsw i32 %sub102, %sub86
  %sub106 = sub nsw i32 %sub86, %sub102
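  ; abs2-style idiom used by SATD kernels: with s = ((x >> 15) & 0x10001) * 0xffff,
  ; compute (x + s) ^ s on each i32 that packs two 16-bit difference lanes.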
  %shr.i = lshr i32 %add103, 15
  %and.i = and i32 %shr.i, 65537
  %mul.i = mul nuw i32 %and.i, 65535
  %add.i = add i32 %mul.i, %add103
  %xor.i = xor i32 %add.i, %mul.i
  %shr.i184 = lshr i32 %add105, 15
  %and.i185 = and i32 %shr.i184, 65537
  %mul.i186 = mul nuw i32 %and.i185, 65535
  %add.i187 = add i32 %mul.i186, %add105
  %xor.i188 = xor i32 %add.i187, %mul.i186
  %shr.i189 = lshr i32 %sub104, 15
  %and.i190 = and i32 %shr.i189, 65537
  %mul.i191 = mul nuw i32 %and.i190, 65535
  %add.i192 = add i32 %mul.i191, %sub104
  %xor.i193 = xor i32 %add.i192, %mul.i191
  %shr.i194 = lshr i32 %sub106, 15
  %and.i195 = and i32 %shr.i194, 65537
  %mul.i196 = mul nuw i32 %and.i195, 65535
  %add.i197 = add i32 %mul.i196, %sub106
  %xor.i198 = xor i32 %add.i197, %mul.i196
  %add110 = add i32 %xor.i188, %xor.i
  %add112 = add i32 %add110, %xor.i193
  %add113 = add i32 %add112, %xor.i198
  %add78.1 = add nsw i32 %add55.1, %add55
  %sub86.1 = sub nsw i32 %add55, %add55.1
  %add94.1 = add nsw i32 %add55.3, %add55.2
  %sub102.1 = sub nsw i32 %add55.2, %add55.3
  %add103.1 = add nsw i32 %add94.1, %add78.1
  %sub104.1 = sub nsw i32 %add78.1, %add94.1
  %add105.1 = add nsw i32 %sub102.1, %sub86.1
  %sub106.1 = sub nsw i32 %sub86.1, %sub102.1
  %shr.i.1 = lshr i32 %add103.1, 15
  %and.i.1 = and i32 %shr.i.1, 65537
  %mul.i.1 = mul nuw i32 %and.i.1, 65535
  %add.i.1 = add i32 %mul.i.1, %add103.1
  %xor.i.1 = xor i32 %add.i.1, %mul.i.1
  %shr.i184.1 = lshr i32 %add105.1, 15
  %and.i185.1 = and i32 %shr.i184.1, 65537
  %mul.i186.1 = mul nuw i32 %and.i185.1, 65535
  %add.i187.1 = add i32 %mul.i186.1, %add105.1
  %xor.i188.1 = xor i32 %add.i187.1, %mul.i186.1
  %shr.i189.1 = lshr i32 %sub104.1, 15
  %and.i190.1 = and i32 %shr.i189.1, 65537
  %mul.i191.1 = mul nuw i32 %and.i190.1, 65535
  %add.i192.1 = add i32 %mul.i191.1, %sub104.1
  %xor.i193.1 = xor i32 %add.i192.1, %mul.i191.1
  %shr.i194.1 = lshr i32 %sub106.1, 15
  %and.i195.1 = and i32 %shr.i194.1, 65537
  %mul.i196.1 = mul nuw i32 %and.i195.1, 65535
  %add.i197.1 = add i32 %mul.i196.1, %sub106.1
  %xor.i198.1 = xor i32 %add.i197.1, %mul.i196.1
  %add108.1 = add i32 %xor.i188.1, %add113
  %add110.1 = add i32 %add108.1, %xor.i.1
  %add112.1 = add i32 %add110.1, %xor.i193.1
  %add113.1 = add i32 %add112.1, %xor.i198.1
  %add78.2 = add nsw i32 %sub51.1, %sub51
  %sub86.2 = sub nsw i32 %sub51, %sub51.1
  %add94.2 = add nsw i32 %sub51.3, %sub51.2
  %sub102.2 = sub nsw i32 %sub51.2, %sub51.3
  %add103.2 = add nsw i32 %add94.2, %add78.2
  %sub104.2 = sub nsw i32 %add78.2, %add94.2
  %add105.2 = add nsw i32 %sub102.2, %sub86.2
  %sub106.2 = sub nsw i32 %sub86.2, %sub102.2
  %shr.i.2 = lshr i32 %add103.2, 15
  %and.i.2 = and i32 %shr.i.2, 65537
  %mul.i.2 = mul nuw i32 %and.i.2, 65535
  %add.i.2 = add i32 %mul.i.2, %add103.2
  %xor.i.2 = xor i32 %add.i.2, %mul.i.2
  %shr.i184.2 = lshr i32 %add105.2, 15
  %and.i185.2 = and i32 %shr.i184.2, 65537
  %mul.i186.2 = mul nuw i32 %and.i185.2, 65535
  %add.i187.2 = add i32 %mul.i186.2, %add105.2
  %xor.i188.2 = xor i32 %add.i187.2, %mul.i186.2
  %shr.i189.2 = lshr i32 %sub104.2, 15
  %and.i190.2 = and i32 %shr.i189.2, 65537
  %mul.i191.2 = mul nuw i32 %and.i190.2, 65535
  %add.i192.2 = add i32 %mul.i191.2, %sub104.2
  %xor.i193.2 = xor i32 %add.i192.2, %mul.i191.2
  %shr.i194.2 = lshr i32 %sub106.2, 15
  %and.i195.2 = and i32 %shr.i194.2, 65537
  %mul.i196.2 = mul nuw i32 %and.i195.2, 65535
  %add.i197.2 = add i32 %mul.i196.2, %sub106.2
  %xor.i198.2 = xor i32 %add.i197.2, %mul.i196.2
  %add108.2 = add i32 %xor.i188.2, %add113.1
  %add110.2 = add i32 %add108.2, %xor.i.2
  %add112.2 = add i32 %add110.2, %xor.i193.2
  %add113.2 = add i32 %add112.2, %xor.i198.2
  %add78.3 = add nsw i32 %sub59.1, %sub59
  %sub86.3 = sub nsw i32 %sub59, %sub59.1
  %add94.3 = add nsw i32 %sub59.3, %sub59.2
  %sub102.3 = sub nsw i32 %sub59.2, %sub59.3
  %add103.3 = add nsw i32 %add94.3, %add78.3
  %sub104.3 = sub nsw i32 %add78.3, %add94.3
  %add105.3 = add nsw i32 %sub102.3, %sub86.3
  %sub106.3 = sub nsw i32 %sub86.3, %sub102.3
  %shr.i.3 = lshr i32 %add103.3, 15
  %and.i.3 = and i32 %shr.i.3, 65537
  %mul.i.3 = mul nuw i32 %and.i.3, 65535
  %add.i.3 = add i32 %mul.i.3, %add103.3
  %xor.i.3 = xor i32 %add.i.3, %mul.i.3
  %shr.i184.3 = lshr i32 %add105.3, 15
  %and.i185.3 = and i32 %shr.i184.3, 65537
  %mul.i186.3 = mul nuw i32 %and.i185.3, 65535
  %add.i187.3 = add i32 %mul.i186.3, %add105.3
  %xor.i188.3 = xor i32 %add.i187.3, %mul.i186.3
  %shr.i189.3 = lshr i32 %sub104.3, 15
  %and.i190.3 = and i32 %shr.i189.3, 65537
  %mul.i191.3 = mul nuw i32 %and.i190.3, 65535
  %add.i192.3 = add i32 %mul.i191.3, %sub104.3
  %xor.i193.3 = xor i32 %add.i192.3, %mul.i191.3
  %shr.i194.3 = lshr i32 %sub106.3, 15
  %and.i195.3 = and i32 %shr.i194.3, 65537
  %mul.i196.3 = mul nuw i32 %and.i195.3, 65535
  %add.i197.3 = add i32 %mul.i196.3, %sub106.3
  %xor.i198.3 = xor i32 %add.i197.3, %mul.i196.3
  %add108.3 = add i32 %xor.i188.3, %add113.2
  %add110.3 = add i32 %add108.3, %xor.i.3
  %add112.3 = add i32 %add110.3, %xor.i193.3
  %add113.3 = add i32 %add112.3, %xor.i198.3
  %conv118 = and i32 %add113.3, 65535
  %shr = lshr i32 %add113.3, 16
  %add119 = add nuw nsw i32 %conv118, %shr
  %shr120 = lshr i32 %add119, 1
  ret i32 %shr120
}