llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt -S -mtriple riscv64-unknown-linux-gnu < %s --passes=slp-vectorizer -mattr=+v -slp-threshold=-20 | FileCheck %s
; RUN: opt -S -mtriple riscv64-unknown-linux-gnu < %s --passes=slp-vectorizer -mattr=+v -slp-threshold=-15 | FileCheck %s --check-prefix=THR15
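;
; The two RUN lines exercise the SLP vectorizer at different cost thresholds:
; -slp-threshold=-20 is checked against the default CHECK prefix and
; -slp-threshold=-15 against THR15, so each prefix below captures the
; vectorized output produced at its threshold.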

define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.ptr, ptr %add.ptr64) {
; CHECK-LABEL: define i32 @test(
; CHECK-SAME: ptr [[PIX1:%.*]], ptr [[PIX2:%.*]], i64 [[IDX_EXT:%.*]], i64 [[IDX_EXT63:%.*]], ptr [[ADD_PTR:%.*]], ptr [[ADD_PTR64:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[PIX1]], align 1
; CHECK-NEXT:    [[CONV1:%.*]] = zext i8 [[TMP0]] to i32
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr i8, ptr [[PIX1]], i64 4
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr i8, ptr [[PIX2]], i64 4
; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr i8, ptr [[PIX1]], i64 1
; CHECK-NEXT:    [[ARRAYIDX22:%.*]] = getelementptr i8, ptr [[PIX2]], i64 1
; CHECK-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr i8, ptr [[PIX1]], i64 5
; CHECK-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr i8, ptr [[PIX2]], i64 5
; CHECK-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr i8, ptr [[PIX1]], i64 3
; CHECK-NEXT:    [[TMP10:%.*]] = load i8, ptr [[ARRAYIDX32]], align 1
; CHECK-NEXT:    [[CONV33:%.*]] = zext i8 [[TMP10]] to i32
; CHECK-NEXT:    [[ADD_PTR3:%.*]] = getelementptr i8, ptr [[PIX1]], i64 [[IDX_EXT]]
; CHECK-NEXT:    [[ADD_PTR644:%.*]] = getelementptr i8, ptr [[PIX2]], i64 [[IDX_EXT63]]
; CHECK-NEXT:    [[TMP11:%.*]] = load i8, ptr [[ADD_PTR3]], align 1
; CHECK-NEXT:    [[CONV_1:%.*]] = zext i8 [[TMP11]] to i32
; CHECK-NEXT:    [[ARRAYIDX3_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 4
; CHECK-NEXT:    [[ARRAYIDX5_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 4
; CHECK-NEXT:    [[ARRAYIDX8_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 1
; CHECK-NEXT:    [[ARRAYIDX32_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 2
; CHECK-NEXT:    [[TMP14:%.*]] = load i8, ptr [[ARRAYIDX32_1]], align 1
; CHECK-NEXT:    [[ARRAYIDX25_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 6
; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX25_1]], align 1
; CHECK-NEXT:    [[ARRAYIDX27_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 6
; CHECK-NEXT:    [[TMP5:%.*]] = load i8, ptr [[ARRAYIDX27_1]], align 1
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i8> poison, i8 [[TMP4]], i32 0
; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <2 x i8> [[TMP6]], i8 [[TMP5]], i32 1
; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x i8> [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP9:%.*]] = zext i8 [[TMP8]] to i32
; CHECK-NEXT:    [[ARRAYIDX32_2:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 3
; CHECK-NEXT:    [[ARRAYIDX34_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 3
; CHECK-NEXT:    [[TMP18:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX32_2]], i64 4, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP20:%.*]] = zext <2 x i8> [[TMP18]] to <2 x i16>
; CHECK-NEXT:    [[TMP12:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX34_1]], i64 4, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP13:%.*]] = zext <2 x i8> [[TMP12]] to <2 x i16>
; CHECK-NEXT:    [[TMP28:%.*]] = sub <2 x i16> [[TMP20]], [[TMP13]]
; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <2 x i16> [[TMP28]], i32 1
; CHECK-NEXT:    [[TMP16:%.*]] = sext i16 [[TMP15]] to i32
; CHECK-NEXT:    [[TMP17:%.*]] = extractelement <2 x i16> [[TMP28]], i32 0
; CHECK-NEXT:    [[CONV33_1:%.*]] = sext i16 [[TMP17]] to i32
; CHECK-NEXT:    [[ADD_PTR_1:%.*]] = getelementptr i8, ptr [[ADD_PTR]], i64 [[IDX_EXT]]
; CHECK-NEXT:    [[ADD_PTR64_1:%.*]] = getelementptr i8, ptr [[ADD_PTR64]], i64 [[IDX_EXT63]]
; CHECK-NEXT:    [[ARRAYIDX3_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 4
; CHECK-NEXT:    [[ARRAYIDX5_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 4
; CHECK-NEXT:    [[ARRAYIDX8_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 1
; CHECK-NEXT:    [[ARRAYIDX10_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 1
; CHECK-NEXT:    [[ARRAYIDX13_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 5
; CHECK-NEXT:    [[ARRAYIDX15_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 5
; CHECK-NEXT:    [[TMP19:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ADD_PTR_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP29:%.*]] = load i8, ptr [[ADD_PTR_1]], align 1
; CHECK-NEXT:    [[TMP21:%.*]] = zext <2 x i8> [[TMP19]] to <2 x i32>
; CHECK-NEXT:    [[TMP84:%.*]] = zext i8 [[TMP29]] to i32
; CHECK-NEXT:    [[TMP22:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ADD_PTR64_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP23:%.*]] = zext <2 x i8> [[TMP22]] to <2 x i32>
; CHECK-NEXT:    [[TMP30:%.*]] = sub <2 x i32> [[TMP21]], [[TMP23]]
; CHECK-NEXT:    [[TMP42:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP26:%.*]] = zext <2 x i8> [[TMP42]] to <2 x i32>
; CHECK-NEXT:    [[TMP27:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP49:%.*]] = zext <2 x i8> [[TMP27]] to <2 x i32>
; CHECK-NEXT:    [[TMP24:%.*]] = sub <2 x i32> [[TMP26]], [[TMP49]]
; CHECK-NEXT:    [[TMP25:%.*]] = shl <2 x i32> [[TMP24]], <i32 16, i32 16>
; CHECK-NEXT:    [[TMP31:%.*]] = add <2 x i32> [[TMP25]], [[TMP30]]
; CHECK-NEXT:    [[TMP32:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX8_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP33:%.*]] = load i8, ptr [[ARRAYIDX8_2]], align 1
; CHECK-NEXT:    [[TMP50:%.*]] = zext <2 x i8> [[TMP32]] to <2 x i32>
; CHECK-NEXT:    [[TMP83:%.*]] = zext i8 [[TMP33]] to i32
; CHECK-NEXT:    [[TMP35:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX10_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP51:%.*]] = zext <2 x i8> [[TMP35]] to <2 x i32>
; CHECK-NEXT:    [[TMP52:%.*]] = sub <2 x i32> [[TMP50]], [[TMP51]]
; CHECK-NEXT:    [[TMP38:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX13_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP39:%.*]] = zext <2 x i8> [[TMP38]] to <2 x i32>
; CHECK-NEXT:    [[TMP40:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX15_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP56:%.*]] = zext <2 x i8> [[TMP40]] to <2 x i32>
; CHECK-NEXT:    [[TMP36:%.*]] = sub <2 x i32> [[TMP39]], [[TMP56]]
; CHECK-NEXT:    [[TMP37:%.*]] = shl <2 x i32> [[TMP36]], <i32 16, i32 16>
; CHECK-NEXT:    [[TMP59:%.*]] = add <2 x i32> [[TMP37]], [[TMP52]]
; CHECK-NEXT:    [[TMP63:%.*]] = add <2 x i32> [[TMP59]], [[TMP31]]
; CHECK-NEXT:    [[TMP72:%.*]] = sub <2 x i32> [[TMP31]], [[TMP59]]
; CHECK-NEXT:    [[TMP73:%.*]] = extractelement <2 x i32> [[TMP63]], i32 0
; CHECK-NEXT:    [[TMP34:%.*]] = extractelement <2 x i32> [[TMP63]], i32 1
; CHECK-NEXT:    [[ADD48_2:%.*]] = add i32 [[TMP34]], [[TMP73]]
; CHECK-NEXT:    [[SUB51_2:%.*]] = sub i32 [[TMP73]], [[TMP34]]
; CHECK-NEXT:    [[TMP47:%.*]] = extractelement <2 x i32> [[TMP72]], i32 0
; CHECK-NEXT:    [[TMP48:%.*]] = extractelement <2 x i32> [[TMP72]], i32 1
; CHECK-NEXT:    [[ADD55_2:%.*]] = add i32 [[TMP48]], [[TMP47]]
; CHECK-NEXT:    [[SUB59_2:%.*]] = sub i32 [[TMP47]], [[TMP48]]
; CHECK-NEXT:    [[ARRAYIDX3_3:%.*]] = getelementptr i8, ptr null, i64 4
; CHECK-NEXT:    [[ARRAYIDX5_3:%.*]] = getelementptr i8, ptr null, i64 4
; CHECK-NEXT:    [[ARRAYIDX8_3:%.*]] = getelementptr i8, ptr null, i64 1
; CHECK-NEXT:    [[ARRAYIDX10_3:%.*]] = getelementptr i8, ptr null, i64 1
; CHECK-NEXT:    [[TMP44:%.*]] = load i8, ptr null, align 1
; CHECK-NEXT:    [[ARRAYIDX15_3:%.*]] = getelementptr i8, ptr null, i64 5
; CHECK-NEXT:    [[TMP43:%.*]] = load i8, ptr null, align 1
; CHECK-NEXT:    [[TMP53:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 null, i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP76:%.*]] = load i8, ptr null, align 1
; CHECK-NEXT:    [[TMP55:%.*]] = zext <2 x i8> [[TMP53]] to <2 x i32>
; CHECK-NEXT:    [[TMP77:%.*]] = zext i8 [[TMP76]] to i32
; CHECK-NEXT:    [[TMP54:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 null, i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP57:%.*]] = zext <2 x i8> [[TMP54]] to <2 x i32>
; CHECK-NEXT:    [[TMP58:%.*]] = sub <2 x i32> [[TMP55]], [[TMP57]]
; CHECK-NEXT:    [[TMP41:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3_3]], i64 -4, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP60:%.*]] = zext <2 x i8> [[TMP41]] to <2 x i32>
; CHECK-NEXT:    [[TMP61:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5_3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP62:%.*]] = zext <2 x i8> [[TMP61]] to <2 x i32>
; CHECK-NEXT:    [[TMP45:%.*]] = sub <2 x i32> [[TMP60]], [[TMP62]]
; CHECK-NEXT:    [[TMP46:%.*]] = shl <2 x i32> [[TMP45]], <i32 16, i32 16>
; CHECK-NEXT:    [[TMP82:%.*]] = add <2 x i32> [[TMP46]], [[TMP58]]
; CHECK-NEXT:    [[TMP85:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX8_3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP94:%.*]] = zext <2 x i8> [[TMP85]] to <2 x i32>
; CHECK-NEXT:    [[TMP68:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX10_3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP103:%.*]] = zext <2 x i8> [[TMP68]] to <2 x i32>
; CHECK-NEXT:    [[TMP106:%.*]] = sub <2 x i32> [[TMP94]], [[TMP103]]
; CHECK-NEXT:    [[TMP64:%.*]] = insertelement <2 x i8> poison, i8 [[TMP44]], i32 0
; CHECK-NEXT:    [[TMP65:%.*]] = insertelement <2 x i8> [[TMP64]], i8 [[TMP43]], i32 1
; CHECK-NEXT:    [[TMP108:%.*]] = zext <2 x i8> [[TMP65]] to <2 x i32>
; CHECK-NEXT:    [[TMP74:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX15_3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP75:%.*]] = zext <2 x i8> [[TMP74]] to <2 x i32>
; CHECK-NEXT:    [[TMP69:%.*]] = sub <2 x i32> [[TMP108]], [[TMP75]]
; CHECK-NEXT:    [[TMP70:%.*]] = shl <2 x i32> [[TMP69]], <i32 16, i32 16>
; CHECK-NEXT:    [[TMP109:%.*]] = add <2 x i32> [[TMP70]], [[TMP106]]
; CHECK-NEXT:    [[TMP79:%.*]] = add <2 x i32> [[TMP109]], [[TMP82]]
; CHECK-NEXT:    [[TMP115:%.*]] = sub <2 x i32> [[TMP82]], [[TMP109]]
; CHECK-NEXT:    [[TMP78:%.*]] = extractelement <2 x i32> [[TMP79]], i32 0
; CHECK-NEXT:    [[TMP71:%.*]] = extractelement <2 x i32> [[TMP79]], i32 1
; CHECK-NEXT:    [[ADD48_3:%.*]] = add i32 [[TMP71]], [[TMP78]]
; CHECK-NEXT:    [[SUB51_3:%.*]] = sub i32 [[TMP78]], [[TMP71]]
; CHECK-NEXT:    [[TMP80:%.*]] = extractelement <2 x i32> [[TMP115]], i32 0
; CHECK-NEXT:    [[TMP81:%.*]] = extractelement <2 x i32> [[TMP115]], i32 1
; CHECK-NEXT:    [[ADD55_3:%.*]] = add i32 [[TMP81]], [[TMP80]]
; CHECK-NEXT:    [[SUB59_3:%.*]] = sub i32 [[TMP80]], [[TMP81]]
; CHECK-NEXT:    [[ADD94:%.*]] = add i32 [[ADD48_3]], [[ADD48_2]]
; CHECK-NEXT:    [[SUB102:%.*]] = sub i32 [[ADD48_2]], [[ADD48_3]]
; CHECK-NEXT:    [[SHR_I49_3:%.*]] = lshr i32 [[TMP77]], 15
; CHECK-NEXT:    [[AND_I50_3:%.*]] = and i32 [[SHR_I49_3]], 65537
; CHECK-NEXT:    [[MUL_I51_3:%.*]] = mul i32 [[AND_I50_3]], 65535
; CHECK-NEXT:    [[SHR_I_1:%.*]] = lshr i32 [[TMP34]], 15
; CHECK-NEXT:    [[AND_I_1:%.*]] = and i32 [[SHR_I_1]], 65537
; CHECK-NEXT:    [[MUL_I_1:%.*]] = mul i32 [[AND_I_1]], 65535
; CHECK-NEXT:    [[ADD94_1:%.*]] = add i32 [[ADD55_3]], [[ADD55_2]]
; CHECK-NEXT:    [[SUB102_1:%.*]] = sub i32 [[ADD55_2]], [[ADD55_3]]
; CHECK-NEXT:    [[SHR_I_2:%.*]] = lshr i32 [[TMP83]], 15
; CHECK-NEXT:    [[AND_I_2:%.*]] = and i32 [[SHR_I_2]], 65537
; CHECK-NEXT:    [[MUL_I_2:%.*]] = mul i32 [[AND_I_2]], 65535
; CHECK-NEXT:    [[SHR_I49_1:%.*]] = lshr i32 [[TMP84]], 15
; CHECK-NEXT:    [[AND_I50_1:%.*]] = and i32 [[SHR_I49_1]], 65537
; CHECK-NEXT:    [[ADD94_2:%.*]] = mul i32 [[AND_I50_1]], 65535
; CHECK-NEXT:    [[ADD94_6:%.*]] = add i32 [[SUB51_3]], [[SUB51_2]]
; CHECK-NEXT:    [[SHR_I49_2:%.*]] = lshr i32 [[CONV_1]], 15
; CHECK-NEXT:    [[AND_I50_2:%.*]] = and i32 [[SHR_I49_2]], 65537
; CHECK-NEXT:    [[MUL_I51_2:%.*]] = mul i32 [[AND_I50_2]], 65535
; CHECK-NEXT:    [[ADD94_5:%.*]] = add i32 [[SUB59_3]], [[SUB59_2]]
; CHECK-NEXT:    [[SUB102_3:%.*]] = sub i32 [[SUB59_2]], [[SUB59_3]]
; CHECK-NEXT:    [[SHR_I49_4:%.*]] = lshr i32 [[CONV1]], 15
; CHECK-NEXT:    [[AND_I50_4:%.*]] = and i32 [[SHR_I49_4]], 65537
; CHECK-NEXT:    [[MUL_I51_4:%.*]] = mul i32 [[AND_I50_4]], 65535
; CHECK-NEXT:    [[TMP66:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8]], align 1
; CHECK-NEXT:    [[TMP102:%.*]] = zext <2 x i8> [[TMP66]] to <2 x i32>
; CHECK-NEXT:    [[TMP67:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[PIX2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP112:%.*]] = zext <2 x i8> [[TMP67]] to <2 x i32>
; CHECK-NEXT:    [[TMP89:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[TMP1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP90:%.*]] = zext <2 x i8> [[TMP89]] to <2 x i32>
; CHECK-NEXT:    [[TMP91:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP117:%.*]] = zext <2 x i8> [[TMP91]] to <2 x i32>
; CHECK-NEXT:    [[TMP87:%.*]] = sub <2 x i32> [[TMP90]], [[TMP117]]
; CHECK-NEXT:    [[TMP88:%.*]] = shl <2 x i32> [[TMP87]], <i32 16, i32 16>
; CHECK-NEXT:    [[TMP119:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX22]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP121:%.*]] = zext <2 x i8> [[TMP119]] to <2 x i32>
; CHECK-NEXT:    [[TMP128:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX25]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP131:%.*]] = zext <2 x i8> [[TMP128]] to <2 x i32>
; CHECK-NEXT:    [[TMP132:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX27]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT:    [[TMP100:%.*]] = zext <2 x i8> [[TMP132]] to <2 x i32>
; CHECK-NEXT:    [[TMP95:%.*]] = sub <2 x i32> [[TMP131]], [[TMP100]]
; CHECK-NEXT:    [[TMP96:%.*]] = shl <2 x i32> [[TMP95]], <i32 16, i32 16>
; CHECK-NEXT:    [[TMP97:%.*]] = insertelement <2 x i32> [[TMP102]], i32 [[CONV33]], i32 1
; CHECK-NEXT:    [[TMP133:%.*]] = sub <2 x i32> [[TMP97]], [[TMP121]]
; CHECK-NEXT:    [[TMP105:%.*]] = add <2 x i32> [[TMP96]], [[TMP133]]
; CHECK-NEXT:    [[TMP86:%.*]] = insertelement <2 x i32> [[TMP102]], i32 [[CONV1]], i32 0
; CHECK-NEXT:    [[TMP107:%.*]] = sub <2 x i32> [[TMP86]], [[TMP112]]
; CHECK-NEXT:    [[TMP92:%.*]] = add <2 x i32> [[TMP88]], [[TMP107]]
; CHECK-NEXT:    [[TMP93:%.*]] = shufflevector <2 x i32> [[TMP105]], <2 x i32> [[TMP92]], <2 x i32> <i32 0, i32 2>
; CHECK-NEXT:    [[TMP101:%.*]] = sub <2 x i32> [[TMP92]], [[TMP105]]
; CHECK-NEXT:    [[TMP111:%.*]] = extractelement <2 x i32> [[TMP101]], i32 0
; CHECK-NEXT:    [[TMP99:%.*]] = extractelement <2 x i32> [[TMP101]], i32 1
; CHECK-NEXT:    [[ADD55:%.*]] = add i32 [[TMP99]], [[TMP111]]
; CHECK-NEXT:    [[SHR_I59_1:%.*]] = lshr i32 [[TMP99]], 15
; CHECK-NEXT:    [[AND_I60_1:%.*]] = and i32 [[SHR_I59_1]], 65537
; CHECK-NEXT:    [[MUL_I61_1:%.*]] = mul i32 [[AND_I60_1]], 65535
; CHECK-NEXT:    [[TMP104:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8_1]], align 1
; CHECK-NEXT:    [[TMP113:%.*]] = zext <2 x i8> [[TMP104]] to <2 x i32>
; CHECK-NEXT:    [[TMP114:%.*]] = load <2 x i8>, ptr [[ADD_PTR644]], align 1
; CHECK-NEXT:    [[TMP134:%.*]] = zext <2 x i8> [[TMP114]] to <2 x i32>
; CHECK-NEXT:    [[TMP116:%.*]] = load <2 x i8>, ptr [[ARRAYIDX3_1]], align 1
; CHECK-NEXT:    [[TMP145:%.*]] = zext <2 x i8> [[TMP116]] to <2 x i32>
; CHECK-NEXT:    [[TMP118:%.*]] = load <2 x i8>, ptr [[ARRAYIDX5_1]], align 1
; CHECK-NEXT:    [[TMP120:%.*]] = zext <2 x i8> [[TMP118]] to <2 x i32>
; CHECK-NEXT:    [[TMP124:%.*]] = sub <2 x i32> [[TMP145]], [[TMP120]]
; CHECK-NEXT:    [[TMP125:%.*]] = shl <2 x i32> [[TMP124]], <i32 16, i32 16>
; CHECK-NEXT:    [[TMP122:%.*]] = shufflevector <2 x i32> [[TMP113]], <2 x i32> poison, <2 x i32> <i32 poison, i32 0>
; CHECK-NEXT:    [[TMP123:%.*]] = insertelement <2 x i32> [[TMP122]], i32 [[CONV_1]], i32 0
; CHECK-NEXT:    [[TMP157:%.*]] = sub <2 x i32> [[TMP123]], [[TMP134]]
; CHECK-NEXT:    [[TMP156:%.*]] = add <2 x i32> [[TMP125]], [[TMP157]]
; CHECK-NEXT:    [[TMP126:%.*]] = shufflevector <2 x i8> [[TMP7]], <2 x i8> poison, <2 x i32> <i32 1, i32 poison>
; CHECK-NEXT:    [[TMP127:%.*]] = insertelement <2 x i8> [[TMP126]], i8 [[TMP14]], i32 1
; CHECK-NEXT:    [[TMP158:%.*]] = zext <2 x i8> [[TMP127]] to <2 x i32>
; CHECK-NEXT:    [[TMP129:%.*]] = insertelement <2 x i32> [[TMP113]], i32 [[TMP9]], i32 0
; CHECK-NEXT:    [[TMP130:%.*]] = sub <2 x i32> [[TMP129]], [[TMP158]]
; CHECK-NEXT:    [[TMP135:%.*]] = insertelement <2 x i32> [[TMP130]], i32 [[TMP16]], i32 1
; CHECK-NEXT:    [[TMP136:%.*]] = shl <2 x i32> [[TMP135]], <i32 16, i32 16>
; CHECK-NEXT:    [[TMP110:%.*]] = shufflevector <2 x i32> [[TMP130]], <2 x i32> poison, <2 x i32> <i32 1, i32 poison>
; CHECK-NEXT:    [[TMP137:%.*]] = insertelement <2 x i32> [[TMP110]], i32 [[CONV33_1]], i32 1
; CHECK-NEXT:    [[TMP155:%.*]] = add <2 x i32> [[TMP136]], [[TMP137]]
; CHECK-NEXT:    [[TMP139:%.*]] = extractelement <2 x i32> [[TMP156]], i32 0
; CHECK-NEXT:    [[TMP142:%.*]] = extractelement <2 x i32> [[TMP156]], i32 1
; CHECK-NEXT:    [[SUB45_1:%.*]] = sub i32 [[TMP139]], [[TMP142]]
; CHECK-NEXT:    [[TMP138:%.*]] = extractelement <2 x i32> [[TMP155]], i32 0
; CHECK-NEXT:    [[TMP171:%.*]] = extractelement <2 x i32> [[TMP155]], i32 1
; CHECK-NEXT:    [[SUB47_1:%.*]] = sub i32 [[TMP138]], [[TMP171]]
; CHECK-NEXT:    [[TMP140:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP105]], <2 x i32> <i32 1, i32 2>
; CHECK-NEXT:    [[TMP153:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP92]], <2 x i32> <i32 0, i32 2>
; CHECK-NEXT:    [[TMP163:%.*]] = add <2 x i32> [[TMP140]], [[TMP153]]
; CHECK-NEXT:    [[TMP143:%.*]] = shufflevector <2 x i32> [[TMP105]], <2 x i32> [[TMP155]], <2 x i32> <i32 3, i32 1>
; CHECK-NEXT:    [[TMP144:%.*]] = shufflevector <2 x i32> [[TMP92]], <2 x i32> [[TMP155]], <2 x i32> <i32 2, i32 1>
; CHECK-NEXT:    [[TMP165:%.*]] = add <2 x i32> [[TMP143]], [[TMP144]]
; CHECK-NEXT:    [[TMP98:%.*]] = extractelement <2 x i32> [[TMP163]], i32 1
; CHECK-NEXT:    [[TMP146:%.*]] = extractelement <2 x i32> [[TMP165]], i32 1
; CHECK-NEXT:    [[ADD48:%.*]] = add i32 [[TMP146]], [[TMP98]]
; CHECK-NEXT:    [[SHR_I54_1:%.*]] = lshr i32 [[TMP146]], 15
; CHECK-NEXT:    [[AND_I55_1:%.*]] = and i32 [[SHR_I54_1]], 65537
; CHECK-NEXT:    [[MUL_I56_1:%.*]] = mul i32 [[AND_I55_1]], 65535
; CHECK-NEXT:    [[TMP167:%.*]] = extractelement <2 x i32> [[TMP163]], i32 0
; CHECK-NEXT:    [[TMP166:%.*]] = extractelement <2 x i32> [[TMP165]], i32 0
; CHECK-NEXT:    [[ADD48_1:%.*]] = add i32 [[TMP166]], [[TMP167]]
; CHECK-NEXT:    [[TMP141:%.*]] = sub <2 x i32> [[TMP163]], [[TMP165]]
; CHECK-NEXT:    [[ADD55_1:%.*]] = add i32 [[SUB47_1]], [[SUB45_1]]
; CHECK-NEXT:    [[TMP151:%.*]] = shufflevector <2 x i32> [[TMP101]], <2 x i32> poison, <2 x i32> <i32 poison, i32 0>
; CHECK-NEXT:    [[TMP152:%.*]] = insertelement <2 x i32> [[TMP151]], i32 [[SUB45_1]], i32 0
; CHECK-NEXT:    [[TMP154:%.*]] = insertelement <2 x i32> [[TMP101]], i32 [[SUB47_1]], i32 0
; CHECK-NEXT:    [[TMP168:%.*]] = sub <2 x i32> [[TMP152]], [[TMP154]]
; CHECK-NEXT:    [[SHR_I54:%.*]] = lshr i32 [[TMP166]], 15
; CHECK-NEXT:    [[AND_I55:%.*]] = and i32 [[SHR_I54]], 65537
; CHECK-NEXT:    [[MUL_I56:%.*]] = mul i32 [[AND_I55]], 65535
; CHECK-NEXT:    [[SHR_I54_2:%.*]] = lshr i32 [[SUB47_1]], 15
; CHECK-NEXT:    [[AND_I55_2:%.*]] = and i32 [[SHR_I54_2]], 65537
; CHECK-NEXT:    [[MUL_I56_2:%.*]] = mul i32 [[AND_I55_2]], 65535
; CHECK-NEXT:    [[TMP147:%.*]] = lshr <2 x i32> [[TMP113]], <i32 15, i32 15>
; CHECK-NEXT:    [[TMP148:%.*]] = and <2 x i32> [[TMP147]], <i32 65537, i32 65537>
; CHECK-NEXT:    [[TMP149:%.*]] = mul <2 x i32> [[TMP148]], <i32 65535, i32 65535>
; CHECK-NEXT:    [[ADD78:%.*]] = add i32 [[ADD48_1]], [[ADD48]]
; CHECK-NEXT:    [[SUB86:%.*]] = sub i32 [[ADD48]], [[ADD48_1]]
; CHECK-NEXT:    [[ADD103:%.*]] = add i32 [[ADD94]], [[ADD78]]
; CHECK-NEXT:    [[SUB104:%.*]] = sub i32 [[ADD78]], [[ADD94]]
; CHECK-NEXT:    [[ADD105:%.*]] = add i32 [[SUB102]], [[SUB86]]
; CHECK-NEXT:    [[SUB106:%.*]] = sub i32 [[SUB86]], [[SUB102]]
; CHECK-NEXT:    [[ADD_I:%.*]] = add i32 [[MUL_I51_3]], [[ADD103]]
; CHECK-NEXT:    [[XOR_I:%.*]] = xor i32 [[ADD_I]], [[TMP77]]
; CHECK-NEXT:    [[ADD_I52:%.*]] = add i32 [[MUL_I_1]], [[ADD105]]
; CHECK-NEXT:    [[XOR_I53:%.*]] = xor i32 [[ADD_I52]], [[TMP34]]
; CHECK-NEXT:    [[ADD_I57:%.*]] = add i32 [[MUL_I56]], [[SUB104]]
; CHECK-NEXT:    [[XOR_I58:%.*]] = xor i32 [[ADD_I57]], [[TMP166]]
; CHECK-NEXT:    [[ADD_I62:%.*]] = add i32 [[MUL_I56_1]], [[SUB106]]
; CHECK-NEXT:    [[XOR_I63:%.*]] = xor i32 [[ADD_I62]], [[TMP146]]
; CHECK-NEXT:    [[ADD110:%.*]] = add i32 [[XOR_I53]], [[XOR_I]]
; CHECK-NEXT:    [[ADD112:%.*]] = add i32 [[ADD110]], [[XOR_I58]]
; CHECK-NEXT:    [[ADD113:%.*]] = add i32 [[ADD112]], [[XOR_I63]]
; CHECK-NEXT:    [[ADD78_1:%.*]] = add i32 [[ADD55_1]], [[ADD55]]
; CHECK-NEXT:    [[SUB86_1:%.*]] = sub i32 [[ADD55]], [[ADD55_1]]
; CHECK-NEXT:    [[ADD103_1:%.*]] = add i32 [[ADD94_1]], [[ADD78_1]]
; CHECK-NEXT:    [[SUB104_1:%.*]] = sub i32 [[ADD78_1]], [[ADD94_1]]
; CHECK-NEXT:    [[ADD105_1:%.*]] = add i32 [[SUB102_1]], [[SUB86_1]]
; CHECK-NEXT:    [[SUB106_1:%.*]] = sub i32 [[SUB86_1]], [[SUB102_1]]
; CHECK-NEXT:    [[TMP203:%.*]] = add i32 [[MUL_I_2]], [[ADD103_1]]
; CHECK-NEXT:    [[XOR_I_1:%.*]] = xor i32 [[TMP203]], [[TMP83]]
; CHECK-NEXT:    [[ADD_I52_1:%.*]] = add i32 [[ADD94_2]], [[ADD105_1]]
; CHECK-NEXT:    [[XOR_I53_1:%.*]] = xor i32 [[ADD_I52_1]], [[TMP84]]
; CHECK-NEXT:    [[ADD_I57_1:%.*]] = add i32 [[MUL_I56_2]], [[SUB104_1]]
; CHECK-NEXT:    [[XOR_I58_1:%.*]] = xor i32 [[ADD_I57_1]], [[SUB47_1]]
; CHECK-NEXT:    [[ADD_I62_1:%.*]] = add i32 [[MUL_I61_1]], [[SUB106_1]]
; CHECK-NEXT:    [[XOR_I63_1:%.*]] = xor i32 [[ADD_I62_1]], [[TMP99]]
; CHECK-NEXT:    [[ADD108_1:%.*]] = add i32 [[XOR_I53_1]], [[ADD113]]
; CHECK-NEXT:    [[ADD110_1:%.*]] = add i32 [[ADD108_1]], [[XOR_I_1]]
; CHECK-NEXT:    [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[XOR_I58_1]]
; CHECK-NEXT:    [[ADD113_1:%.*]] = add i32 [[ADD112_1]], [[XOR_I63_1]]
; CHECK-NEXT:    [[TMP169:%.*]] = extractelement <2 x i32> [[TMP141]], i32 0
; CHECK-NEXT:    [[TMP160:%.*]] = extractelement <2 x i32> [[TMP141]], i32 1
; CHECK-NEXT:    [[ADD78_2:%.*]] = add i32 [[TMP169]], [[TMP160]]
; CHECK-NEXT:    [[TMP196:%.*]] = insertelement <2 x i32> [[TMP141]], i32 [[SUB51_2]], i32 0
; CHECK-NEXT:    [[TMP194:%.*]] = shufflevector <2 x i32> [[TMP141]], <2 x i32> poison, <2 x i32> <i32 poison, i32 0>
; CHECK-NEXT:    [[TMP195:%.*]] = insertelement <2 x i32> [[TMP194]], i32 [[SUB51_3]], i32 0
; CHECK-NEXT:    [[TMP164:%.*]] = sub <2 x i32> [[TMP196]], [[TMP195]]
; CHECK-NEXT:    [[TMP201:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_2]], i32 0
; CHECK-NEXT:    [[TMP198:%.*]] = shufflevector <2 x i32> [[TMP201]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP199:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_6]], i32 0
; CHECK-NEXT:    [[TMP200:%.*]] = shufflevector <2 x i32> [[TMP199]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP225:%.*]] = add <2 x i32> [[TMP198]], [[TMP200]]
; CHECK-NEXT:    [[TMP226:%.*]] = sub <2 x i32> [[TMP198]], [[TMP200]]
; CHECK-NEXT:    [[TMP227:%.*]] = shufflevector <2 x i32> [[TMP225]], <2 x i32> [[TMP226]], <2 x i32> <i32 0, i32 3>
; CHECK-NEXT:    [[TMP204:%.*]] = extractelement <2 x i32> [[TMP164]], i32 0
; CHECK-NEXT:    [[TMP212:%.*]] = extractelement <2 x i32> [[TMP164]], i32 1
; CHECK-NEXT:    [[ADD105_2:%.*]] = add i32 [[TMP204]], [[TMP212]]
; CHECK-NEXT:    [[SUB106_2:%.*]] = sub i32 [[TMP212]], [[TMP204]]
; CHECK-NEXT:    [[ADD_I52_2:%.*]] = add i32 [[MUL_I51_2]], [[ADD105_2]]
; CHECK-NEXT:    [[XOR_I53_2:%.*]] = xor i32 [[ADD_I52_2]], [[CONV_1]]
; CHECK-NEXT:    [[TMP207:%.*]] = add <2 x i32> [[TMP149]], [[TMP227]]
; CHECK-NEXT:    [[TMP213:%.*]] = xor <2 x i32> [[TMP207]], [[TMP113]]
; CHECK-NEXT:    [[SHR_I59_2:%.*]] = lshr i32 [[TMP98]], 15
; CHECK-NEXT:    [[AND_I60_2:%.*]] = and i32 [[SHR_I59_2]], 65537
; CHECK-NEXT:    [[MUL_I61_2:%.*]] = mul i32 [[AND_I60_2]], 65535
; CHECK-NEXT:    [[ADD_I62_2:%.*]] = add i32 [[MUL_I61_2]], [[SUB106_2]]
; CHECK-NEXT:    [[XOR_I63_2:%.*]] = xor i32 [[ADD_I62_2]], [[TMP98]]
; CHECK-NEXT:    [[ADD108_2:%.*]] = add i32 [[XOR_I53_2]], [[ADD113_1]]
; CHECK-NEXT:    [[TMP176:%.*]] = extractelement <2 x i32> [[TMP213]], i32 0
; CHECK-NEXT:    [[ADD110_2:%.*]] = add i32 [[ADD108_2]], [[TMP176]]
; CHECK-NEXT:    [[TMP177:%.*]] = extractelement <2 x i32> [[TMP213]], i32 1
; CHECK-NEXT:    [[ADD112_2:%.*]] = add i32 [[ADD110_2]], [[TMP177]]
; CHECK-NEXT:    [[ADD113_2:%.*]] = add i32 [[ADD112_2]], [[XOR_I63_2]]
; CHECK-NEXT:    [[SUB59_1:%.*]] = extractelement <2 x i32> [[TMP168]], i32 0
; CHECK-NEXT:    [[SUB59:%.*]] = extractelement <2 x i32> [[TMP168]], i32 1
; CHECK-NEXT:    [[ADD94_3:%.*]] = add i32 [[SUB59_1]], [[SUB59]]
; CHECK-NEXT:    [[SUB86_3:%.*]] = sub i32 [[SUB59]], [[SUB59_1]]
; CHECK-NEXT:    [[TMP223:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_3]], i32 0
; CHECK-NEXT:    [[TMP224:%.*]] = shufflevector <2 x i32> [[TMP223]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP241:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_5]], i32 0
; CHECK-NEXT:    [[TMP242:%.*]] = shufflevector <2 x i32> [[TMP241]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP261:%.*]] = add <2 x i32> [[TMP224]], [[TMP242]]
; CHECK-NEXT:    [[TMP262:%.*]] = sub <2 x i32> [[TMP224]], [[TMP242]]
; CHECK-NEXT:    [[TMP220:%.*]] = shufflevector <2 x i32> [[TMP261]], <2 x i32> [[TMP262]], <2 x i32> <i32 0, i32 3>
; CHECK-NEXT:    [[ADD105_3:%.*]] = add i32 [[SUB102_3]], [[SUB86_3]]
; CHECK-NEXT:    [[SUB106_3:%.*]] = sub i32 [[SUB86_3]], [[SUB102_3]]
; CHECK-NEXT:    [[ADD_I52_3:%.*]] = add i32 [[MUL_I51_4]], [[ADD105_3]]
; CHECK-NEXT:    [[XOR_I53_3:%.*]] = xor i32 [[ADD_I52_3]], [[CONV1]]
; CHECK-NEXT:    [[TMP230:%.*]] = lshr <2 x i32> [[TMP102]], <i32 15, i32 15>
; CHECK-NEXT:    [[TMP231:%.*]] = and <2 x i32> [[TMP230]], <i32 65537, i32 65537>
; CHECK-NEXT:    [[TMP232:%.*]] = mul <2 x i32> [[TMP231]], <i32 65535, i32 65535>
; CHECK-NEXT:    [[TMP150:%.*]] = add <2 x i32> [[TMP232]], [[TMP220]]
; CHECK-NEXT:    [[TMP234:%.*]] = xor <2 x i32> [[TMP150]], [[TMP102]]
; CHECK-NEXT:    [[SHR_I59_3:%.*]] = lshr i32 [[CONV33]], 15
; CHECK-NEXT:    [[AND_I60_3:%.*]] = and i32 [[SHR_I59_3]], 65537
; CHECK-NEXT:    [[MUL_I61_3:%.*]] = mul i32 [[AND_I60_3]], 65535
; CHECK-NEXT:    [[ADD_I62_3:%.*]] = add i32 [[MUL_I61_3]], [[SUB106_3]]
; CHECK-NEXT:    [[XOR_I63_3:%.*]] = xor i32 [[ADD_I62_3]], [[CONV33]]
; CHECK-NEXT:    [[ADD108_3:%.*]] = add i32 [[XOR_I53_3]], [[ADD113_2]]
; CHECK-NEXT:    [[TMP192:%.*]] = extractelement <2 x i32> [[TMP234]], i32 0
; CHECK-NEXT:    [[ADD110_3:%.*]] = add i32 [[ADD108_3]], [[TMP192]]
; CHECK-NEXT:    [[TMP193:%.*]] = extractelement <2 x i32> [[TMP234]], i32 1
; CHECK-NEXT:    [[ADD112_3:%.*]] = add i32 [[ADD110_3]], [[TMP193]]
; CHECK-NEXT:    [[ADD113_3:%.*]] = add i32 [[ADD112_3]], [[XOR_I63_3]]
; CHECK-NEXT:    ret i32 [[ADD113_3]]
;
; THR15-LABEL: define i32 @test(
; THR15-SAME: ptr [[PIX1:%.*]], ptr [[PIX2:%.*]], i64 [[IDX_EXT:%.*]], i64 [[IDX_EXT63:%.*]], ptr [[ADD_PTR:%.*]], ptr [[ADD_PTR64:%.*]]) #[[ATTR0:[0-9]+]] {
; THR15-NEXT:  entry:
; THR15-NEXT:    [[TMP0:%.*]] = load i8, ptr [[PIX1]], align 1
; THR15-NEXT:    [[CONV:%.*]] = zext i8 [[TMP0]] to i32
; THR15-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr i8, ptr [[PIX1]], i64 4
; THR15-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr i8, ptr [[PIX2]], i64 4
; THR15-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr i8, ptr [[PIX1]], i64 1
; THR15-NEXT:    [[ARRAYIDX22:%.*]] = getelementptr i8, ptr [[PIX2]], i64 1
; THR15-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr i8, ptr [[PIX1]], i64 5
; THR15-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr i8, ptr [[PIX2]], i64 5
; THR15-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr i8, ptr [[PIX1]], i64 3
; THR15-NEXT:    [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX32]], align 1
; THR15-NEXT:    [[CONV33:%.*]] = zext i8 [[TMP1]] to i32
; THR15-NEXT:    [[ADD_PTR3:%.*]] = getelementptr i8, ptr [[PIX1]], i64 [[IDX_EXT]]
; THR15-NEXT:    [[ADD_PTR644:%.*]] = getelementptr i8, ptr [[PIX2]], i64 [[IDX_EXT63]]
; THR15-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ADD_PTR3]], align 1
; THR15-NEXT:    [[CONV_1:%.*]] = zext i8 [[TMP2]] to i32
; THR15-NEXT:    [[ARRAYIDX3_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 4
; THR15-NEXT:    [[ARRAYIDX5_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 4
; THR15-NEXT:    [[ARRAYIDX8_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 1
; THR15-NEXT:    [[ARRAYIDX32_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 2
; THR15-NEXT:    [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX32_1]], align 1
; THR15-NEXT:    [[ARRAYIDX25_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 6
; THR15-NEXT:    [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX25_1]], align 1
; THR15-NEXT:    [[ARRAYIDX27_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 6
; THR15-NEXT:    [[TMP5:%.*]] = load i8, ptr [[ARRAYIDX27_1]], align 1
; THR15-NEXT:    [[TMP6:%.*]] = insertelement <2 x i8> poison, i8 [[TMP4]], i32 0
; THR15-NEXT:    [[TMP7:%.*]] = insertelement <2 x i8> [[TMP6]], i8 [[TMP5]], i32 1
; THR15-NEXT:    [[TMP8:%.*]] = extractelement <2 x i8> [[TMP7]], i32 0
; THR15-NEXT:    [[TMP9:%.*]] = zext i8 [[TMP8]] to i32
; THR15-NEXT:    [[ARRAYIDX32_2:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 3
; THR15-NEXT:    [[ARRAYIDX34_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 3
; THR15-NEXT:    [[TMP10:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX32_2]], i64 4, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT:    [[TMP11:%.*]] = zext <2 x i8> [[TMP10]] to <2 x i16>
; THR15-NEXT:    [[TMP12:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX34_1]], i64 4, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT:    [[TMP20:%.*]] = zext <2 x i8> [[TMP12]] to <2 x i16>
; THR15-NEXT:    [[TMP40:%.*]] = sub <2 x i16> [[TMP11]], [[TMP20]]
; THR15-NEXT:    [[TMP46:%.*]] = extractelement <2 x i16> [[TMP40]], i32 1
; THR15-NEXT:    [[TMP16:%.*]] = sext i16 [[TMP46]] to i32
; THR15-NEXT:    [[SHL42_1:%.*]] = shl i32 [[TMP16]], 16
; THR15-NEXT:    [[TMP17:%.*]] = extractelement <2 x i16> [[TMP40]], i32 0
; THR15-NEXT:    [[TMP18:%.*]] = sext i16 [[TMP17]] to i32
; THR15-NEXT:    [[ADD43_1:%.*]] = add i32 [[SHL42_1]], [[TMP18]]
; THR15-NEXT:    [[ADD_PTR_1:%.*]] = getelementptr i8, ptr [[ADD_PTR]], i64 [[IDX_EXT]]
; THR15-NEXT:    [[ADD_PTR64_1:%.*]] = getelementptr i8, ptr [[ADD_PTR64]], i64 [[IDX_EXT63]]
; THR15-NEXT:    [[ARRAYIDX3_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 4
; THR15-NEXT:    [[ARRAYIDX5_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 4
; THR15-NEXT:    [[TMP19:%.*]] = load <2 x i8>, ptr [[ADD_PTR_1]], align 1
; THR15-NEXT:    [[TMP66:%.*]] = zext <2 x i8> [[TMP19]] to <2 x i32>
; THR15-NEXT:    [[TMP21:%.*]] = load <2 x i8>, ptr [[ADD_PTR64_1]], align 1
; THR15-NEXT:    [[TMP22:%.*]] = zext <2 x i8> [[TMP21]] to <2 x i32>
; THR15-NEXT:    [[TMP23:%.*]] = sub <2 x i32> [[TMP66]], [[TMP22]]
; THR15-NEXT:    [[TMP24:%.*]] = load <2 x i8>, ptr [[ARRAYIDX3_2]], align 1
; THR15-NEXT:    [[TMP28:%.*]] = zext <2 x i8> [[TMP24]] to <2 x i32>
; THR15-NEXT:    [[TMP29:%.*]] = load <2 x i8>, ptr [[ARRAYIDX5_2]], align 1
; THR15-NEXT:    [[TMP30:%.*]] = zext <2 x i8> [[TMP29]] to <2 x i32>
; THR15-NEXT:    [[TMP13:%.*]] = sub <2 x i32> [[TMP28]], [[TMP30]]
; THR15-NEXT:    [[TMP14:%.*]] = shl <2 x i32> [[TMP13]], <i32 16, i32 16>
; THR15-NEXT:    [[TMP15:%.*]] = add <2 x i32> [[TMP14]], [[TMP23]]
; THR15-NEXT:    [[ARRAYIDX20_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 2
; THR15-NEXT:    [[ARRAYIDX22_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 2
; THR15-NEXT:    [[ARRAYIDX25_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 6
; THR15-NEXT:    [[ARRAYIDX27_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 6
; THR15-NEXT:    [[TMP31:%.*]] = load <2 x i8>, ptr [[ARRAYIDX20_2]], align 1
; THR15-NEXT:    [[TMP47:%.*]] = zext <2 x i8> [[TMP31]] to <2 x i32>
; THR15-NEXT:    [[TMP33:%.*]] = load <2 x i8>, ptr [[ARRAYIDX22_2]], align 1
; THR15-NEXT:    [[TMP50:%.*]] = zext <2 x i8> [[TMP33]] to <2 x i32>
; THR15-NEXT:    [[TMP35:%.*]] = sub <2 x i32> [[TMP47]], [[TMP50]]
; THR15-NEXT:    [[TMP36:%.*]] = load <2 x i8>, ptr [[ARRAYIDX25_2]], align 1
; THR15-NEXT:    [[TMP53:%.*]] = zext <2 x i8> [[TMP36]] to <2 x i32>
; THR15-NEXT:    [[TMP38:%.*]] = load <2 x i8>, ptr [[ARRAYIDX27_2]], align 1
; THR15-NEXT:    [[TMP39:%.*]] = zext <2 x i8> [[TMP38]] to <2 x i32>
; THR15-NEXT:    [[TMP25:%.*]] = sub <2 x i32> [[TMP53]], [[TMP39]]
; THR15-NEXT:    [[TMP26:%.*]] = shl <2 x i32> [[TMP25]], <i32 16, i32 16>
; THR15-NEXT:    [[TMP27:%.*]] = add <2 x i32> [[TMP26]], [[TMP35]]
; THR15-NEXT:    [[TMP68:%.*]] = extractelement <2 x i32> [[TMP15]], i32 0
; THR15-NEXT:    [[TMP59:%.*]] = extractelement <2 x i32> [[TMP15]], i32 1
; THR15-NEXT:    [[ADD44_2:%.*]] = add i32 [[TMP59]], [[TMP68]]
; THR15-NEXT:    [[SUB45_2:%.*]] = sub i32 [[TMP68]], [[TMP59]]
; THR15-NEXT:    [[TMP76:%.*]] = extractelement <2 x i32> [[TMP27]], i32 0
; THR15-NEXT:    [[TMP60:%.*]] = extractelement <2 x i32> [[TMP27]], i32 1
; THR15-NEXT:    [[ADD46_2:%.*]] = add i32 [[TMP60]], [[TMP76]]
; THR15-NEXT:    [[SUB47_2:%.*]] = sub i32 [[TMP76]], [[TMP60]]
; THR15-NEXT:    [[ADD48_2:%.*]] = add i32 [[ADD46_2]], [[ADD44_2]]
; THR15-NEXT:    [[SUB51_2:%.*]] = sub i32 [[ADD44_2]], [[ADD46_2]]
; THR15-NEXT:    [[ADD55_2:%.*]] = add i32 [[SUB47_2]], [[SUB45_2]]
; THR15-NEXT:    [[SUB59_2:%.*]] = sub i32 [[SUB45_2]], [[SUB47_2]]
; THR15-NEXT:    [[ARRAYIDX3_3:%.*]] = getelementptr i8, ptr null, i64 4
; THR15-NEXT:    [[ARRAYIDX5_3:%.*]] = getelementptr i8, ptr null, i64 4
; THR15-NEXT:    [[TMP32:%.*]] = load <2 x i8>, ptr null, align 1
; THR15-NEXT:    [[TMP48:%.*]] = load i8, ptr null, align 1
; THR15-NEXT:    [[TMP49:%.*]] = zext <2 x i8> [[TMP32]] to <2 x i32>
; THR15-NEXT:    [[TMP63:%.*]] = zext i8 [[TMP48]] to i32
; THR15-NEXT:    [[TMP34:%.*]] = load <2 x i8>, ptr null, align 1
; THR15-NEXT:    [[TMP61:%.*]] = zext <2 x i8> [[TMP34]] to <2 x i32>
; THR15-NEXT:    [[TMP93:%.*]] = sub <2 x i32> [[TMP49]], [[TMP61]]
; THR15-NEXT:    [[TMP37:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3_3]], i64 -4, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT:    [[TMP54:%.*]] = zext <2 x i8> [[TMP37]] to <2 x i32>
; THR15-NEXT:    [[TMP55:%.*]] = load <2 x i8>, ptr [[ARRAYIDX5_3]], align 1
; THR15-NEXT:    [[TMP80:%.*]] = zext <2 x i8> [[TMP55]] to <2 x i32>
; THR15-NEXT:    [[TMP41:%.*]] = sub <2 x i32> [[TMP54]], [[TMP80]]
; THR15-NEXT:    [[TMP42:%.*]] = shl <2 x i32> [[TMP41]], <i32 16, i32 16>
; THR15-NEXT:    [[TMP43:%.*]] = add <2 x i32> [[TMP42]], [[TMP93]]
; THR15-NEXT:    [[ARRAYIDX20_3:%.*]] = getelementptr i8, ptr null, i64 2
; THR15-NEXT:    [[ARRAYIDX22_3:%.*]] = getelementptr i8, ptr null, i64 2
; THR15-NEXT:    [[TMP44:%.*]] = load i8, ptr null, align 1
; THR15-NEXT:    [[ARRAYIDX27_3:%.*]] = getelementptr i8, ptr null, i64 6
; THR15-NEXT:    [[TMP45:%.*]] = load i8, ptr null, align 1
; THR15-NEXT:    [[TMP62:%.*]] = load <2 x i8>, ptr [[ARRAYIDX20_3]], align 1
; THR15-NEXT:    [[TMP83:%.*]] = zext <2 x i8> [[TMP62]] to <2 x i32>
; THR15-NEXT:    [[TMP87:%.*]] = load <2 x i8>, ptr [[ARRAYIDX22_3]], align 1
; THR15-NEXT:    [[TMP98:%.*]] = zext <2 x i8> [[TMP87]] to <2 x i32>
; THR15-NEXT:    [[TMP65:%.*]] = sub <2 x i32> [[TMP83]], [[TMP98]]
; THR15-NEXT:    [[TMP51:%.*]] = insertelement <2 x i8> poison, i8 [[TMP44]], i32 0
; THR15-NEXT:    [[TMP52:%.*]] = insertelement <2 x i8> [[TMP51]], i8 [[TMP45]], i32 1
; THR15-NEXT:    [[TMP99:%.*]] = zext <2 x i8> [[TMP52]] to <2 x i32>
; THR15-NEXT:    [[TMP70:%.*]] = load <2 x i8>, ptr [[ARRAYIDX27_3]], align 1
; THR15-NEXT:    [[TMP101:%.*]] = zext <2 x i8> [[TMP70]] to <2 x i32>
; THR15-NEXT:    [[TMP56:%.*]] = sub <2 x i32> [[TMP99]], [[TMP101]]
; THR15-NEXT:    [[TMP57:%.*]] = shl <2 x i32> [[TMP56]], <i32 16, i32 16>
; THR15-NEXT:    [[TMP58:%.*]] = add <2 x i32> [[TMP57]], [[TMP65]]
; THR15-NEXT:    [[TMP104:%.*]] = extractelement <2 x i32> [[TMP43]], i32 0
; THR15-NEXT:    [[TMP102:%.*]] = extractelement <2 x i32> [[TMP43]], i32 1
; THR15-NEXT:    [[ADD44_3:%.*]] = add i32 [[TMP102]], [[TMP104]]
; THR15-NEXT:    [[SUB45_3:%.*]] = sub i32 [[TMP104]], [[TMP102]]
; THR15-NEXT:    [[TMP107:%.*]] = extractelement <2 x i32> [[TMP58]], i32 0
; THR15-NEXT:    [[TMP78:%.*]] = extractelement <2 x i32> [[TMP58]], i32 1
; THR15-NEXT:    [[ADD46_3:%.*]] = add i32 [[TMP78]], [[TMP107]]
; THR15-NEXT:    [[SUB47_3:%.*]] = sub i32 [[TMP107]], [[TMP78]]
; THR15-NEXT:    [[ADD48_3:%.*]] = add i32 [[ADD46_3]], [[ADD44_3]]
; THR15-NEXT:    [[SUB51_3:%.*]] = sub i32 [[ADD44_3]], [[ADD46_3]]
; THR15-NEXT:    [[ADD55_3:%.*]] = add i32 [[SUB47_3]], [[SUB45_3]]
; THR15-NEXT:    [[SUB59_3:%.*]] = sub i32 [[SUB45_3]], [[SUB47_3]]
; THR15-NEXT:    [[ADD94:%.*]] = add i32 [[ADD48_3]], [[ADD48_2]]
; THR15-NEXT:    [[SUB102:%.*]] = sub i32 [[ADD48_2]], [[ADD48_3]]
; THR15-NEXT:    [[SHR_I:%.*]] = lshr i32 [[TMP63]], 15
; THR15-NEXT:    [[AND_I:%.*]] = and i32 [[SHR_I]], 65537
; THR15-NEXT:    [[MUL_I:%.*]] = mul i32 [[AND_I]], 65535
; THR15-NEXT:    [[SHR_I49:%.*]] = lshr i32 [[ADD46_2]], 15
; THR15-NEXT:    [[AND_I50:%.*]] = and i32 [[SHR_I49]], 65537
; THR15-NEXT:    [[MUL_I51:%.*]] = mul i32 [[AND_I50]], 65535
; THR15-NEXT:    [[ADD94_1:%.*]] = add i32 [[ADD55_3]], [[ADD55_2]]
; THR15-NEXT:    [[SUB102_1:%.*]] = sub i32 [[ADD55_2]], [[ADD55_3]]
; THR15-NEXT:    [[TMP105:%.*]] = extractelement <2 x i32> [[TMP66]], i32 1
; THR15-NEXT:    [[SHR_I_1:%.*]] = lshr i32 [[TMP105]], 15
; THR15-NEXT:    [[AND_I_1:%.*]] = and i32 [[SHR_I_1]], 65537
; THR15-NEXT:    [[MUL_I_1:%.*]] = mul i32 [[AND_I_1]], 65535
; THR15-NEXT:    [[TMP64:%.*]] = extractelement <2 x i32> [[TMP66]], i32 0
; THR15-NEXT:    [[SHR_I49_2:%.*]] = lshr i32 [[TMP64]], 15
; THR15-NEXT:    [[AND_I50_2:%.*]] = and i32 [[SHR_I49_2]], 65537
; THR15-NEXT:    [[MUL_I51_2:%.*]] = mul i32 [[AND_I50_2]], 65535
; THR15-NEXT:    [[ADD94_2:%.*]] = add i32 [[SUB51_3]], [[SUB51_2]]
; THR15-NEXT:    [[SUB102_2:%.*]] = sub i32 [[SUB51_2]], [[SUB51_3]]
; THR15-NEXT:    [[SHR_I49_3:%.*]] = lshr i32 [[CONV_1]], 15
; THR15-NEXT:    [[AND_I50_3:%.*]] = and i32 [[SHR_I49_3]], 65537
; THR15-NEXT:    [[MUL_I51_3:%.*]] = mul i32 [[AND_I50_3]], 65535
; THR15-NEXT:    [[ADD94_3:%.*]] = add i32 [[SUB59_3]], [[SUB59_2]]
; THR15-NEXT:    [[SUB102_3:%.*]] = sub i32 [[SUB59_2]], [[SUB59_3]]
; THR15-NEXT:    [[SHR_I49_4:%.*]] = lshr i32 [[CONV]], 15
; THR15-NEXT:    [[AND_I50_4:%.*]] = and i32 [[SHR_I49_4]], 65537
; THR15-NEXT:    [[MUL_I51_4:%.*]] = mul i32 [[AND_I50_4]], 65535
; THR15-NEXT:    [[TMP81:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8]], align 1
; THR15-NEXT:    [[TMP74:%.*]] = zext <2 x i8> [[TMP81]] to <2 x i32>
; THR15-NEXT:    [[TMP67:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[PIX2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT:    [[TMP108:%.*]] = zext <2 x i8> [[TMP67]] to <2 x i32>
; THR15-NEXT:    [[TMP69:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT:    [[TMP109:%.*]] = zext <2 x i8> [[TMP69]] to <2 x i32>
; THR15-NEXT:    [[TMP71:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT:    [[TMP111:%.*]] = zext <2 x i8> [[TMP71]] to <2 x i32>
; THR15-NEXT:    [[TMP72:%.*]] = sub <2 x i32> [[TMP109]], [[TMP111]]
; THR15-NEXT:    [[TMP73:%.*]] = shl <2 x i32> [[TMP72]], <i32 16, i32 16>
; THR15-NEXT:    [[TMP75:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX22]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT:    [[TMP125:%.*]] = zext <2 x i8> [[TMP75]] to <2 x i32>
; THR15-NEXT:    [[TMP82:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX25]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT:    [[TMP94:%.*]] = zext <2 x i8> [[TMP82]] to <2 x i32>
; THR15-NEXT:    [[TMP79:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX27]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT:    [[TMP96:%.*]] = zext <2 x i8> [[TMP79]] to <2 x i32>
; THR15-NEXT:    [[TMP84:%.*]] = sub <2 x i32> [[TMP94]], [[TMP96]]
; THR15-NEXT:    [[TMP85:%.*]] = shl <2 x i32> [[TMP84]], <i32 16, i32 16>
; THR15-NEXT:    [[TMP86:%.*]] = insertelement <2 x i32> [[TMP74]], i32 [[CONV33]], i32 1
; THR15-NEXT:    [[TMP100:%.*]] = sub <2 x i32> [[TMP86]], [[TMP125]]
; THR15-NEXT:    [[TMP88:%.*]] = add <2 x i32> [[TMP85]], [[TMP100]]
; THR15-NEXT:    [[TMP92:%.*]] = insertelement <2 x i32> [[TMP74]], i32 [[CONV]], i32 0
; THR15-NEXT:    [[TMP120:%.*]] = sub <2 x i32> [[TMP92]], [[TMP108]]
; THR15-NEXT:    [[TMP95:%.*]] = add <2 x i32> [[TMP73]], [[TMP120]]
; THR15-NEXT:    [[TMP97:%.*]] = shufflevector <2 x i32> [[TMP88]], <2 x i32> [[TMP95]], <2 x i32> <i32 0, i32 2>
; THR15-NEXT:    [[TMP77:%.*]] = add <2 x i32> [[TMP88]], [[TMP95]]
; THR15-NEXT:    [[TMP91:%.*]] = sub <2 x i32> [[TMP95]], [[TMP88]]
; THR15-NEXT:    [[TMP89:%.*]] = extractelement <2 x i32> [[TMP77]], i32 0
; THR15-NEXT:    [[TMP90:%.*]] = extractelement <2 x i32> [[TMP77]], i32 1
; THR15-NEXT:    [[ADD48:%.*]] = add i32 [[TMP90]], [[TMP89]]
; THR15-NEXT:    [[SUB51:%.*]] = sub i32 [[TMP89]], [[TMP90]]
; THR15-NEXT:    [[TMP110:%.*]] = extractelement <2 x i32> [[TMP91]], i32 0
; THR15-NEXT:    [[SUB47:%.*]] = extractelement <2 x i32> [[TMP91]], i32 1
; THR15-NEXT:    [[ADD55:%.*]] = add i32 [[SUB47]], [[TMP110]]
; THR15-NEXT:    [[SHR_I59:%.*]] = lshr i32 [[TMP90]], 15
; THR15-NEXT:    [[AND_I60:%.*]] = and i32 [[SHR_I59]], 65537
; THR15-NEXT:    [[MUL_I61:%.*]] = mul i32 [[AND_I60]], 65535
; THR15-NEXT:    [[SHR_I59_1:%.*]] = lshr i32 [[SUB47]], 15
; THR15-NEXT:    [[AND_I60_1:%.*]] = and i32 [[SHR_I59_1]], 65537
; THR15-NEXT:    [[MUL_I61_1:%.*]] = mul i32 [[AND_I60_1]], 65535
; THR15-NEXT:    [[TMP112:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8_1]], align 1
; THR15-NEXT:    [[TMP130:%.*]] = zext <2 x i8> [[TMP112]] to <2 x i32>
; THR15-NEXT:    [[TMP129:%.*]] = load <2 x i8>, ptr [[ADD_PTR644]], align 1
; THR15-NEXT:    [[TMP115:%.*]] = zext <2 x i8> [[TMP129]] to <2 x i32>
; THR15-NEXT:    [[TMP116:%.*]] = load <2 x i8>, ptr [[ARRAYIDX3_1]], align 1
; THR15-NEXT:    [[TMP117:%.*]] = zext <2 x i8> [[TMP116]] to <2 x i32>
; THR15-NEXT:    [[TMP131:%.*]] = load <2 x i8>, ptr [[ARRAYIDX5_1]], align 1
; THR15-NEXT:    [[TMP119:%.*]] = zext <2 x i8> [[TMP131]] to <2 x i32>
; THR15-NEXT:    [[TMP113:%.*]] = sub <2 x i32> [[TMP117]], [[TMP119]]
; THR15-NEXT:    [[TMP114:%.*]] = shl <2 x i32> [[TMP113]], <i32 16, i32 16>
; THR15-NEXT:    [[TMP103:%.*]] = shufflevector <2 x i32> [[TMP130]], <2 x i32> poison, <2 x i32> <i32 poison, i32 0>
; THR15-NEXT:    [[TMP126:%.*]] = insertelement <2 x i32> [[TMP103]], i32 [[CONV_1]], i32 0
; THR15-NEXT:    [[TMP134:%.*]] = sub <2 x i32> [[TMP126]], [[TMP115]]
; THR15-NEXT:    [[TMP121:%.*]] = add <2 x i32> [[TMP114]], [[TMP134]]
; THR15-NEXT:    [[TMP145:%.*]] = shufflevector <2 x i8> [[TMP7]], <2 x i8> poison, <2 x i32> <i32 1, i32 poison>
; THR15-NEXT:    [[TMP127:%.*]] = insertelement <2 x i8> [[TMP145]], i8 [[TMP3]], i32 1
; THR15-NEXT:    [[TMP128:%.*]] = zext <2 x i8> [[TMP127]] to <2 x i32>
; THR15-NEXT:    [[TMP146:%.*]] = insertelement <2 x i32> [[TMP130]], i32 [[TMP9]], i32 0
; THR15-NEXT:    [[TMP106:%.*]] = sub <2 x i32> [[TMP146]], [[TMP128]]
; THR15-NEXT:    [[TMP118:%.*]] = extractelement <2 x i32> [[TMP106]], i32 0
; THR15-NEXT:    [[SHL30_1:%.*]] = shl i32 [[TMP118]], 16
; THR15-NEXT:    [[TMP132:%.*]] = extractelement <2 x i32> [[TMP106]], i32 1
; THR15-NEXT:    [[ADD31_1:%.*]] = add i32 [[SHL30_1]], [[TMP132]]
; THR15-NEXT:    [[TMP133:%.*]] = extractelement <2 x i32> [[TMP121]], i32 0
; THR15-NEXT:    [[TMP147:%.*]] = extractelement <2 x i32> [[TMP121]], i32 1
; THR15-NEXT:    [[SUB45_1:%.*]] = sub i32 [[TMP133]], [[TMP147]]
; THR15-NEXT:    [[TMP135:%.*]] = shufflevector <2 x i32> [[TMP121]], <2 x i32> poison, <2 x i32> <i32 1, i32 poison>
; THR15-NEXT:    [[TMP136:%.*]] = insertelement <2 x i32> [[TMP135]], i32 [[ADD43_1]], i32 1
; THR15-NEXT:    [[TMP137:%.*]] = insertelement <2 x i32> [[TMP121]], i32 [[ADD31_1]], i32 1
; THR15-NEXT:    [[TMP138:%.*]] = add <2 x i32> [[TMP136]], [[TMP137]]
; THR15-NEXT:    [[SUB47_1:%.*]] = sub i32 [[ADD31_1]], [[ADD43_1]]
; THR15-NEXT:    [[TMP139:%.*]] = extractelement <2 x i32> [[TMP138]], i32 0
; THR15-NEXT:    [[TMP140:%.*]] = extractelement <2 x i32> [[TMP138]], i32 1
; THR15-NEXT:    [[ADD48_1:%.*]] = add i32 [[TMP140]], [[TMP139]]
; THR15-NEXT:    [[SUB51_1:%.*]] = sub i32 [[TMP139]], [[TMP140]]
; THR15-NEXT:    [[ADD55_1:%.*]] = add i32 [[SUB47_1]], [[SUB45_1]]
; THR15-NEXT:    [[TMP141:%.*]] = shufflevector <2 x i32> [[TMP91]], <2 x i32> poison, <2 x i32> <i32 poison, i32 0>
; THR15-NEXT:    [[TMP142:%.*]] = insertelement <2 x i32> [[TMP141]], i32 [[SUB45_1]], i32 0
; THR15-NEXT:    [[TMP143:%.*]] = insertelement <2 x i32> [[TMP91]], i32 [[SUB47_1]], i32 0
; THR15-NEXT:    [[TMP144:%.*]] = sub <2 x i32> [[TMP142]], [[TMP143]]
; THR15-NEXT:    [[SHR_I54_1:%.*]] = lshr i32 [[TMP140]], 15
; THR15-NEXT:    [[AND_I55_1:%.*]] = and i32 [[SHR_I54_1]], 65537
; THR15-NEXT:    [[MUL_I56_1:%.*]] = mul i32 [[AND_I55_1]], 65535
; THR15-NEXT:    [[SHR_I54_2:%.*]] = lshr i32 [[SUB47_1]], 15
; THR15-NEXT:    [[AND_I55_2:%.*]] = and i32 [[SHR_I54_2]], 65537
; THR15-NEXT:    [[MUL_I56_2:%.*]] = mul i32 [[AND_I55_2]], 65535
; THR15-NEXT:    [[TMP122:%.*]] = lshr <2 x i32> [[TMP130]], <i32 15, i32 15>
; THR15-NEXT:    [[TMP123:%.*]] = and <2 x i32> [[TMP122]], <i32 65537, i32 65537>
; THR15-NEXT:    [[TMP124:%.*]] = mul <2 x i32> [[TMP123]], <i32 65535, i32 65535>
; THR15-NEXT:    [[ADD78:%.*]] = add i32 [[ADD48_1]], [[ADD48]]
; THR15-NEXT:    [[SUB86:%.*]] = sub i32 [[ADD48]], [[ADD48_1]]
; THR15-NEXT:    [[ADD103:%.*]] = add i32 [[ADD94]], [[ADD78]]
; THR15-NEXT:    [[SUB104:%.*]] = sub i32 [[ADD78]], [[ADD94]]
; THR15-NEXT:    [[ADD105:%.*]] = add i32 [[SUB102]], [[SUB86]]
; THR15-NEXT:    [[SUB106:%.*]] = sub i32 [[SUB86]], [[SUB102]]
; THR15-NEXT:    [[ADD_I:%.*]] = add i32 [[MUL_I]], [[ADD103]]
; THR15-NEXT:    [[XOR_I:%.*]] = xor i32 [[ADD_I]], [[TMP63]]
; THR15-NEXT:    [[ADD_I52:%.*]] = add i32 [[MUL_I51]], [[ADD105]]
; THR15-NEXT:    [[XOR_I53:%.*]] = xor i32 [[ADD_I52]], [[ADD46_2]]
; THR15-NEXT:    [[ADD_I57:%.*]] = add i32 [[MUL_I56_1]], [[SUB104]]
; THR15-NEXT:    [[XOR_I58:%.*]] = xor i32 [[ADD_I57]], [[TMP140]]
; THR15-NEXT:    [[ADD_I62:%.*]] = add i32 [[MUL_I61]], [[SUB106]]
; THR15-NEXT:    [[XOR_I63:%.*]] = xor i32 [[ADD_I62]], [[TMP90]]
; THR15-NEXT:    [[ADD110:%.*]] = add i32 [[XOR_I53]], [[XOR_I]]
; THR15-NEXT:    [[ADD112:%.*]] = add i32 [[ADD110]], [[XOR_I58]]
; THR15-NEXT:    [[ADD113:%.*]] = add i32 [[ADD112]], [[XOR_I63]]
; THR15-NEXT:    [[ADD78_1:%.*]] = add i32 [[ADD55_1]], [[ADD55]]
; THR15-NEXT:    [[SUB86_1:%.*]] = sub i32 [[ADD55]], [[ADD55_1]]
; THR15-NEXT:    [[ADD103_1:%.*]] = add i32 [[ADD94_1]], [[ADD78_1]]
; THR15-NEXT:    [[SUB104_1:%.*]] = sub i32 [[ADD78_1]], [[ADD94_1]]
; THR15-NEXT:    [[ADD105_1:%.*]] = add i32 [[SUB102_1]], [[SUB86_1]]
; THR15-NEXT:    [[SUB106_1:%.*]] = sub i32 [[SUB86_1]], [[SUB102_1]]
; THR15-NEXT:    [[ADD_I_1:%.*]] = add i32 [[MUL_I_1]], [[ADD103_1]]
; THR15-NEXT:    [[XOR_I_1:%.*]] = xor i32 [[ADD_I_1]], [[TMP105]]
; THR15-NEXT:    [[ADD_I52_1:%.*]] = add i32 [[MUL_I51_2]], [[ADD105_1]]
; THR15-NEXT:    [[XOR_I53_1:%.*]] = xor i32 [[ADD_I52_1]], [[TMP64]]
; THR15-NEXT:    [[ADD_I57_1:%.*]] = add i32 [[MUL_I56_2]], [[SUB104_1]]
; THR15-NEXT:    [[XOR_I58_1:%.*]] = xor i32 [[ADD_I57_1]], [[SUB47_1]]
; THR15-NEXT:    [[ADD_I62_1:%.*]] = add i32 [[MUL_I61_1]], [[SUB106_1]]
; THR15-NEXT:    [[XOR_I63_1:%.*]] = xor i32 [[ADD_I62_1]], [[SUB47]]
; THR15-NEXT:    [[ADD108_1:%.*]] = add i32 [[XOR_I53_1]], [[ADD113]]
; THR15-NEXT:    [[ADD110_1:%.*]] = add i32 [[ADD108_1]], [[XOR_I_1]]
; THR15-NEXT:    [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[XOR_I58_1]]
; THR15-NEXT:    [[ADD113_1:%.*]] = add i32 [[ADD112_1]], [[XOR_I63_1]]
; THR15-NEXT:    [[ADD78_2:%.*]] = add i32 [[SUB51_1]], [[SUB51]]
; THR15-NEXT:    [[SUB86_2:%.*]] = sub i32 [[SUB51]], [[SUB51_1]]
; THR15-NEXT:    [[TMP152:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_2]], i32 0
; THR15-NEXT:    [[TMP153:%.*]] = shufflevector <2 x i32> [[TMP152]], <2 x i32> poison, <2 x i32> zeroinitializer
; THR15-NEXT:    [[TMP154:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_2]], i32 0
; THR15-NEXT:    [[TMP155:%.*]] = shufflevector <2 x i32> [[TMP154]], <2 x i32> poison, <2 x i32> zeroinitializer
; THR15-NEXT:    [[TMP156:%.*]] = add <2 x i32> [[TMP153]], [[TMP155]]
; THR15-NEXT:    [[TMP157:%.*]] = sub <2 x i32> [[TMP153]], [[TMP155]]
; THR15-NEXT:    [[TMP158:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP157]], <2 x i32> <i32 0, i32 3>
; THR15-NEXT:    [[ADD105_2:%.*]] = add i32 [[SUB102_2]], [[SUB86_2]]
; THR15-NEXT:    [[SUB106_2:%.*]] = sub i32 [[SUB86_2]], [[SUB102_2]]
; THR15-NEXT:    [[ADD_I52_2:%.*]] = add i32 [[MUL_I51_3]], [[ADD105_2]]
; THR15-NEXT:    [[XOR_I53_2:%.*]] = xor i32 [[ADD_I52_2]], [[CONV_1]]
; THR15-NEXT:    [[TMP177:%.*]] = add <2 x i32> [[TMP124]], [[TMP158]]
; THR15-NEXT:    [[TMP160:%.*]] = xor <2 x i32> [[TMP177]], [[TMP130]]
; THR15-NEXT:    [[SHR_I59_2:%.*]] = lshr i32 [[TMP89]], 15
; THR15-NEXT:    [[AND_I60_2:%.*]] = and i32 [[SHR_I59_2]], 65537
; THR15-NEXT:    [[MUL_I61_2:%.*]] = mul i32 [[AND_I60_2]], 65535
; THR15-NEXT:    [[ADD_I62_2:%.*]] = add i32 [[MUL_I61_2]], [[SUB106_2]]
; THR15-NEXT:    [[XOR_I63_2:%.*]] = xor i32 [[ADD_I62_2]], [[TMP89]]
; THR15-NEXT:    [[ADD108_2:%.*]] = add i32 [[XOR_I53_2]], [[ADD113_1]]
; THR15-NEXT:    [[TMP161:%.*]] = extractelement <2 x i32> [[TMP160]], i32 0
; THR15-NEXT:    [[ADD110_2:%.*]] = add i32 [[ADD108_2]], [[TMP161]]
; THR15-NEXT:    [[TMP162:%.*]] = extractelement <2 x i32> [[TMP160]], i32 1
; THR15-NEXT:    [[ADD112_2:%.*]] = add i32 [[ADD110_2]], [[TMP162]]
; THR15-NEXT:    [[ADD113_2:%.*]] = add i32 [[ADD112_2]], [[XOR_I63_2]]
; THR15-NEXT:    [[TMP159:%.*]] = extractelement <2 x i32> [[TMP144]], i32 0
; THR15-NEXT:    [[TMP176:%.*]] = extractelement <2 x i32> [[TMP144]], i32 1
; THR15-NEXT:    [[ADD78_3:%.*]] = add i32 [[TMP159]], [[TMP176]]
; THR15-NEXT:    [[SUB86_3:%.*]] = sub i32 [[TMP176]], [[TMP159]]
; THR15-NEXT:    [[TMP163:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_3]], i32 0
; THR15-NEXT:    [[TMP164:%.*]] = shufflevector <2 x i32> [[TMP163]], <2 x i32> poison, <2 x i32> zeroinitializer
; THR15-NEXT:    [[TMP165:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_3]], i32 0
; THR15-NEXT:    [[TMP166:%.*]] = shufflevector <2 x i32> [[TMP165]], <2 x i32> poison, <2 x i32> zeroinitializer
; THR15-NEXT:    [[TMP167:%.*]] = add <2 x i32> [[TMP164]], [[TMP166]]
; THR15-NEXT:    [[TMP168:%.*]] = sub <2 x i32> [[TMP164]], [[TMP166]]
; THR15-NEXT:    [[TMP169:%.*]] = shufflevector <2 x i32> [[TMP167]], <2 x i32> [[TMP168]], <2 x i32> <i32 0, i32 3>
; THR15-NEXT:    [[ADD105_3:%.*]] = add i32 [[SUB102_3]], [[SUB86_3]]
; THR15-NEXT:    [[SUB106_3:%.*]] = sub i32 [[SUB86_3]], [[SUB102_3]]
; THR15-NEXT:    [[ADD_I52_3:%.*]] = add i32 [[MUL_I51_4]], [[ADD105_3]]
; THR15-NEXT:    [[XOR_I53_3:%.*]] = xor i32 [[ADD_I52_3]], [[CONV]]
; THR15-NEXT:    [[TMP170:%.*]] = lshr <2 x i32> [[TMP74]], <i32 15, i32 15>
; THR15-NEXT:    [[TMP171:%.*]] = and <2 x i32> [[TMP170]], <i32 65537, i32 65537>
; THR15-NEXT:    [[TMP172:%.*]] = mul <2 x i32> [[TMP171]], <i32 65535, i32 65535>
; THR15-NEXT:    [[TMP173:%.*]] = add <2 x i32> [[TMP172]], [[TMP169]]
; THR15-NEXT:    [[TMP174:%.*]] = xor <2 x i32> [[TMP173]], [[TMP74]]
; THR15-NEXT:    [[SHR_I59_3:%.*]] = lshr i32 [[CONV33]], 15
; THR15-NEXT:    [[AND_I60_3:%.*]] = and i32 [[SHR_I59_3]], 65537
; THR15-NEXT:    [[MUL_I61_3:%.*]] = mul i32 [[AND_I60_3]], 65535
; THR15-NEXT:    [[ADD_I62_3:%.*]] = add i32 [[MUL_I61_3]], [[SUB106_3]]
; THR15-NEXT:    [[XOR_I63_3:%.*]] = xor i32 [[ADD_I62_3]], [[CONV33]]
; THR15-NEXT:    [[ADD108_3:%.*]] = add i32 [[XOR_I53_3]], [[ADD113_2]]
; THR15-NEXT:    [[TMP175:%.*]] = extractelement <2 x i32> [[TMP174]], i32 0
; THR15-NEXT:    [[ADD110_3:%.*]] = add i32 [[ADD108_3]], [[TMP175]]
; THR15-NEXT:    [[TMP178:%.*]] = extractelement <2 x i32> [[TMP174]], i32 1
; THR15-NEXT:    [[ADD112_3:%.*]] = add i32 [[ADD110_3]], [[TMP178]]
; THR15-NEXT:    [[ADD113_3:%.*]] = add i32 [[ADD112_3]], [[XOR_I63_3]]
; THR15-NEXT:    ret i32 [[ADD113_3]]
;
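; Scalar input IR: i8 pixel loads widened to i32, combined through shl/add and
; add/sub butterfly stages that the SLP vectorizer turns into the vector code
; checked above.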
entry:
  %0 = load i8, ptr %pix1, align 1
  %conv = zext i8 %0 to i32
  %1 = load i8, ptr %pix2, align 1
  %conv2 = zext i8 %1 to i32
  %sub = sub i32 %conv, %conv2
  %arrayidx3 = getelementptr i8, ptr %pix1, i64 4
  %2 = load i8, ptr %arrayidx3, align 1
  %conv4 = zext i8 %2 to i32
  %arrayidx5 = getelementptr i8, ptr %pix2, i64 4
  %3 = load i8, ptr %arrayidx5, align 1
  %conv6 = zext i8 %3 to i32
  %sub7 = sub i32 %conv4, %conv6
  %shl = shl i32 %sub7, 16
  %add = add i32 %shl, %sub
  %arrayidx8 = getelementptr i8, ptr %pix1, i64 1
  %4 = load i8, ptr %arrayidx8, align 1
  %conv9 = zext i8 %4 to i32
  %arrayidx10 = getelementptr i8, ptr %pix2, i64 1
  %5 = load i8, ptr %arrayidx10, align 1
  %conv11 = zext i8 %5 to i32
  %sub12 = sub i32 %conv9, %conv11
  %arrayidx13 = getelementptr i8, ptr %pix1, i64 5
  %6 = load i8, ptr %arrayidx13, align 1
  %conv14 = zext i8 %6 to i32
  %arrayidx15 = getelementptr i8, ptr %pix2, i64 5
  %7 = load i8, ptr %arrayidx15, align 1
  %conv16 = zext i8 %7 to i32
  %sub17 = sub i32 %conv14, %conv16
  %shl18 = shl i32 %sub17, 16
  %add19 = add i32 %shl18, %sub12
  %arrayidx20 = getelementptr i8, ptr %pix1, i64 2
  %8 = load i8, ptr %arrayidx20, align 1
  %conv21 = zext i8 %8 to i32
  %arrayidx22 = getelementptr i8, ptr %pix2, i64 2
  %9 = load i8, ptr %arrayidx22, align 1
  %conv23 = zext i8 %9 to i32
  %sub24 = sub i32 %conv21, %conv23
  %arrayidx25 = getelementptr i8, ptr %pix1, i64 6
  %10 = load i8, ptr %arrayidx25, align 1
  %conv26 = zext i8 %10 to i32
  %arrayidx27 = getelementptr i8, ptr %pix2, i64 6
  %11 = load i8, ptr %arrayidx27, align 1
  %conv28 = zext i8 %11 to i32
  %sub29 = sub i32 %conv26, %conv28
  %shl30 = shl i32 %sub29, 16
  %add31 = add i32 %shl30, %sub24
  %arrayidx32 = getelementptr i8, ptr %pix1, i64 3
  %12 = load i8, ptr %arrayidx32, align 1
  %conv33 = zext i8 %12 to i32
  %arrayidx34 = getelementptr i8, ptr %pix2, i64 3
  %13 = load i8, ptr %arrayidx34, align 1
  %conv35 = zext i8 %13 to i32
  %sub36 = sub i32 %conv33, %conv35
  %arrayidx37 = getelementptr i8, ptr %pix1, i64 7
  %14 = load i8, ptr %arrayidx37, align 1
  %conv38 = zext i8 %14 to i32
  %arrayidx39 = getelementptr i8, ptr %pix2, i64 7
  %15 = load i8, ptr %arrayidx39, align 1
  %conv40 = zext i8 %15 to i32
  %sub41 = sub i32 %conv38, %conv40
  %shl42 = shl i32 %sub41, 16
  %add43 = add i32 %shl42, %sub36
  %add44 = add i32 %add19, %add
  %sub45 = sub i32 %add, %add19
  %add46 = add i32 %add43, %add31
  %sub47 = sub i32 %add31, %add43
  %add48 = add i32 %add46, %add44
  %sub51 = sub i32 %add44, %add46
  %add55 = add i32 %sub47, %sub45
  %sub59 = sub i32 %sub45, %sub47
  %add.ptr3 = getelementptr i8, ptr %pix1, i64 %idx.ext
  %add.ptr644 = getelementptr i8, ptr %pix2, i64 %idx.ext63
  %16 = load i8, ptr %add.ptr3, align 1
  %conv.1 = zext i8 %16 to i32
  %17 = load i8, ptr %add.ptr644, align 1
  %conv2.1 = zext i8 %17 to i32
  %sub.1 = sub i32 %conv.1, %conv2.1
  %arrayidx3.1 = getelementptr i8, ptr %add.ptr3, i64 4
  %18 = load i8, ptr %arrayidx3.1, align 1
  %conv4.1 = zext i8 %18 to i32
  %arrayidx5.1 = getelementptr i8, ptr %add.ptr644, i64 4
  %19 = load i8, ptr %arrayidx5.1, align 1
  %conv6.1 = zext i8 %19 to i32
  %sub7.1 = sub i32 %conv4.1, %conv6.1
  %shl.1 = shl i32 %sub7.1, 16
  %add.1 = add i32 %shl.1, %sub.1
  %arrayidx8.1 = getelementptr i8, ptr %add.ptr3, i64 1
  %20 = load i8, ptr %arrayidx8.1, align 1
  %conv9.1 = zext i8 %20 to i32
  %arrayidx10.1 = getelementptr i8, ptr %add.ptr644, i64 1
  %21 = load i8, ptr %arrayidx10.1, align 1
  %conv11.1 = zext i8 %21 to i32
  %sub12.1 = sub i32 %conv9.1, %conv11.1
  %arrayidx13.1 = getelementptr i8, ptr %add.ptr3, i64 5
  %22 = load i8, ptr %arrayidx13.1, align 1
  %conv14.1 = zext i8 %22 to i32
  %arrayidx15.1 = getelementptr i8, ptr %add.ptr644, i64 5
  %23 = load i8, ptr %arrayidx15.1, align 1
  %conv16.1 = zext i8 %23 to i32
  %sub17.1 = sub i32 %conv14.1, %conv16.1
  %shl18.1 = shl i32 %sub17.1, 16
  %add19.1 = add i32 %shl18.1, %sub12.1
  %arrayidx20.1 = getelementptr i8, ptr %add.ptr3, i64 2
  %24 = load i8, ptr %arrayidx20.1, align 1
  %conv21.1 = zext i8 %24 to i32
  %arrayidx22.1 = getelementptr i8, ptr %add.ptr644, i64 2
  %25 = load i8, ptr %arrayidx22.1, align 1
  %conv23.1 = zext i8 %25 to i32
  %sub24.1 = sub i32 %conv21.1, %conv23.1
  %arrayidx25.1 = getelementptr i8, ptr %add.ptr3, i64 6
  %26 = load i8, ptr %arrayidx25.1, align 1
  %conv26.1 = zext i8 %26 to i32
  %arrayidx27.1 = getelementptr i8, ptr %add.ptr644, i64 6
  %27 = load i8, ptr %arrayidx27.1, align 1
  %conv28.1 = zext i8 %27 to i32
  %sub29.1 = sub i32 %conv26.1, %conv28.1
  %shl30.1 = shl i32 %sub29.1, 16
  %add31.1 = add i32 %shl30.1, %sub24.1
  %arrayidx32.1 = getelementptr i8, ptr %add.ptr3, i64 3
  %28 = load i8, ptr %arrayidx32.1, align 1
  %conv33.1 = zext i8 %28 to i32
  %arrayidx34.1 = getelementptr i8, ptr %add.ptr644, i64 3
  %29 = load i8, ptr %arrayidx34.1, align 1
  %conv35.1 = zext i8 %29 to i32
  %sub36.1 = sub i32 %conv33.1, %conv35.1
  %arrayidx37.1 = getelementptr i8, ptr %add.ptr3, i64 7
  %30 = load i8, ptr %arrayidx37.1, align 1
  %conv38.1 = zext i8 %30 to i32
  %arrayidx39.1 = getelementptr i8, ptr %add.ptr644, i64 7
  %31 = load i8, ptr %arrayidx39.1, align 1
  %conv40.1 = zext i8 %31 to i32
  %sub41.1 = sub i32 %conv38.1, %conv40.1
  %shl42.1 = shl i32 %sub41.1, 16
  %add43.1 = add i32 %shl42.1, %sub36.1
  %add44.1 = add i32 %add19.1, %add.1
  %sub45.1 = sub i32 %add.1, %add19.1
  %add46.1 = add i32 %add43.1, %add31.1
  %sub47.1 = sub i32 %add31.1, %add43.1
  %add48.1 = add i32 %add46.1, %add44.1
  %sub51.1 = sub i32 %add44.1, %add46.1
  %add55.1 = add i32 %sub47.1, %sub45.1
  %sub59.1 = sub i32 %sub45.1, %sub47.1
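  ; Row 2: same pattern again, starting from %add.ptr and %add.ptr64 advanced
  ; by the strides.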
  %add.ptr.1 = getelementptr i8, ptr %add.ptr, i64 %idx.ext
  %add.ptr64.1 = getelementptr i8, ptr %add.ptr64, i64 %idx.ext63
  %32 = load i8, ptr %add.ptr.1, align 1
  %conv.2 = zext i8 %32 to i32
  %33 = load i8, ptr %add.ptr64.1, align 1
  %conv2.2 = zext i8 %33 to i32
  %sub.2 = sub i32 %conv.2, %conv2.2
  %arrayidx3.2 = getelementptr i8, ptr %add.ptr.1, i64 4
  %34 = load i8, ptr %arrayidx3.2, align 1
  %conv4.2 = zext i8 %34 to i32
  %arrayidx5.2 = getelementptr i8, ptr %add.ptr64.1, i64 4
  %35 = load i8, ptr %arrayidx5.2, align 1
  %conv6.2 = zext i8 %35 to i32
  %sub7.2 = sub i32 %conv4.2, %conv6.2
  %shl.2 = shl i32 %sub7.2, 16
  %add.2 = add i32 %shl.2, %sub.2
  %arrayidx8.2 = getelementptr i8, ptr %add.ptr.1, i64 1
  %36 = load i8, ptr %arrayidx8.2, align 1
  %conv9.2 = zext i8 %36 to i32
  %arrayidx10.2 = getelementptr i8, ptr %add.ptr64.1, i64 1
  %37 = load i8, ptr %arrayidx10.2, align 1
  %conv11.2 = zext i8 %37 to i32
  %sub12.2 = sub i32 %conv9.2, %conv11.2
  %arrayidx13.2 = getelementptr i8, ptr %add.ptr.1, i64 5
  %38 = load i8, ptr %arrayidx13.2, align 1
  %conv14.2 = zext i8 %38 to i32
  %arrayidx15.2 = getelementptr i8, ptr %add.ptr64.1, i64 5
  %39 = load i8, ptr %arrayidx15.2, align 1
  %conv16.2 = zext i8 %39 to i32
  %sub17.2 = sub i32 %conv14.2, %conv16.2
  %shl18.2 = shl i32 %sub17.2, 16
  %add19.2 = add i32 %shl18.2, %sub12.2
  %arrayidx20.2 = getelementptr i8, ptr %add.ptr.1, i64 2
  %40 = load i8, ptr %arrayidx20.2, align 1
  %conv21.2 = zext i8 %40 to i32
  %arrayidx22.2 = getelementptr i8, ptr %add.ptr64.1, i64 2
  %41 = load i8, ptr %arrayidx22.2, align 1
  %conv23.2 = zext i8 %41 to i32
  %sub24.2 = sub i32 %conv21.2, %conv23.2
  %arrayidx25.2 = getelementptr i8, ptr %add.ptr.1, i64 6
  %42 = load i8, ptr %arrayidx25.2, align 1
  %conv26.2 = zext i8 %42 to i32
  %arrayidx27.2 = getelementptr i8, ptr %add.ptr64.1, i64 6
  %43 = load i8, ptr %arrayidx27.2, align 1
  %conv28.2 = zext i8 %43 to i32
  %sub29.2 = sub i32 %conv26.2, %conv28.2
  %shl30.2 = shl i32 %sub29.2, 16
  %add31.2 = add i32 %shl30.2, %sub24.2
  %arrayidx32.2 = getelementptr i8, ptr %add.ptr.1, i64 3
  %44 = load i8, ptr %arrayidx32.2, align 1
  %conv33.2 = zext i8 %44 to i32
  %arrayidx34.2 = getelementptr i8, ptr %add.ptr64.1, i64 3
  %45 = load i8, ptr %arrayidx34.2, align 1
  %conv35.2 = zext i8 %45 to i32
  %sub36.2 = sub i32 %conv33.2, %conv35.2
  %arrayidx37.2 = getelementptr i8, ptr %add.ptr.1, i64 7
  %46 = load i8, ptr %arrayidx37.2, align 1
  %conv38.2 = zext i8 %46 to i32
  %arrayidx39.2 = getelementptr i8, ptr %add.ptr64.1, i64 7
  %47 = load i8, ptr %arrayidx39.2, align 1
  %conv40.2 = zext i8 %47 to i32
  %sub41.2 = sub i32 %conv38.2, %conv40.2
  %shl42.2 = shl i32 %sub41.2, 16
  %add43.2 = add i32 %shl42.2, %sub36.2
  %add44.2 = add i32 %add19.2, %add.2
  %sub45.2 = sub i32 %add.2, %add19.2
  %add46.2 = add i32 %add43.2, %add31.2
  %sub47.2 = sub i32 %add31.2, %add43.2
  %add48.2 = add i32 %add46.2, %add44.2
  %sub51.2 = sub i32 %add44.2, %add46.2
  %add55.2 = add i32 %sub47.2, %sub45.2
  %sub59.2 = sub i32 %sub45.2, %sub47.2
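  ; Row 3: the loads below go through null pointers and repeated offsets;
  ; presumably an artifact of test-case reduction rather than meaningful code.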
  %48 = load i8, ptr null, align 1
  %conv.3 = zext i8 %48 to i32
  %49 = load i8, ptr null, align 1
  %conv2.3 = zext i8 %49 to i32
  %sub.3 = sub i32 %conv.3, %conv2.3
  %arrayidx3.3 = getelementptr i8, ptr null, i64 4
  %50 = load i8, ptr %arrayidx3.3, align 1
  %conv4.3 = zext i8 %50 to i32
  %arrayidx5.3 = getelementptr i8, ptr null, i64 4
  %51 = load i8, ptr %arrayidx5.3, align 1
  %conv6.3 = zext i8 %51 to i32
  %sub7.3 = sub i32 %conv4.3, %conv6.3
  %shl.3 = shl i32 %sub7.3, 16
  %add.3 = add i32 %shl.3, %sub.3
  %arrayidx8.3 = getelementptr i8, ptr null, i64 1
  %52 = load i8, ptr %arrayidx8.3, align 1
  %conv9.3 = zext i8 %52 to i32
  %arrayidx10.3 = getelementptr i8, ptr null, i64 1
  %53 = load i8, ptr %arrayidx10.3, align 1
  %conv11.3 = zext i8 %53 to i32
  %sub12.3 = sub i32 %conv9.3, %conv11.3
  %54 = load i8, ptr null, align 1
  %conv14.3 = zext i8 %54 to i32
  %arrayidx15.3 = getelementptr i8, ptr null, i64 5
  %55 = load i8, ptr %arrayidx15.3, align 1
  %conv16.3 = zext i8 %55 to i32
  %sub17.3 = sub i32 %conv14.3, %conv16.3
  %shl18.3 = shl i32 %sub17.3, 16
  %add19.3 = add i32 %shl18.3, %sub12.3
  %arrayidx20.3 = getelementptr i8, ptr null, i64 2
  %56 = load i8, ptr %arrayidx20.3, align 1
  %conv21.3 = zext i8 %56 to i32
  %arrayidx22.3 = getelementptr i8, ptr null, i64 2
  %57 = load i8, ptr %arrayidx22.3, align 1
  %conv23.3 = zext i8 %57 to i32
  %sub24.3 = sub i32 %conv21.3, %conv23.3
  %58 = load i8, ptr null, align 1
  %conv26.3 = zext i8 %58 to i32
  %arrayidx27.3 = getelementptr i8, ptr null, i64 6
  %59 = load i8, ptr %arrayidx27.3, align 1
  %conv28.3 = zext i8 %59 to i32
  %sub29.3 = sub i32 %conv26.3, %conv28.3
  %shl30.3 = shl i32 %sub29.3, 16
  %add31.3 = add i32 %shl30.3, %sub24.3
  %arrayidx32.3 = getelementptr i8, ptr null, i64 3
  %60 = load i8, ptr %arrayidx32.3, align 1
  %conv33.3 = zext i8 %60 to i32
  %arrayidx34.3 = getelementptr i8, ptr null, i64 3
  %61 = load i8, ptr %arrayidx34.3, align 1
  %conv35.3 = zext i8 %61 to i32
  %sub36.3 = sub i32 %conv33.3, %conv35.3
  %62 = load i8, ptr null, align 1
  %conv38.3 = zext i8 %62 to i32
  %arrayidx39.3 = getelementptr i8, ptr null, i64 7
  %63 = load i8, ptr %arrayidx39.3, align 1
  %conv40.3 = zext i8 %63 to i32
  %sub41.3 = sub i32 %conv38.3, %conv40.3
  %shl42.3 = shl i32 %sub41.3, 16
  %add43.3 = add i32 %shl42.3, %sub36.3
  %add44.3 = add i32 %add19.3, %add.3
  %sub45.3 = sub i32 %add.3, %add19.3
  %add46.3 = add i32 %add43.3, %add31.3
  %sub47.3 = sub i32 %add31.3, %add43.3
  %add48.3 = add i32 %add46.3, %add44.3
  %sub51.3 = sub i32 %add44.3, %add46.3
  %add55.3 = add i32 %sub47.3, %sub45.3
  %sub59.3 = sub i32 %sub45.3, %sub47.3
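  ; Vertical pass: butterfly the four per-row %add48* results, then apply the
  ; lshr/and/mul/add/xor sequence (a packed-abs-style correction on the two
  ; 16-bit halves) and accumulate into %add113.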
  %add78 = add i32 %add48.1, %add48
  %sub86 = sub i32 %add48, %add48.1
  %add94 = add i32 %add48.3, %add48.2
  %sub102 = sub i32 %add48.2, %add48.3
  %add103 = add i32 %add94, %add78
  %sub104 = sub i32 %add78, %add94
  %add105 = add i32 %sub102, %sub86
  %sub106 = sub i32 %sub86, %sub102
  %shr.i = lshr i32 %conv.3, 15
  %and.i = and i32 %shr.i, 65537
  %mul.i = mul i32 %and.i, 65535
  %add.i = add i32 %mul.i, %add103
  %xor.i = xor i32 %add.i, %conv.3
  %shr.i49 = lshr i32 %add46.2, 15
  %and.i50 = and i32 %shr.i49, 65537
  %mul.i51 = mul i32 %and.i50, 65535
  %add.i52 = add i32 %mul.i51, %add105
  %xor.i53 = xor i32 %add.i52, %add46.2
  %shr.i54 = lshr i32 %add46.1, 15
  %and.i55 = and i32 %shr.i54, 65537
  %mul.i56 = mul i32 %and.i55, 65535
  %add.i57 = add i32 %mul.i56, %sub104
  %xor.i58 = xor i32 %add.i57, %add46.1
  %shr.i59 = lshr i32 %add46, 15
  %and.i60 = and i32 %shr.i59, 65537
  %mul.i61 = mul i32 %and.i60, 65535
  %add.i62 = add i32 %mul.i61, %sub106
  %xor.i63 = xor i32 %add.i62, %add46
  %add110 = add i32 %xor.i53, %xor.i
  %add112 = add i32 %add110, %xor.i58
  %add113 = add i32 %add112, %xor.i63
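  ; The three groups below repeat the vertical butterfly and packed-abs-style
  ; pattern for the %add55*, %sub51*, and %sub59* results, chaining the running
  ; sum through %add113.1, %add113.2, and %add113.3.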
  %add78.1 = add i32 %add55.1, %add55
  %sub86.1 = sub i32 %add55, %add55.1
  %add94.1 = add i32 %add55.3, %add55.2
  %sub102.1 = sub i32 %add55.2, %add55.3
  %add103.1 = add i32 %add94.1, %add78.1
  %sub104.1 = sub i32 %add78.1, %add94.1
  %add105.1 = add i32 %sub102.1, %sub86.1
  %sub106.1 = sub i32 %sub86.1, %sub102.1
  %shr.i.1 = lshr i32 %conv9.2, 15
  %and.i.1 = and i32 %shr.i.1, 65537
  %mul.i.1 = mul i32 %and.i.1, 65535
  %add.i.1 = add i32 %mul.i.1, %add103.1
  %xor.i.1 = xor i32 %add.i.1, %conv9.2
  %shr.i49.1 = lshr i32 %conv.2, 15
  %and.i50.1 = and i32 %shr.i49.1, 65537
  %mul.i51.1 = mul i32 %and.i50.1, 65535
  %add.i52.1 = add i32 %mul.i51.1, %add105.1
  %xor.i53.1 = xor i32 %add.i52.1, %conv.2
  %shr.i54.1 = lshr i32 %sub47.1, 15
  %and.i55.1 = and i32 %shr.i54.1, 65537
  %mul.i56.1 = mul i32 %and.i55.1, 65535
  %add.i57.1 = add i32 %mul.i56.1, %sub104.1
  %xor.i58.1 = xor i32 %add.i57.1, %sub47.1
  %shr.i59.1 = lshr i32 %sub47, 15
  %and.i60.1 = and i32 %shr.i59.1, 65537
  %mul.i61.1 = mul i32 %and.i60.1, 65535
  %add.i62.1 = add i32 %mul.i61.1, %sub106.1
  %xor.i63.1 = xor i32 %add.i62.1, %sub47
  %add108.1 = add i32 %xor.i53.1, %add113
  %add110.1 = add i32 %add108.1, %xor.i.1
  %add112.1 = add i32 %add110.1, %xor.i58.1
  %add113.1 = add i32 %add112.1, %xor.i63.1
  %add78.2 = add i32 %sub51.1, %sub51
  %sub86.2 = sub i32 %sub51, %sub51.1
  %add94.2 = add i32 %sub51.3, %sub51.2
  %sub102.2 = sub i32 %sub51.2, %sub51.3
  %add103.2 = add i32 %add94.2, %add78.2
  %sub104.2 = sub i32 %add78.2, %add94.2
  %add105.2 = add i32 %sub102.2, %sub86.2
  %sub106.2 = sub i32 %sub86.2, %sub102.2
  %shr.i.2 = lshr i32 %conv9.1, 15
  %and.i.2 = and i32 %shr.i.2, 65537
  %mul.i.2 = mul i32 %and.i.2, 65535
  %add.i.2 = add i32 %mul.i.2, %add103.2
  %xor.i.2 = xor i32 %add.i.2, %conv9.1
  %shr.i49.2 = lshr i32 %conv.1, 15
  %and.i50.2 = and i32 %shr.i49.2, 65537
  %mul.i51.2 = mul i32 %and.i50.2, 65535
  %add.i52.2 = add i32 %mul.i51.2, %add105.2
  %xor.i53.2 = xor i32 %add.i52.2, %conv.1
  %shr.i54.2 = lshr i32 %conv21.1, 15
  %and.i55.2 = and i32 %shr.i54.2, 65537
  %mul.i56.2 = mul i32 %and.i55.2, 65535
  %add.i57.2 = add i32 %mul.i56.2, %sub104.2
  %xor.i58.2 = xor i32 %add.i57.2, %conv21.1
  %shr.i59.2 = lshr i32 %add44, 15
  %and.i60.2 = and i32 %shr.i59.2, 65537
  %mul.i61.2 = mul i32 %and.i60.2, 65535
  %add.i62.2 = add i32 %mul.i61.2, %sub106.2
  %xor.i63.2 = xor i32 %add.i62.2, %add44
  %add108.2 = add i32 %xor.i53.2, %add113.1
  %add110.2 = add i32 %add108.2, %xor.i.2
  %add112.2 = add i32 %add110.2, %xor.i58.2
  %add113.2 = add i32 %add112.2, %xor.i63.2
  %add78.3 = add i32 %sub59.1, %sub59
  %sub86.3 = sub i32 %sub59, %sub59.1
  %add94.3 = add i32 %sub59.3, %sub59.2
  %sub102.3 = sub i32 %sub59.2, %sub59.3
  %add103.3 = add i32 %add94.3, %add78.3
  %sub104.3 = sub i32 %add78.3, %add94.3
  %add105.3 = add i32 %sub102.3, %sub86.3
  %sub106.3 = sub i32 %sub86.3, %sub102.3
  %shr.i.3 = lshr i32 %conv9, 15
  %and.i.3 = and i32 %shr.i.3, 65537
  %mul.i.3 = mul i32 %and.i.3, 65535
  %add.i.3 = add i32 %mul.i.3, %add103.3
  %xor.i.3 = xor i32 %add.i.3, %conv9
  %shr.i49.3 = lshr i32 %conv, 15
  %and.i50.3 = and i32 %shr.i49.3, 65537
  %mul.i51.3 = mul i32 %and.i50.3, 65535
  %add.i52.3 = add i32 %mul.i51.3, %add105.3
  %xor.i53.3 = xor i32 %add.i52.3, %conv
  %shr.i54.3 = lshr i32 %conv21, 15
  %and.i55.3 = and i32 %shr.i54.3, 65537
  %mul.i56.3 = mul i32 %and.i55.3, 65535
  %add.i57.3 = add i32 %mul.i56.3, %sub104.3
  %xor.i58.3 = xor i32 %add.i57.3, %conv21
  %shr.i59.3 = lshr i32 %conv33, 15
  %and.i60.3 = and i32 %shr.i59.3, 65537
  %mul.i61.3 = mul i32 %and.i60.3, 65535
  %add.i62.3 = add i32 %mul.i61.3, %sub106.3
  %xor.i63.3 = xor i32 %add.i62.3, %conv33
  %add108.3 = add i32 %xor.i53.3, %add113.2
  %add110.3 = add i32 %add108.3, %xor.i.3
  %add112.3 = add i32 %add110.3, %xor.i58.3
  %add113.3 = add i32 %add112.3, %xor.i63.3
  ret i32 %add113.3
}