; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt -S -mtriple riscv64-unknown-linux-gnu < %s --passes=slp-vectorizer -mattr=+v -slp-threshold=-20 | FileCheck %s
; RUN: opt -S -mtriple riscv64-unknown-linux-gnu < %s --passes=slp-vectorizer -mattr=+v -slp-threshold=-15 | FileCheck %s --check-prefix=THR15
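;
; The two RUN lines lower the SLP cost threshold to -20 and -15 so vectorization
; is forced at different levels of aggressiveness; the CHECK and THR15 prefixes
; then show how the threshold changes which scalar i8 loads get combined into
; @llvm.experimental.vp.strided.load calls versus plain <2 x i8> loads. The
; scalar IR below resembles a SATD-style difference computation over the
; %pix1/%pix2 rows. The assertions are autogenerated; regenerate them with
; utils/update_test_checks.py rather than editing them by hand.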
define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.ptr, ptr %add.ptr64) {
; CHECK-LABEL: define i32 @test(
; CHECK-SAME: ptr [[PIX1:%.*]], ptr [[PIX2:%.*]], i64 [[IDX_EXT:%.*]], i64 [[IDX_EXT63:%.*]], ptr [[ADD_PTR:%.*]], ptr [[ADD_PTR64:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[PIX1]], align 1
; CHECK-NEXT: [[CONV1:%.*]] = zext i8 [[TMP0]] to i32
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[PIX1]], i64 4
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr i8, ptr [[PIX2]], i64 4
; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr i8, ptr [[PIX1]], i64 1
; CHECK-NEXT: [[ARRAYIDX22:%.*]] = getelementptr i8, ptr [[PIX2]], i64 1
; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr i8, ptr [[PIX1]], i64 5
; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr i8, ptr [[PIX2]], i64 5
; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr i8, ptr [[PIX1]], i64 3
; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr [[ARRAYIDX32]], align 1
; CHECK-NEXT: [[CONV33:%.*]] = zext i8 [[TMP10]] to i32
; CHECK-NEXT: [[ADD_PTR3:%.*]] = getelementptr i8, ptr [[PIX1]], i64 [[IDX_EXT]]
; CHECK-NEXT: [[ADD_PTR644:%.*]] = getelementptr i8, ptr [[PIX2]], i64 [[IDX_EXT63]]
; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr [[ADD_PTR3]], align 1
; CHECK-NEXT: [[CONV_1:%.*]] = zext i8 [[TMP11]] to i32
; CHECK-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 4
; CHECK-NEXT: [[ARRAYIDX5_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 4
; CHECK-NEXT: [[ARRAYIDX8_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 1
; CHECK-NEXT: [[ARRAYIDX22_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 1
; CHECK-NEXT: [[ARRAYIDX25_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 5
; CHECK-NEXT: [[ARRAYIDX27_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 5
; CHECK-NEXT: [[ARRAYIDX32_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 3
; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr [[ARRAYIDX32_1]], align 1
; CHECK-NEXT: [[CONV33_1:%.*]] = zext i8 [[TMP14]] to i32
; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr i8, ptr [[ADD_PTR]], i64 [[IDX_EXT]]
; CHECK-NEXT: [[ADD_PTR64_1:%.*]] = getelementptr i8, ptr [[ADD_PTR64]], i64 [[IDX_EXT63]]
; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 4
; CHECK-NEXT: [[ARRAYIDX5_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 4
; CHECK-NEXT: [[ARRAYIDX8_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 1
; CHECK-NEXT: [[ARRAYIDX10_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 1
; CHECK-NEXT: [[ARRAYIDX13_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 5
; CHECK-NEXT: [[ARRAYIDX15_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 5
; CHECK-NEXT: [[TMP4:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ADD_PTR_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP16:%.*]] = zext <2 x i8> [[TMP4]] to <2 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ADD_PTR64_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP7:%.*]] = zext <2 x i8> [[TMP6]] to <2 x i32>
; CHECK-NEXT: [[TMP8:%.*]] = sub <2 x i32> [[TMP16]], [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP13:%.*]] = zext <2 x i8> [[TMP9]] to <2 x i32>
; CHECK-NEXT: [[TMP26:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP12:%.*]] = zext <2 x i8> [[TMP26]] to <2 x i32>
; CHECK-NEXT: [[TMP24:%.*]] = sub <2 x i32> [[TMP13]], [[TMP12]]
; CHECK-NEXT: [[TMP25:%.*]] = shl <2 x i32> [[TMP24]], <i32 16, i32 16>
; CHECK-NEXT: [[TMP15:%.*]] = add <2 x i32> [[TMP25]], [[TMP8]]
; CHECK-NEXT: [[TMP28:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX8_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP17:%.*]] = zext <2 x i8> [[TMP28]] to <2 x i32>
; CHECK-NEXT: [[TMP18:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX10_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP19:%.*]] = zext <2 x i8> [[TMP18]] to <2 x i32>
; CHECK-NEXT: [[TMP20:%.*]] = sub <2 x i32> [[TMP17]], [[TMP19]]
; CHECK-NEXT: [[TMP21:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX13_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP22:%.*]] = zext <2 x i8> [[TMP21]] to <2 x i32>
; CHECK-NEXT: [[TMP23:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX15_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP30:%.*]] = zext <2 x i8> [[TMP23]] to <2 x i32>
; CHECK-NEXT: [[TMP36:%.*]] = sub <2 x i32> [[TMP22]], [[TMP30]]
; CHECK-NEXT: [[TMP37:%.*]] = shl <2 x i32> [[TMP36]], <i32 16, i32 16>
; CHECK-NEXT: [[TMP27:%.*]] = add <2 x i32> [[TMP37]], [[TMP20]]
; CHECK-NEXT: [[TMP38:%.*]] = add <2 x i32> [[TMP27]], [[TMP15]]
; CHECK-NEXT: [[TMP29:%.*]] = sub <2 x i32> [[TMP15]], [[TMP27]]
; CHECK-NEXT: [[SUB45_2:%.*]] = extractelement <2 x i32> [[TMP38]], i32 0
; CHECK-NEXT: [[SUB47_2:%.*]] = extractelement <2 x i32> [[TMP38]], i32 1
; CHECK-NEXT: [[ADD48_2:%.*]] = add i32 [[SUB47_2]], [[SUB45_2]]
; CHECK-NEXT: [[SUB51_2:%.*]] = sub i32 [[SUB45_2]], [[SUB47_2]]
; CHECK-NEXT: [[TMP32:%.*]] = extractelement <2 x i32> [[TMP29]], i32 0
; CHECK-NEXT: [[TMP34:%.*]] = extractelement <2 x i32> [[TMP29]], i32 1
; CHECK-NEXT: [[ADD55_2:%.*]] = add i32 [[TMP34]], [[TMP32]]
; CHECK-NEXT: [[SUB59_2:%.*]] = sub i32 [[TMP32]], [[TMP34]]
; CHECK-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr i8, ptr null, i64 4
; CHECK-NEXT: [[ARRAYIDX5_3:%.*]] = getelementptr i8, ptr null, i64 4
; CHECK-NEXT: [[ARRAYIDX8_3:%.*]] = getelementptr i8, ptr null, i64 1
; CHECK-NEXT: [[ARRAYIDX10_3:%.*]] = getelementptr i8, ptr null, i64 1
; CHECK-NEXT: [[TMP44:%.*]] = load i8, ptr null, align 1
; CHECK-NEXT: [[ARRAYIDX15_3:%.*]] = getelementptr i8, ptr null, i64 5
; CHECK-NEXT: [[TMP43:%.*]] = load i8, ptr null, align 1
; CHECK-NEXT: [[TMP53:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 null, i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP58:%.*]] = zext <2 x i8> [[TMP53]] to <2 x i32>
; CHECK-NEXT: [[TMP54:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 null, i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP39:%.*]] = zext <2 x i8> [[TMP54]] to <2 x i32>
; CHECK-NEXT: [[TMP40:%.*]] = sub <2 x i32> [[TMP58]], [[TMP39]]
; CHECK-NEXT: [[TMP41:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3_3]], i64 -4, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP42:%.*]] = zext <2 x i8> [[TMP41]] to <2 x i32>
; CHECK-NEXT: [[TMP59:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5_3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP62:%.*]] = zext <2 x i8> [[TMP59]] to <2 x i32>
; CHECK-NEXT: [[TMP45:%.*]] = sub <2 x i32> [[TMP42]], [[TMP62]]
; CHECK-NEXT: [[TMP46:%.*]] = shl <2 x i32> [[TMP45]], <i32 16, i32 16>
; CHECK-NEXT: [[TMP68:%.*]] = add <2 x i32> [[TMP46]], [[TMP40]]
; CHECK-NEXT: [[TMP48:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX8_3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP49:%.*]] = zext <2 x i8> [[TMP48]] to <2 x i32>
; CHECK-NEXT: [[TMP50:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX10_3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP51:%.*]] = zext <2 x i8> [[TMP50]] to <2 x i32>
; CHECK-NEXT: [[TMP52:%.*]] = sub <2 x i32> [[TMP49]], [[TMP51]]
; CHECK-NEXT: [[TMP64:%.*]] = insertelement <2 x i8> poison, i8 [[TMP44]], i32 0
; CHECK-NEXT: [[TMP65:%.*]] = insertelement <2 x i8> [[TMP64]], i8 [[TMP43]], i32 1
; CHECK-NEXT: [[TMP55:%.*]] = zext <2 x i8> [[TMP65]] to <2 x i32>
; CHECK-NEXT: [[TMP56:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX15_3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP57:%.*]] = zext <2 x i8> [[TMP56]] to <2 x i32>
; CHECK-NEXT: [[TMP69:%.*]] = sub <2 x i32> [[TMP55]], [[TMP57]]
; CHECK-NEXT: [[TMP70:%.*]] = shl <2 x i32> [[TMP69]], <i32 16, i32 16>
; CHECK-NEXT: [[TMP60:%.*]] = add <2 x i32> [[TMP70]], [[TMP52]]
; CHECK-NEXT: [[TMP47:%.*]] = add <2 x i32> [[TMP60]], [[TMP68]]
; CHECK-NEXT: [[TMP33:%.*]] = sub <2 x i32> [[TMP68]], [[TMP60]]
; CHECK-NEXT: [[TMP61:%.*]] = extractelement <2 x i32> [[TMP47]], i32 0
; CHECK-NEXT: [[TMP79:%.*]] = extractelement <2 x i32> [[TMP47]], i32 1
; CHECK-NEXT: [[ADD48_3:%.*]] = add i32 [[TMP79]], [[TMP61]]
; CHECK-NEXT: [[SUB51_3:%.*]] = sub i32 [[TMP61]], [[TMP79]]
; CHECK-NEXT: [[TMP63:%.*]] = extractelement <2 x i32> [[TMP33]], i32 0
; CHECK-NEXT: [[TMP71:%.*]] = extractelement <2 x i32> [[TMP33]], i32 1
; CHECK-NEXT: [[ADD55_3:%.*]] = add i32 [[TMP71]], [[TMP63]]
; CHECK-NEXT: [[SUB59_3:%.*]] = sub i32 [[TMP63]], [[TMP71]]
; CHECK-NEXT: [[ADD95:%.*]] = add i32 [[ADD48_3]], [[ADD48_2]]
; CHECK-NEXT: [[SUB102:%.*]] = sub i32 [[ADD48_2]], [[ADD48_3]]
; CHECK-NEXT: [[TMP77:%.*]] = extractelement <2 x i32> [[TMP58]], i32 0
; CHECK-NEXT: [[SHR_I49_3:%.*]] = lshr i32 [[TMP77]], 15
; CHECK-NEXT: [[AND_I50_3:%.*]] = and i32 [[SHR_I49_3]], 65537
; CHECK-NEXT: [[MUL_I51_3:%.*]] = mul i32 [[AND_I50_3]], 65535
; CHECK-NEXT: [[SHR_I_1:%.*]] = lshr i32 [[SUB47_2]], 15
; CHECK-NEXT: [[AND_I_1:%.*]] = and i32 [[SHR_I_1]], 65537
; CHECK-NEXT: [[MUL_I_1:%.*]] = mul i32 [[AND_I_1]], 65535
; CHECK-NEXT: [[ADD94_1:%.*]] = add i32 [[ADD55_3]], [[ADD55_2]]
; CHECK-NEXT: [[SUB102_1:%.*]] = sub i32 [[ADD55_2]], [[ADD55_3]]
; CHECK-NEXT: [[TMP107:%.*]] = extractelement <2 x i32> [[TMP16]], i32 0
; CHECK-NEXT: [[SHR_I49_1:%.*]] = lshr i32 [[TMP107]], 15
; CHECK-NEXT: [[AND_I50_1:%.*]] = and i32 [[SHR_I49_1]], 65537
; CHECK-NEXT: [[MUL_I51_1:%.*]] = mul i32 [[AND_I50_1]], 65535
; CHECK-NEXT: [[ADD94_4:%.*]] = add i32 [[SUB51_3]], [[SUB51_2]]
; CHECK-NEXT: [[SUB102_2:%.*]] = sub i32 [[SUB51_2]], [[SUB51_3]]
; CHECK-NEXT: [[SHR_I49_6:%.*]] = lshr i32 [[CONV_1]], 15
; CHECK-NEXT: [[AND_I50_6:%.*]] = and i32 [[SHR_I49_6]], 65537
; CHECK-NEXT: [[MUL_I51_6:%.*]] = mul i32 [[AND_I50_6]], 65535
; CHECK-NEXT: [[ADD94_5:%.*]] = add i32 [[SUB59_3]], [[SUB59_2]]
; CHECK-NEXT: [[SUB102_3:%.*]] = sub i32 [[SUB59_2]], [[SUB59_3]]
; CHECK-NEXT: [[SHR_I49_4:%.*]] = lshr i32 [[CONV1]], 15
; CHECK-NEXT: [[AND_I50_4:%.*]] = and i32 [[SHR_I49_4]], 65537
; CHECK-NEXT: [[MUL_I51_4:%.*]] = mul i32 [[AND_I50_4]], 65535
; CHECK-NEXT: [[TMP66:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8]], align 1
; CHECK-NEXT: [[TMP102:%.*]] = zext <2 x i8> [[TMP66]] to <2 x i32>
; CHECK-NEXT: [[TMP67:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[PIX2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP72:%.*]] = zext <2 x i8> [[TMP67]] to <2 x i32>
; CHECK-NEXT: [[TMP73:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[TMP1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP74:%.*]] = zext <2 x i8> [[TMP73]] to <2 x i32>
; CHECK-NEXT: [[TMP75:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP76:%.*]] = zext <2 x i8> [[TMP75]] to <2 x i32>
; CHECK-NEXT: [[TMP87:%.*]] = sub <2 x i32> [[TMP74]], [[TMP76]]
; CHECK-NEXT: [[TMP88:%.*]] = shl <2 x i32> [[TMP87]], <i32 16, i32 16>
; CHECK-NEXT: [[TMP85:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX22]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP80:%.*]] = zext <2 x i8> [[TMP85]] to <2 x i32>
; CHECK-NEXT: [[TMP81:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX25]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP82:%.*]] = zext <2 x i8> [[TMP81]] to <2 x i32>
; CHECK-NEXT: [[TMP83:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX27]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP84:%.*]] = zext <2 x i8> [[TMP83]] to <2 x i32>
; CHECK-NEXT: [[TMP95:%.*]] = sub <2 x i32> [[TMP82]], [[TMP84]]
; CHECK-NEXT: [[TMP96:%.*]] = shl <2 x i32> [[TMP95]], <i32 16, i32 16>
; CHECK-NEXT: [[TMP97:%.*]] = insertelement <2 x i32> [[TMP102]], i32 [[CONV33]], i32 1
; CHECK-NEXT: [[TMP89:%.*]] = sub <2 x i32> [[TMP97]], [[TMP80]]
; CHECK-NEXT: [[TMP105:%.*]] = add <2 x i32> [[TMP96]], [[TMP89]]
; CHECK-NEXT: [[TMP86:%.*]] = insertelement <2 x i32> [[TMP102]], i32 [[CONV1]], i32 0
; CHECK-NEXT: [[TMP100:%.*]] = sub <2 x i32> [[TMP86]], [[TMP72]]
; CHECK-NEXT: [[TMP92:%.*]] = add <2 x i32> [[TMP88]], [[TMP100]]
; CHECK-NEXT: [[TMP93:%.*]] = shufflevector <2 x i32> [[TMP105]], <2 x i32> [[TMP92]], <2 x i32> <i32 0, i32 2>
; CHECK-NEXT: [[TMP91:%.*]] = add <2 x i32> [[TMP105]], [[TMP92]]
; CHECK-NEXT: [[TMP101:%.*]] = sub <2 x i32> [[TMP92]], [[TMP105]]
; CHECK-NEXT: [[TMP94:%.*]] = extractelement <2 x i32> [[TMP91]], i32 0
; CHECK-NEXT: [[SUB47:%.*]] = extractelement <2 x i32> [[TMP91]], i32 1
; CHECK-NEXT: [[ADD78:%.*]] = add i32 [[SUB47]], [[TMP94]]
; CHECK-NEXT: [[SUB51:%.*]] = sub i32 [[TMP94]], [[SUB47]]
; CHECK-NEXT: [[TMP98:%.*]] = extractelement <2 x i32> [[TMP101]], i32 0
; CHECK-NEXT: [[TMP99:%.*]] = extractelement <2 x i32> [[TMP101]], i32 1
; CHECK-NEXT: [[ADD55:%.*]] = add i32 [[TMP99]], [[TMP98]]
; CHECK-NEXT: [[SUB59:%.*]] = sub i32 [[TMP98]], [[TMP99]]
; CHECK-NEXT: [[SHR_I59:%.*]] = lshr i32 [[SUB47]], 15
; CHECK-NEXT: [[AND_I60:%.*]] = and i32 [[SHR_I59]], 65537
; CHECK-NEXT: [[MUL_I61:%.*]] = mul i32 [[AND_I60]], 65535
; CHECK-NEXT: [[SHR_I59_1:%.*]] = lshr i32 [[TMP99]], 15
; CHECK-NEXT: [[AND_I60_1:%.*]] = and i32 [[SHR_I59_1]], 65537
; CHECK-NEXT: [[MUL_I61_1:%.*]] = mul i32 [[AND_I60_1]], 65535
; CHECK-NEXT: [[TMP104:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8_1]], align 1
; CHECK-NEXT: [[TMP110:%.*]] = zext <2 x i8> [[TMP104]] to <2 x i32>
; CHECK-NEXT: [[TMP108:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ADD_PTR644]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP103:%.*]] = zext <2 x i8> [[TMP108]] to <2 x i32>
; CHECK-NEXT: [[TMP109:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP116:%.*]] = zext <2 x i8> [[TMP109]] to <2 x i32>
; CHECK-NEXT: [[TMP106:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP118:%.*]] = zext <2 x i8> [[TMP106]] to <2 x i32>
; CHECK-NEXT: [[TMP124:%.*]] = sub <2 x i32> [[TMP116]], [[TMP118]]
; CHECK-NEXT: [[TMP125:%.*]] = shl <2 x i32> [[TMP124]], <i32 16, i32 16>
; CHECK-NEXT: [[TMP121:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX22_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP111:%.*]] = zext <2 x i8> [[TMP121]] to <2 x i32>
; CHECK-NEXT: [[TMP112:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX25_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP113:%.*]] = zext <2 x i8> [[TMP112]] to <2 x i32>
; CHECK-NEXT: [[TMP114:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX27_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; CHECK-NEXT: [[TMP115:%.*]] = zext <2 x i8> [[TMP114]] to <2 x i32>
; CHECK-NEXT: [[TMP135:%.*]] = sub <2 x i32> [[TMP113]], [[TMP115]]
; CHECK-NEXT: [[TMP136:%.*]] = shl <2 x i32> [[TMP135]], <i32 16, i32 16>
; CHECK-NEXT: [[TMP137:%.*]] = insertelement <2 x i32> [[TMP110]], i32 [[CONV33_1]], i32 1
; CHECK-NEXT: [[TMP119:%.*]] = sub <2 x i32> [[TMP137]], [[TMP111]]
; CHECK-NEXT: [[TMP120:%.*]] = add <2 x i32> [[TMP136]], [[TMP119]]
; CHECK-NEXT: [[TMP117:%.*]] = insertelement <2 x i32> [[TMP110]], i32 [[CONV_1]], i32 0
; CHECK-NEXT: [[TMP122:%.*]] = sub <2 x i32> [[TMP117]], [[TMP103]]
; CHECK-NEXT: [[TMP123:%.*]] = add <2 x i32> [[TMP125]], [[TMP122]]
; CHECK-NEXT: [[TMP143:%.*]] = add <2 x i32> [[TMP120]], [[TMP123]]
; CHECK-NEXT: [[TMP156:%.*]] = sub <2 x i32> [[TMP123]], [[TMP120]]
; CHECK-NEXT: [[TMP145:%.*]] = extractelement <2 x i32> [[TMP143]], i32 0
; CHECK-NEXT: [[TMP146:%.*]] = extractelement <2 x i32> [[TMP143]], i32 1
; CHECK-NEXT: [[ADD94:%.*]] = add i32 [[TMP146]], [[TMP145]]
; CHECK-NEXT: [[SUB51_1:%.*]] = sub i32 [[TMP145]], [[TMP146]]
; CHECK-NEXT: [[TMP180:%.*]] = extractelement <2 x i32> [[TMP156]], i32 0
; CHECK-NEXT: [[TMP142:%.*]] = extractelement <2 x i32> [[TMP156]], i32 1
; CHECK-NEXT: [[ADD55_1:%.*]] = add i32 [[TMP142]], [[TMP180]]
; CHECK-NEXT: [[SUB59_1:%.*]] = sub i32 [[TMP180]], [[TMP142]]
; CHECK-NEXT: [[SHR_I54_1:%.*]] = lshr i32 [[TMP146]], 15
; CHECK-NEXT: [[AND_I55_1:%.*]] = and i32 [[SHR_I54_1]], 65537
; CHECK-NEXT: [[MUL_I56_1:%.*]] = mul i32 [[AND_I55_1]], 65535
; CHECK-NEXT: [[TMP147:%.*]] = lshr <2 x i32> [[TMP110]], <i32 15, i32 15>
; CHECK-NEXT: [[TMP148:%.*]] = and <2 x i32> [[TMP147]], <i32 65537, i32 65537>
; CHECK-NEXT: [[TMP149:%.*]] = mul <2 x i32> [[TMP148]], <i32 65535, i32 65535>
; CHECK-NEXT: [[ADD79:%.*]] = add i32 [[ADD94]], [[ADD78]]
; CHECK-NEXT: [[SUB104:%.*]] = sub i32 [[ADD78]], [[ADD94]]
; CHECK-NEXT: [[ADD103:%.*]] = add i32 [[ADD95]], [[ADD79]]
; CHECK-NEXT: [[SUB105:%.*]] = sub i32 [[ADD79]], [[ADD95]]
; CHECK-NEXT: [[ADD105:%.*]] = add i32 [[SUB102]], [[SUB104]]
; CHECK-NEXT: [[SUB106:%.*]] = sub i32 [[SUB104]], [[SUB102]]
; CHECK-NEXT: [[ADD_I:%.*]] = add i32 [[MUL_I51_3]], [[ADD103]]
; CHECK-NEXT: [[XOR_I:%.*]] = xor i32 [[ADD_I]], [[TMP77]]
; CHECK-NEXT: [[ADD_I52:%.*]] = add i32 [[MUL_I_1]], [[ADD105]]
; CHECK-NEXT: [[XOR_I53:%.*]] = xor i32 [[ADD_I52]], [[SUB47_2]]
; CHECK-NEXT: [[ADD_I57:%.*]] = add i32 [[MUL_I56_1]], [[SUB105]]
; CHECK-NEXT: [[XOR_I58:%.*]] = xor i32 [[ADD_I57]], [[TMP146]]
; CHECK-NEXT: [[ADD_I62:%.*]] = add i32 [[MUL_I61]], [[SUB106]]
; CHECK-NEXT: [[XOR_I63:%.*]] = xor i32 [[ADD_I62]], [[SUB47]]
; CHECK-NEXT: [[ADD110:%.*]] = add i32 [[XOR_I53]], [[XOR_I]]
; CHECK-NEXT: [[ADD112:%.*]] = add i32 [[ADD110]], [[XOR_I58]]
; CHECK-NEXT: [[ADD113:%.*]] = add i32 [[ADD112]], [[XOR_I63]]
; CHECK-NEXT: [[ADD78_1:%.*]] = add i32 [[ADD55_1]], [[ADD55]]
; CHECK-NEXT: [[SUB86_1:%.*]] = sub i32 [[ADD55]], [[ADD55_1]]
; CHECK-NEXT: [[ADD105_1:%.*]] = add i32 [[SUB102_1]], [[SUB86_1]]
; CHECK-NEXT: [[SUB106_1:%.*]] = sub i32 [[SUB86_1]], [[SUB102_1]]
; CHECK-NEXT: [[ADD_I52_1:%.*]] = add i32 [[MUL_I51_1]], [[ADD105_1]]
; CHECK-NEXT: [[XOR_I53_1:%.*]] = xor i32 [[ADD_I52_1]], [[TMP107]]
; CHECK-NEXT: [[TMP129:%.*]] = shufflevector <2 x i32> [[TMP17]], <2 x i32> [[TMP156]], <2 x i32> <i32 0, i32 3>
; CHECK-NEXT: [[TMP130:%.*]] = lshr <2 x i32> [[TMP129]], <i32 15, i32 15>
; CHECK-NEXT: [[TMP131:%.*]] = and <2 x i32> [[TMP130]], <i32 65537, i32 65537>
; CHECK-NEXT: [[TMP132:%.*]] = mul <2 x i32> [[TMP131]], <i32 65535, i32 65535>
; CHECK-NEXT: [[TMP133:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_1]], i32 0
; CHECK-NEXT: [[TMP144:%.*]] = shufflevector <2 x i32> [[TMP133]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP151:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_1]], i32 0
; CHECK-NEXT: [[TMP152:%.*]] = shufflevector <2 x i32> [[TMP151]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP153:%.*]] = add <2 x i32> [[TMP144]], [[TMP152]]
; CHECK-NEXT: [[TMP138:%.*]] = sub <2 x i32> [[TMP144]], [[TMP152]]
; CHECK-NEXT: [[TMP139:%.*]] = shufflevector <2 x i32> [[TMP153]], <2 x i32> [[TMP138]], <2 x i32> <i32 0, i32 3>
; CHECK-NEXT: [[TMP140:%.*]] = add <2 x i32> [[TMP132]], [[TMP139]]
; CHECK-NEXT: [[TMP141:%.*]] = xor <2 x i32> [[TMP140]], [[TMP129]]
; CHECK-NEXT: [[ADD_I62_1:%.*]] = add i32 [[MUL_I61_1]], [[SUB106_1]]
; CHECK-NEXT: [[XOR_I63_1:%.*]] = xor i32 [[ADD_I62_1]], [[TMP99]]
; CHECK-NEXT: [[ADD108_1:%.*]] = add i32 [[XOR_I53_1]], [[ADD113]]
; CHECK-NEXT: [[TMP154:%.*]] = extractelement <2 x i32> [[TMP141]], i32 0
; CHECK-NEXT: [[ADD110_1:%.*]] = add i32 [[ADD108_1]], [[TMP154]]
; CHECK-NEXT: [[TMP155:%.*]] = extractelement <2 x i32> [[TMP141]], i32 1
; CHECK-NEXT: [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[TMP155]]
; CHECK-NEXT: [[ADD113_1:%.*]] = add i32 [[ADD112_1]], [[XOR_I63_1]]
; CHECK-NEXT: [[ADD94_2:%.*]] = add i32 [[SUB51_1]], [[SUB51]]
; CHECK-NEXT: [[SUB86_2:%.*]] = sub i32 [[SUB51]], [[SUB51_1]]
; CHECK-NEXT: [[TMP244:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_2]], i32 0
; CHECK-NEXT: [[TMP245:%.*]] = shufflevector <2 x i32> [[TMP244]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP197:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_4]], i32 0
; CHECK-NEXT: [[TMP198:%.*]] = shufflevector <2 x i32> [[TMP197]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP207:%.*]] = add <2 x i32> [[TMP245]], [[TMP198]]
; CHECK-NEXT: [[TMP208:%.*]] = sub <2 x i32> [[TMP245]], [[TMP198]]
; CHECK-NEXT: [[TMP209:%.*]] = shufflevector <2 x i32> [[TMP207]], <2 x i32> [[TMP208]], <2 x i32> <i32 0, i32 3>
; CHECK-NEXT: [[ADD105_2:%.*]] = add i32 [[SUB102_2]], [[SUB86_2]]
; CHECK-NEXT: [[SUB106_2:%.*]] = sub i32 [[SUB86_2]], [[SUB102_2]]
; CHECK-NEXT: [[ADD_I52_2:%.*]] = add i32 [[MUL_I51_6]], [[ADD105_2]]
; CHECK-NEXT: [[XOR_I53_2:%.*]] = xor i32 [[ADD_I52_2]], [[CONV_1]]
; CHECK-NEXT: [[TMP134:%.*]] = add <2 x i32> [[TMP149]], [[TMP209]]
; CHECK-NEXT: [[TMP213:%.*]] = xor <2 x i32> [[TMP134]], [[TMP110]]
; CHECK-NEXT: [[SHR_I59_2:%.*]] = lshr i32 [[TMP94]], 15
; CHECK-NEXT: [[AND_I60_2:%.*]] = and i32 [[SHR_I59_2]], 65537
; CHECK-NEXT: [[MUL_I61_2:%.*]] = mul i32 [[AND_I60_2]], 65535
; CHECK-NEXT: [[ADD_I62_2:%.*]] = add i32 [[MUL_I61_2]], [[SUB106_2]]
; CHECK-NEXT: [[XOR_I63_2:%.*]] = xor i32 [[ADD_I62_2]], [[TMP94]]
; CHECK-NEXT: [[ADD108_2:%.*]] = add i32 [[XOR_I53_2]], [[ADD113_1]]
; CHECK-NEXT: [[TMP157:%.*]] = extractelement <2 x i32> [[TMP213]], i32 0
; CHECK-NEXT: [[ADD110_2:%.*]] = add i32 [[ADD108_2]], [[TMP157]]
; CHECK-NEXT: [[TMP158:%.*]] = extractelement <2 x i32> [[TMP213]], i32 1
; CHECK-NEXT: [[ADD112_2:%.*]] = add i32 [[ADD110_2]], [[TMP158]]
; CHECK-NEXT: [[ADD113_2:%.*]] = add i32 [[ADD112_2]], [[XOR_I63_2]]
; CHECK-NEXT: [[ADD94_3:%.*]] = add i32 [[SUB59_1]], [[SUB59]]
; CHECK-NEXT: [[SUB86_3:%.*]] = sub i32 [[SUB59]], [[SUB59_1]]
; CHECK-NEXT: [[TMP223:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_3]], i32 0
; CHECK-NEXT: [[TMP224:%.*]] = shufflevector <2 x i32> [[TMP223]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP241:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_5]], i32 0
; CHECK-NEXT: [[TMP242:%.*]] = shufflevector <2 x i32> [[TMP241]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP261:%.*]] = add <2 x i32> [[TMP224]], [[TMP242]]
; CHECK-NEXT: [[TMP262:%.*]] = sub <2 x i32> [[TMP224]], [[TMP242]]
; CHECK-NEXT: [[TMP220:%.*]] = shufflevector <2 x i32> [[TMP261]], <2 x i32> [[TMP262]], <2 x i32> <i32 0, i32 3>
; CHECK-NEXT: [[ADD105_3:%.*]] = add i32 [[SUB102_3]], [[SUB86_3]]
; CHECK-NEXT: [[SUB106_3:%.*]] = sub i32 [[SUB86_3]], [[SUB102_3]]
; CHECK-NEXT: [[ADD_I52_3:%.*]] = add i32 [[MUL_I51_4]], [[ADD105_3]]
; CHECK-NEXT: [[XOR_I53_3:%.*]] = xor i32 [[ADD_I52_3]], [[CONV1]]
; CHECK-NEXT: [[TMP230:%.*]] = lshr <2 x i32> [[TMP102]], <i32 15, i32 15>
; CHECK-NEXT: [[TMP231:%.*]] = and <2 x i32> [[TMP230]], <i32 65537, i32 65537>
; CHECK-NEXT: [[TMP232:%.*]] = mul <2 x i32> [[TMP231]], <i32 65535, i32 65535>
; CHECK-NEXT: [[TMP150:%.*]] = add <2 x i32> [[TMP232]], [[TMP220]]
; CHECK-NEXT: [[TMP234:%.*]] = xor <2 x i32> [[TMP150]], [[TMP102]]
; CHECK-NEXT: [[SHR_I59_3:%.*]] = lshr i32 [[CONV33]], 15
; CHECK-NEXT: [[AND_I60_3:%.*]] = and i32 [[SHR_I59_3]], 65537
; CHECK-NEXT: [[MUL_I61_3:%.*]] = mul i32 [[AND_I60_3]], 65535
; CHECK-NEXT: [[ADD_I62_3:%.*]] = add i32 [[MUL_I61_3]], [[SUB106_3]]
; CHECK-NEXT: [[XOR_I63_3:%.*]] = xor i32 [[ADD_I62_3]], [[CONV33]]
; CHECK-NEXT: [[ADD108_3:%.*]] = add i32 [[XOR_I53_3]], [[ADD113_2]]
; CHECK-NEXT: [[TMP235:%.*]] = extractelement <2 x i32> [[TMP234]], i32 0
; CHECK-NEXT: [[ADD110_3:%.*]] = add i32 [[ADD108_3]], [[TMP235]]
; CHECK-NEXT: [[TMP236:%.*]] = extractelement <2 x i32> [[TMP234]], i32 1
; CHECK-NEXT: [[ADD112_3:%.*]] = add i32 [[ADD110_3]], [[TMP236]]
; CHECK-NEXT: [[ADD113_3:%.*]] = add i32 [[ADD112_3]], [[XOR_I63_3]]
; CHECK-NEXT: ret i32 [[ADD113_3]]
;
; THR15-LABEL: define i32 @test(
; THR15-SAME: ptr [[PIX1:%.*]], ptr [[PIX2:%.*]], i64 [[IDX_EXT:%.*]], i64 [[IDX_EXT63:%.*]], ptr [[ADD_PTR:%.*]], ptr [[ADD_PTR64:%.*]]) #[[ATTR0:[0-9]+]] {
; THR15-NEXT: entry:
; THR15-NEXT: [[TMP0:%.*]] = load i8, ptr [[PIX1]], align 1
; THR15-NEXT: [[CONV:%.*]] = zext i8 [[TMP0]] to i32
; THR15-NEXT: [[ARRAYIDX3:%.*]] = getelementptr i8, ptr [[PIX1]], i64 4
; THR15-NEXT: [[ARRAYIDX5:%.*]] = getelementptr i8, ptr [[PIX2]], i64 4
; THR15-NEXT: [[ARRAYIDX8:%.*]] = getelementptr i8, ptr [[PIX1]], i64 1
; THR15-NEXT: [[ARRAYIDX22:%.*]] = getelementptr i8, ptr [[PIX2]], i64 1
; THR15-NEXT: [[ARRAYIDX25:%.*]] = getelementptr i8, ptr [[PIX1]], i64 5
; THR15-NEXT: [[ARRAYIDX27:%.*]] = getelementptr i8, ptr [[PIX2]], i64 5
; THR15-NEXT: [[ARRAYIDX32:%.*]] = getelementptr i8, ptr [[PIX1]], i64 3
; THR15-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX32]], align 1
; THR15-NEXT: [[CONV33:%.*]] = zext i8 [[TMP1]] to i32
; THR15-NEXT: [[ADD_PTR3:%.*]] = getelementptr i8, ptr [[PIX1]], i64 [[IDX_EXT]]
; THR15-NEXT: [[ADD_PTR644:%.*]] = getelementptr i8, ptr [[PIX2]], i64 [[IDX_EXT63]]
; THR15-NEXT: [[TMP2:%.*]] = load i8, ptr [[ADD_PTR3]], align 1
; THR15-NEXT: [[CONV_1:%.*]] = zext i8 [[TMP2]] to i32
; THR15-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 4
; THR15-NEXT: [[ARRAYIDX5_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 4
; THR15-NEXT: [[ARRAYIDX8_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 1
; THR15-NEXT: [[ARRAYIDX22_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 1
; THR15-NEXT: [[ARRAYIDX13_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 5
; THR15-NEXT: [[ARRAYIDX27_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 5
; THR15-NEXT: [[ARRAYIDX32_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 3
; THR15-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX32_1]], align 1
; THR15-NEXT: [[CONV33_1:%.*]] = zext i8 [[TMP3]] to i32
; THR15-NEXT: [[ADD_PTR_1:%.*]] = getelementptr i8, ptr [[ADD_PTR]], i64 [[IDX_EXT]]
; THR15-NEXT: [[ADD_PTR64_1:%.*]] = getelementptr i8, ptr [[ADD_PTR64]], i64 [[IDX_EXT63]]
; THR15-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 4
; THR15-NEXT: [[ARRAYIDX5_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 4
; THR15-NEXT: [[TMP4:%.*]] = load <2 x i8>, ptr [[ADD_PTR_1]], align 1
; THR15-NEXT: [[TMP66:%.*]] = zext <2 x i8> [[TMP4]] to <2 x i32>
; THR15-NEXT: [[TMP6:%.*]] = load <2 x i8>, ptr [[ADD_PTR64_1]], align 1
; THR15-NEXT: [[TMP7:%.*]] = zext <2 x i8> [[TMP6]] to <2 x i32>
; THR15-NEXT: [[TMP8:%.*]] = sub <2 x i32> [[TMP66]], [[TMP7]]
; THR15-NEXT: [[TMP9:%.*]] = load <2 x i8>, ptr [[ARRAYIDX3_2]], align 1
; THR15-NEXT: [[TMP10:%.*]] = zext <2 x i8> [[TMP9]] to <2 x i32>
; THR15-NEXT: [[TMP11:%.*]] = load <2 x i8>, ptr [[ARRAYIDX5_2]], align 1
; THR15-NEXT: [[TMP12:%.*]] = zext <2 x i8> [[TMP11]] to <2 x i32>
; THR15-NEXT: [[TMP13:%.*]] = sub <2 x i32> [[TMP10]], [[TMP12]]
; THR15-NEXT: [[TMP14:%.*]] = shl <2 x i32> [[TMP13]], <i32 16, i32 16>
; THR15-NEXT: [[TMP15:%.*]] = add <2 x i32> [[TMP14]], [[TMP8]]
; THR15-NEXT: [[ARRAYIDX20_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 2
; THR15-NEXT: [[ARRAYIDX22_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 2
; THR15-NEXT: [[ARRAYIDX25_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 6
; THR15-NEXT: [[ARRAYIDX27_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 6
; THR15-NEXT: [[TMP16:%.*]] = load <2 x i8>, ptr [[ARRAYIDX20_2]], align 1
; THR15-NEXT: [[TMP17:%.*]] = zext <2 x i8> [[TMP16]] to <2 x i32>
; THR15-NEXT: [[TMP18:%.*]] = load <2 x i8>, ptr [[ARRAYIDX22_2]], align 1
; THR15-NEXT: [[TMP19:%.*]] = zext <2 x i8> [[TMP18]] to <2 x i32>
; THR15-NEXT: [[TMP20:%.*]] = sub <2 x i32> [[TMP17]], [[TMP19]]
; THR15-NEXT: [[TMP21:%.*]] = load <2 x i8>, ptr [[ARRAYIDX25_2]], align 1
; THR15-NEXT: [[TMP22:%.*]] = zext <2 x i8> [[TMP21]] to <2 x i32>
; THR15-NEXT: [[TMP23:%.*]] = load <2 x i8>, ptr [[ARRAYIDX27_2]], align 1
; THR15-NEXT: [[TMP24:%.*]] = zext <2 x i8> [[TMP23]] to <2 x i32>
; THR15-NEXT: [[TMP25:%.*]] = sub <2 x i32> [[TMP22]], [[TMP24]]
; THR15-NEXT: [[TMP26:%.*]] = shl <2 x i32> [[TMP25]], <i32 16, i32 16>
; THR15-NEXT: [[TMP27:%.*]] = add <2 x i32> [[TMP26]], [[TMP20]]
; THR15-NEXT: [[TMP28:%.*]] = extractelement <2 x i32> [[TMP15]], i32 0
; THR15-NEXT: [[TMP29:%.*]] = extractelement <2 x i32> [[TMP15]], i32 1
; THR15-NEXT: [[ADD44_2:%.*]] = add i32 [[TMP29]], [[TMP28]]
; THR15-NEXT: [[SUB45_2:%.*]] = sub i32 [[TMP28]], [[TMP29]]
; THR15-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[TMP27]], i32 0
; THR15-NEXT: [[TMP31:%.*]] = extractelement <2 x i32> [[TMP27]], i32 1
; THR15-NEXT: [[ADD46_2:%.*]] = add i32 [[TMP31]], [[TMP30]]
; THR15-NEXT: [[SUB47_2:%.*]] = sub i32 [[TMP30]], [[TMP31]]
; THR15-NEXT: [[ADD48_2:%.*]] = add i32 [[ADD46_2]], [[ADD44_2]]
; THR15-NEXT: [[SUB51_2:%.*]] = sub i32 [[ADD44_2]], [[ADD46_2]]
; THR15-NEXT: [[ADD55_2:%.*]] = add i32 [[SUB47_2]], [[SUB45_2]]
; THR15-NEXT: [[SUB59_2:%.*]] = sub i32 [[SUB45_2]], [[SUB47_2]]
; THR15-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr i8, ptr null, i64 4
; THR15-NEXT: [[ARRAYIDX5_3:%.*]] = getelementptr i8, ptr null, i64 4
; THR15-NEXT: [[TMP32:%.*]] = load <2 x i8>, ptr null, align 1
; THR15-NEXT: [[TMP33:%.*]] = zext <2 x i8> [[TMP32]] to <2 x i32>
; THR15-NEXT: [[TMP34:%.*]] = load <2 x i8>, ptr null, align 1
; THR15-NEXT: [[TMP35:%.*]] = zext <2 x i8> [[TMP34]] to <2 x i32>
; THR15-NEXT: [[TMP36:%.*]] = sub <2 x i32> [[TMP33]], [[TMP35]]
; THR15-NEXT: [[TMP37:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3_3]], i64 -4, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT: [[TMP38:%.*]] = zext <2 x i8> [[TMP37]] to <2 x i32>
; THR15-NEXT: [[TMP39:%.*]] = load <2 x i8>, ptr [[ARRAYIDX5_3]], align 1
; THR15-NEXT: [[TMP40:%.*]] = zext <2 x i8> [[TMP39]] to <2 x i32>
; THR15-NEXT: [[TMP41:%.*]] = sub <2 x i32> [[TMP38]], [[TMP40]]
; THR15-NEXT: [[TMP42:%.*]] = shl <2 x i32> [[TMP41]], <i32 16, i32 16>
; THR15-NEXT: [[TMP43:%.*]] = add <2 x i32> [[TMP42]], [[TMP36]]
; THR15-NEXT: [[ARRAYIDX20_3:%.*]] = getelementptr i8, ptr null, i64 2
; THR15-NEXT: [[ARRAYIDX22_3:%.*]] = getelementptr i8, ptr null, i64 2
; THR15-NEXT: [[TMP44:%.*]] = load i8, ptr null, align 1
; THR15-NEXT: [[ARRAYIDX27_3:%.*]] = getelementptr i8, ptr null, i64 6
; THR15-NEXT: [[TMP45:%.*]] = load i8, ptr null, align 1
; THR15-NEXT: [[TMP46:%.*]] = load <2 x i8>, ptr [[ARRAYIDX20_3]], align 1
; THR15-NEXT: [[TMP47:%.*]] = zext <2 x i8> [[TMP46]] to <2 x i32>
; THR15-NEXT: [[TMP48:%.*]] = load <2 x i8>, ptr [[ARRAYIDX22_3]], align 1
; THR15-NEXT: [[TMP49:%.*]] = zext <2 x i8> [[TMP48]] to <2 x i32>
; THR15-NEXT: [[TMP50:%.*]] = sub <2 x i32> [[TMP47]], [[TMP49]]
; THR15-NEXT: [[TMP51:%.*]] = insertelement <2 x i8> poison, i8 [[TMP44]], i32 0
; THR15-NEXT: [[TMP52:%.*]] = insertelement <2 x i8> [[TMP51]], i8 [[TMP45]], i32 1
; THR15-NEXT: [[TMP53:%.*]] = zext <2 x i8> [[TMP52]] to <2 x i32>
; THR15-NEXT: [[TMP54:%.*]] = load <2 x i8>, ptr [[ARRAYIDX27_3]], align 1
; THR15-NEXT: [[TMP55:%.*]] = zext <2 x i8> [[TMP54]] to <2 x i32>
; THR15-NEXT: [[TMP56:%.*]] = sub <2 x i32> [[TMP53]], [[TMP55]]
; THR15-NEXT: [[TMP57:%.*]] = shl <2 x i32> [[TMP56]], <i32 16, i32 16>
; THR15-NEXT: [[TMP58:%.*]] = add <2 x i32> [[TMP57]], [[TMP50]]
; THR15-NEXT: [[TMP59:%.*]] = extractelement <2 x i32> [[TMP43]], i32 0
; THR15-NEXT: [[TMP60:%.*]] = extractelement <2 x i32> [[TMP43]], i32 1
; THR15-NEXT: [[ADD44_3:%.*]] = add i32 [[TMP60]], [[TMP59]]
; THR15-NEXT: [[SUB45_3:%.*]] = sub i32 [[TMP59]], [[TMP60]]
; THR15-NEXT: [[TMP61:%.*]] = extractelement <2 x i32> [[TMP58]], i32 0
; THR15-NEXT: [[TMP62:%.*]] = extractelement <2 x i32> [[TMP58]], i32 1
; THR15-NEXT: [[ADD46_3:%.*]] = add i32 [[TMP62]], [[TMP61]]
; THR15-NEXT: [[SUB47_3:%.*]] = sub i32 [[TMP61]], [[TMP62]]
; THR15-NEXT: [[ADD48_3:%.*]] = add i32 [[ADD46_3]], [[ADD44_3]]
; THR15-NEXT: [[SUB51_3:%.*]] = sub i32 [[ADD44_3]], [[ADD46_3]]
; THR15-NEXT: [[ADD55_3:%.*]] = add i32 [[SUB47_3]], [[SUB45_3]]
; THR15-NEXT: [[SUB59_3:%.*]] = sub i32 [[SUB45_3]], [[SUB47_3]]
; THR15-NEXT: [[ADD94:%.*]] = add i32 [[ADD48_3]], [[ADD48_2]]
; THR15-NEXT: [[SUB102:%.*]] = sub i32 [[ADD48_2]], [[ADD48_3]]
; THR15-NEXT: [[TMP63:%.*]] = extractelement <2 x i32> [[TMP33]], i32 0
; THR15-NEXT: [[SHR_I:%.*]] = lshr i32 [[TMP63]], 15
; THR15-NEXT: [[AND_I:%.*]] = and i32 [[SHR_I]], 65537
; THR15-NEXT: [[MUL_I:%.*]] = mul i32 [[AND_I]], 65535
; THR15-NEXT: [[SHR_I49:%.*]] = lshr i32 [[ADD46_2]], 15
; THR15-NEXT: [[AND_I50:%.*]] = and i32 [[SHR_I49]], 65537
; THR15-NEXT: [[MUL_I51:%.*]] = mul i32 [[AND_I50]], 65535
; THR15-NEXT: [[ADD55_1:%.*]] = add i32 [[ADD55_3]], [[ADD55_2]]
; THR15-NEXT: [[SUB102_1:%.*]] = sub i32 [[ADD55_2]], [[ADD55_3]]
; THR15-NEXT: [[TMP64:%.*]] = extractelement <2 x i32> [[TMP66]], i32 0
; THR15-NEXT: [[SHR_I49_2:%.*]] = lshr i32 [[TMP64]], 15
; THR15-NEXT: [[AND_I50_2:%.*]] = and i32 [[SHR_I49_2]], 65537
; THR15-NEXT: [[MUL_I51_2:%.*]] = mul i32 [[AND_I50_2]], 65535
; THR15-NEXT: [[ADD94_2:%.*]] = add i32 [[SUB51_3]], [[SUB51_2]]
; THR15-NEXT: [[SUB102_2:%.*]] = sub i32 [[SUB51_2]], [[SUB51_3]]
; THR15-NEXT: [[SHR_I49_3:%.*]] = lshr i32 [[CONV_1]], 15
; THR15-NEXT: [[AND_I50_3:%.*]] = and i32 [[SHR_I49_3]], 65537
; THR15-NEXT: [[MUL_I51_3:%.*]] = mul i32 [[AND_I50_3]], 65535
; THR15-NEXT: [[ADD94_3:%.*]] = add i32 [[SUB59_3]], [[SUB59_2]]
; THR15-NEXT: [[SUB102_3:%.*]] = sub i32 [[SUB59_2]], [[SUB59_3]]
; THR15-NEXT: [[SHR_I49_4:%.*]] = lshr i32 [[CONV]], 15
; THR15-NEXT: [[AND_I50_4:%.*]] = and i32 [[SHR_I49_4]], 65537
; THR15-NEXT: [[MUL_I51_4:%.*]] = mul i32 [[AND_I50_4]], 65535
; THR15-NEXT: [[TMP65:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8]], align 1
; THR15-NEXT: [[TMP74:%.*]] = zext <2 x i8> [[TMP65]] to <2 x i32>
; THR15-NEXT: [[TMP67:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[PIX2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT: [[TMP68:%.*]] = zext <2 x i8> [[TMP67]] to <2 x i32>
; THR15-NEXT: [[TMP69:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT: [[TMP70:%.*]] = zext <2 x i8> [[TMP69]] to <2 x i32>
; THR15-NEXT: [[TMP71:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT: [[TMP81:%.*]] = zext <2 x i8> [[TMP71]] to <2 x i32>
; THR15-NEXT: [[TMP72:%.*]] = sub <2 x i32> [[TMP70]], [[TMP81]]
; THR15-NEXT: [[TMP73:%.*]] = shl <2 x i32> [[TMP72]], <i32 16, i32 16>
; THR15-NEXT: [[TMP75:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX22]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT: [[TMP76:%.*]] = zext <2 x i8> [[TMP75]] to <2 x i32>
; THR15-NEXT: [[TMP82:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX25]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT: [[TMP78:%.*]] = zext <2 x i8> [[TMP82]] to <2 x i32>
; THR15-NEXT: [[TMP79:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX27]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT: [[TMP80:%.*]] = zext <2 x i8> [[TMP79]] to <2 x i32>
; THR15-NEXT: [[TMP84:%.*]] = sub <2 x i32> [[TMP78]], [[TMP80]]
; THR15-NEXT: [[TMP85:%.*]] = shl <2 x i32> [[TMP84]], <i32 16, i32 16>
; THR15-NEXT: [[TMP86:%.*]] = insertelement <2 x i32> [[TMP74]], i32 [[CONV33]], i32 1
; THR15-NEXT: [[TMP93:%.*]] = sub <2 x i32> [[TMP86]], [[TMP76]]
; THR15-NEXT: [[TMP88:%.*]] = add <2 x i32> [[TMP85]], [[TMP93]]
; THR15-NEXT: [[TMP92:%.*]] = insertelement <2 x i32> [[TMP74]], i32 [[CONV]], i32 0
; THR15-NEXT: [[TMP87:%.*]] = sub <2 x i32> [[TMP92]], [[TMP68]]
; THR15-NEXT: [[TMP95:%.*]] = add <2 x i32> [[TMP73]], [[TMP87]]
; THR15-NEXT: [[TMP97:%.*]] = shufflevector <2 x i32> [[TMP88]], <2 x i32> [[TMP95]], <2 x i32> <i32 0, i32 2>
; THR15-NEXT: [[TMP77:%.*]] = add <2 x i32> [[TMP88]], [[TMP95]]
; THR15-NEXT: [[TMP91:%.*]] = sub <2 x i32> [[TMP95]], [[TMP88]]
; THR15-NEXT: [[TMP89:%.*]] = extractelement <2 x i32> [[TMP77]], i32 0
; THR15-NEXT: [[TMP90:%.*]] = extractelement <2 x i32> [[TMP77]], i32 1
; THR15-NEXT: [[ADD48:%.*]] = add i32 [[TMP90]], [[TMP89]]
; THR15-NEXT: [[SUB51:%.*]] = sub i32 [[TMP89]], [[TMP90]]
; THR15-NEXT: [[TMP94:%.*]] = extractelement <2 x i32> [[TMP91]], i32 0
; THR15-NEXT: [[SUB47:%.*]] = extractelement <2 x i32> [[TMP91]], i32 1
; THR15-NEXT: [[ADD56:%.*]] = add i32 [[SUB47]], [[TMP94]]
; THR15-NEXT: [[SUB59:%.*]] = sub i32 [[TMP94]], [[SUB47]]
; THR15-NEXT: [[SHR_I59:%.*]] = lshr i32 [[TMP90]], 15
; THR15-NEXT: [[AND_I60:%.*]] = and i32 [[SHR_I59]], 65537
; THR15-NEXT: [[MUL_I61:%.*]] = mul i32 [[AND_I60]], 65535
; THR15-NEXT: [[SHR_I59_1:%.*]] = lshr i32 [[SUB47]], 15
; THR15-NEXT: [[AND_I60_1:%.*]] = and i32 [[SHR_I59_1]], 65537
; THR15-NEXT: [[MUL_I61_1:%.*]] = mul i32 [[AND_I60_1]], 65535
; THR15-NEXT: [[TMP96:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8_1]], align 1
; THR15-NEXT: [[TMP103:%.*]] = zext <2 x i8> [[TMP96]] to <2 x i32>
; THR15-NEXT: [[TMP98:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ADD_PTR644]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT: [[TMP99:%.*]] = zext <2 x i8> [[TMP98]] to <2 x i32>
; THR15-NEXT: [[TMP100:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT: [[TMP104:%.*]] = zext <2 x i8> [[TMP100]] to <2 x i32>
; THR15-NEXT: [[TMP105:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT: [[TMP112:%.*]] = zext <2 x i8> [[TMP105]] to <2 x i32>
; THR15-NEXT: [[TMP101:%.*]] = sub <2 x i32> [[TMP104]], [[TMP112]]
; THR15-NEXT: [[TMP102:%.*]] = shl <2 x i32> [[TMP101]], <i32 16, i32 16>
; THR15-NEXT: [[TMP120:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX22_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT: [[TMP107:%.*]] = zext <2 x i8> [[TMP120]] to <2 x i32>
; THR15-NEXT: [[TMP108:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX13_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT: [[TMP109:%.*]] = zext <2 x i8> [[TMP108]] to <2 x i32>
; THR15-NEXT: [[TMP110:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX27_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
; THR15-NEXT: [[TMP111:%.*]] = zext <2 x i8> [[TMP110]] to <2 x i32>
; THR15-NEXT: [[TMP113:%.*]] = sub <2 x i32> [[TMP109]], [[TMP111]]
; THR15-NEXT: [[TMP114:%.*]] = shl <2 x i32> [[TMP113]], <i32 16, i32 16>
; THR15-NEXT: [[TMP115:%.*]] = insertelement <2 x i32> [[TMP103]], i32 [[CONV33_1]], i32 1
; THR15-NEXT: [[TMP117:%.*]] = sub <2 x i32> [[TMP115]], [[TMP107]]
; THR15-NEXT: [[TMP116:%.*]] = add <2 x i32> [[TMP114]], [[TMP117]]
; THR15-NEXT: [[TMP126:%.*]] = insertelement <2 x i32> [[TMP103]], i32 [[CONV_1]], i32 0
; THR15-NEXT: [[TMP127:%.*]] = sub <2 x i32> [[TMP126]], [[TMP99]]
; THR15-NEXT: [[TMP128:%.*]] = add <2 x i32> [[TMP102]], [[TMP127]]
; THR15-NEXT: [[TMP106:%.*]] = add <2 x i32> [[TMP116]], [[TMP128]]
; THR15-NEXT: [[TMP121:%.*]] = sub <2 x i32> [[TMP128]], [[TMP116]]
; THR15-NEXT: [[TMP118:%.*]] = extractelement <2 x i32> [[TMP106]], i32 0
; THR15-NEXT: [[TMP119:%.*]] = extractelement <2 x i32> [[TMP106]], i32 1
; THR15-NEXT: [[ADD48_1:%.*]] = add i32 [[TMP119]], [[TMP118]]
; THR15-NEXT: [[SUB51_1:%.*]] = sub i32 [[TMP118]], [[TMP119]]
; THR15-NEXT: [[TMP129:%.*]] = extractelement <2 x i32> [[TMP121]], i32 0
; THR15-NEXT: [[TMP125:%.*]] = extractelement <2 x i32> [[TMP121]], i32 1
; THR15-NEXT: [[ADD55_4:%.*]] = add i32 [[TMP125]], [[TMP129]]
; THR15-NEXT: [[SUB59_1:%.*]] = sub i32 [[TMP129]], [[TMP125]]
; THR15-NEXT: [[SHR_I54_1:%.*]] = lshr i32 [[TMP119]], 15
; THR15-NEXT: [[AND_I55_1:%.*]] = and i32 [[SHR_I54_1]], 65537
; THR15-NEXT: [[MUL_I56_1:%.*]] = mul i32 [[AND_I55_1]], 65535
; THR15-NEXT: [[TMP122:%.*]] = lshr <2 x i32> [[TMP103]], <i32 15, i32 15>
; THR15-NEXT: [[TMP123:%.*]] = and <2 x i32> [[TMP122]], <i32 65537, i32 65537>
; THR15-NEXT: [[TMP124:%.*]] = mul <2 x i32> [[TMP123]], <i32 65535, i32 65535>
; THR15-NEXT: [[ADD78:%.*]] = add i32 [[ADD48_1]], [[ADD48]]
; THR15-NEXT: [[SUB86:%.*]] = sub i32 [[ADD48]], [[ADD48_1]]
; THR15-NEXT: [[ADD103:%.*]] = add i32 [[ADD94]], [[ADD78]]
; THR15-NEXT: [[SUB104:%.*]] = sub i32 [[ADD78]], [[ADD94]]
; THR15-NEXT: [[ADD105:%.*]] = add i32 [[SUB102]], [[SUB86]]
; THR15-NEXT: [[SUB106:%.*]] = sub i32 [[SUB86]], [[SUB102]]
; THR15-NEXT: [[ADD_I:%.*]] = add i32 [[MUL_I]], [[ADD103]]
; THR15-NEXT: [[XOR_I:%.*]] = xor i32 [[ADD_I]], [[TMP63]]
; THR15-NEXT: [[ADD_I52:%.*]] = add i32 [[MUL_I51]], [[ADD105]]
; THR15-NEXT: [[XOR_I53:%.*]] = xor i32 [[ADD_I52]], [[ADD46_2]]
; THR15-NEXT: [[ADD_I57:%.*]] = add i32 [[MUL_I56_1]], [[SUB104]]
; THR15-NEXT: [[XOR_I58:%.*]] = xor i32 [[ADD_I57]], [[TMP119]]
; THR15-NEXT: [[ADD_I62:%.*]] = add i32 [[MUL_I61]], [[SUB106]]
; THR15-NEXT: [[XOR_I63:%.*]] = xor i32 [[ADD_I62]], [[TMP90]]
; THR15-NEXT: [[ADD110:%.*]] = add i32 [[XOR_I53]], [[XOR_I]]
; THR15-NEXT: [[ADD112:%.*]] = add i32 [[ADD110]], [[XOR_I58]]
; THR15-NEXT: [[ADD113:%.*]] = add i32 [[ADD112]], [[XOR_I63]]
; THR15-NEXT: [[ADD55:%.*]] = add i32 [[ADD55_4]], [[ADD56]]
; THR15-NEXT: [[SUB86_1:%.*]] = sub i32 [[ADD56]], [[ADD55_4]]
; THR15-NEXT: [[ADD105_1:%.*]] = add i32 [[SUB102_1]], [[SUB86_1]]
; THR15-NEXT: [[SUB106_1:%.*]] = sub i32 [[SUB86_1]], [[SUB102_1]]
; THR15-NEXT: [[ADD_I52_1:%.*]] = add i32 [[MUL_I51_2]], [[ADD105_1]]
; THR15-NEXT: [[XOR_I53_1:%.*]] = xor i32 [[ADD_I52_1]], [[TMP64]]
; THR15-NEXT: [[TMP5:%.*]] = shufflevector <2 x i32> [[TMP66]], <2 x i32> [[TMP121]], <2 x i32> <i32 1, i32 3>
; THR15-NEXT: [[TMP132:%.*]] = lshr <2 x i32> [[TMP5]], <i32 15, i32 15>
; THR15-NEXT: [[TMP133:%.*]] = and <2 x i32> [[TMP132]], <i32 65537, i32 65537>
; THR15-NEXT: [[TMP134:%.*]] = mul <2 x i32> [[TMP133]], <i32 65535, i32 65535>
; THR15-NEXT: [[TMP135:%.*]] = insertelement <2 x i32> poison, i32 [[ADD55]], i32 0
; THR15-NEXT: [[TMP136:%.*]] = shufflevector <2 x i32> [[TMP135]], <2 x i32> poison, <2 x i32> zeroinitializer
; THR15-NEXT: [[TMP137:%.*]] = insertelement <2 x i32> poison, i32 [[ADD55_1]], i32 0
; THR15-NEXT: [[TMP138:%.*]] = shufflevector <2 x i32> [[TMP137]], <2 x i32> poison, <2 x i32> zeroinitializer
; THR15-NEXT: [[TMP140:%.*]] = add <2 x i32> [[TMP136]], [[TMP138]]
; THR15-NEXT: [[TMP139:%.*]] = sub <2 x i32> [[TMP136]], [[TMP138]]
; THR15-NEXT: [[TMP144:%.*]] = shufflevector <2 x i32> [[TMP140]], <2 x i32> [[TMP139]], <2 x i32> <i32 0, i32 3>
; THR15-NEXT: [[TMP148:%.*]] = add <2 x i32> [[TMP134]], [[TMP144]]
; THR15-NEXT: [[TMP149:%.*]] = xor <2 x i32> [[TMP148]], [[TMP5]]
; THR15-NEXT: [[ADD_I62_1:%.*]] = add i32 [[MUL_I61_1]], [[SUB106_1]]
; THR15-NEXT: [[XOR_I63_1:%.*]] = xor i32 [[ADD_I62_1]], [[SUB47]]
; THR15-NEXT: [[ADD108_1:%.*]] = add i32 [[XOR_I53_1]], [[ADD113]]
; THR15-NEXT: [[TMP150:%.*]] = extractelement <2 x i32> [[TMP149]], i32 0
; THR15-NEXT: [[ADD110_1:%.*]] = add i32 [[ADD108_1]], [[TMP150]]
; THR15-NEXT: [[TMP151:%.*]] = extractelement <2 x i32> [[TMP149]], i32 1
; THR15-NEXT: [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[TMP151]]
; THR15-NEXT: [[ADD113_1:%.*]] = add i32 [[ADD112_1]], [[XOR_I63_1]]
; THR15-NEXT: [[ADD78_2:%.*]] = add i32 [[SUB51_1]], [[SUB51]]
; THR15-NEXT: [[SUB86_2:%.*]] = sub i32 [[SUB51]], [[SUB51_1]]
; THR15-NEXT: [[TMP152:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_2]], i32 0
; THR15-NEXT: [[TMP153:%.*]] = shufflevector <2 x i32> [[TMP152]], <2 x i32> poison, <2 x i32> zeroinitializer
; THR15-NEXT: [[TMP154:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_2]], i32 0
; THR15-NEXT: [[TMP155:%.*]] = shufflevector <2 x i32> [[TMP154]], <2 x i32> poison, <2 x i32> zeroinitializer
; THR15-NEXT: [[TMP156:%.*]] = add <2 x i32> [[TMP153]], [[TMP155]]
; THR15-NEXT: [[TMP157:%.*]] = sub <2 x i32> [[TMP153]], [[TMP155]]
; THR15-NEXT: [[TMP158:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP157]], <2 x i32> <i32 0, i32 3>
; THR15-NEXT: [[ADD105_2:%.*]] = add i32 [[SUB102_2]], [[SUB86_2]]
; THR15-NEXT: [[SUB106_2:%.*]] = sub i32 [[SUB86_2]], [[SUB102_2]]
; THR15-NEXT: [[ADD_I52_2:%.*]] = add i32 [[MUL_I51_3]], [[ADD105_2]]
; THR15-NEXT: [[XOR_I53_2:%.*]] = xor i32 [[ADD_I52_2]], [[CONV_1]]
; THR15-NEXT: [[TMP159:%.*]] = add <2 x i32> [[TMP124]], [[TMP158]]
; THR15-NEXT: [[TMP160:%.*]] = xor <2 x i32> [[TMP159]], [[TMP103]]
; THR15-NEXT: [[SHR_I59_2:%.*]] = lshr i32 [[TMP89]], 15
; THR15-NEXT: [[AND_I60_2:%.*]] = and i32 [[SHR_I59_2]], 65537
; THR15-NEXT: [[MUL_I61_2:%.*]] = mul i32 [[AND_I60_2]], 65535
; THR15-NEXT: [[ADD_I62_2:%.*]] = add i32 [[MUL_I61_2]], [[SUB106_2]]
; THR15-NEXT: [[XOR_I63_2:%.*]] = xor i32 [[ADD_I62_2]], [[TMP89]]
; THR15-NEXT: [[ADD108_2:%.*]] = add i32 [[XOR_I53_2]], [[ADD113_1]]
; THR15-NEXT: [[TMP161:%.*]] = extractelement <2 x i32> [[TMP160]], i32 0
; THR15-NEXT: [[ADD110_2:%.*]] = add i32 [[ADD108_2]], [[TMP161]]
; THR15-NEXT: [[TMP162:%.*]] = extractelement <2 x i32> [[TMP160]], i32 1
; THR15-NEXT: [[ADD112_2:%.*]] = add i32 [[ADD110_2]], [[TMP162]]
; THR15-NEXT: [[ADD113_2:%.*]] = add i32 [[ADD112_2]], [[XOR_I63_2]]
; THR15-NEXT: [[ADD78_3:%.*]] = add i32 [[SUB59_1]], [[SUB59]]
; THR15-NEXT: [[SUB86_3:%.*]] = sub i32 [[SUB59]], [[SUB59_1]]
; THR15-NEXT: [[TMP163:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_3]], i32 0
; THR15-NEXT: [[TMP164:%.*]] = shufflevector <2 x i32> [[TMP163]], <2 x i32> poison, <2 x i32> zeroinitializer
; THR15-NEXT: [[TMP165:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_3]], i32 0
; THR15-NEXT: [[TMP166:%.*]] = shufflevector <2 x i32> [[TMP165]], <2 x i32> poison, <2 x i32> zeroinitializer
; THR15-NEXT: [[TMP167:%.*]] = add <2 x i32> [[TMP164]], [[TMP166]]
; THR15-NEXT: [[TMP168:%.*]] = sub <2 x i32> [[TMP164]], [[TMP166]]
; THR15-NEXT: [[TMP169:%.*]] = shufflevector <2 x i32> [[TMP167]], <2 x i32> [[TMP168]], <2 x i32> <i32 0, i32 3>
; THR15-NEXT: [[ADD105_3:%.*]] = add i32 [[SUB102_3]], [[SUB86_3]]
; THR15-NEXT: [[SUB106_3:%.*]] = sub i32 [[SUB86_3]], [[SUB102_3]]
; THR15-NEXT: [[ADD_I52_3:%.*]] = add i32 [[MUL_I51_4]], [[ADD105_3]]
; THR15-NEXT: [[XOR_I53_3:%.*]] = xor i32 [[ADD_I52_3]], [[CONV]]
; THR15-NEXT: [[TMP170:%.*]] = lshr <2 x i32> [[TMP74]], <i32 15, i32 15>
; THR15-NEXT: [[TMP171:%.*]] = and <2 x i32> [[TMP170]], <i32 65537, i32 65537>
; THR15-NEXT: [[TMP172:%.*]] = mul <2 x i32> [[TMP171]], <i32 65535, i32 65535>
; THR15-NEXT: [[TMP173:%.*]] = add <2 x i32> [[TMP172]], [[TMP169]]
; THR15-NEXT: [[TMP174:%.*]] = xor <2 x i32> [[TMP173]], [[TMP74]]
; THR15-NEXT: [[SHR_I59_3:%.*]] = lshr i32 [[CONV33]], 15
; THR15-NEXT: [[AND_I60_3:%.*]] = and i32 [[SHR_I59_3]], 65537
; THR15-NEXT: [[MUL_I61_3:%.*]] = mul i32 [[AND_I60_3]], 65535
; THR15-NEXT: [[ADD_I62_3:%.*]] = add i32 [[MUL_I61_3]], [[SUB106_3]]
; THR15-NEXT: [[XOR_I63_3:%.*]] = xor i32 [[ADD_I62_3]], [[CONV33]]
; THR15-NEXT: [[ADD108_3:%.*]] = add i32 [[XOR_I53_3]], [[ADD113_2]]
; THR15-NEXT: [[TMP175:%.*]] = extractelement <2 x i32> [[TMP174]], i32 0
; THR15-NEXT: [[ADD110_3:%.*]] = add i32 [[ADD108_3]], [[TMP175]]
; THR15-NEXT: [[TMP176:%.*]] = extractelement <2 x i32> [[TMP174]], i32 1
; THR15-NEXT: [[ADD112_3:%.*]] = add i32 [[ADD110_3]], [[TMP176]]
; THR15-NEXT: [[ADD113_3:%.*]] = add i32 [[ADD112_3]], [[XOR_I63_3]]
; THR15-NEXT: ret i32 [[ADD113_3]]
;
entry:
%0 = load i8, ptr %pix1, align 1
%conv = zext i8 %0 to i32
%1 = load i8, ptr %pix2, align 1
%conv2 = zext i8 %1 to i32
%sub = sub i32 %conv, %conv2
%arrayidx3 = getelementptr i8, ptr %pix1, i64 4
%2 = load i8, ptr %arrayidx3, align 1
%conv4 = zext i8 %2 to i32
%arrayidx5 = getelementptr i8, ptr %pix2, i64 4
%3 = load i8, ptr %arrayidx5, align 1
%conv6 = zext i8 %3 to i32
%sub7 = sub i32 %conv4, %conv6
%shl = shl i32 %sub7, 16
%add = add i32 %shl, %sub
%arrayidx8 = getelementptr i8, ptr %pix1, i64 1
%4 = load i8, ptr %arrayidx8, align 1
%conv9 = zext i8 %4 to i32
%arrayidx10 = getelementptr i8, ptr %pix2, i64 1
%5 = load i8, ptr %arrayidx10, align 1
%conv11 = zext i8 %5 to i32
%sub12 = sub i32 %conv9, %conv11
%arrayidx13 = getelementptr i8, ptr %pix1, i64 5
%6 = load i8, ptr %arrayidx13, align 1
%conv14 = zext i8 %6 to i32
%arrayidx15 = getelementptr i8, ptr %pix2, i64 5
%7 = load i8, ptr %arrayidx15, align 1
%conv16 = zext i8 %7 to i32
%sub17 = sub i32 %conv14, %conv16
%shl18 = shl i32 %sub17, 16
%add19 = add i32 %shl18, %sub12
%arrayidx20 = getelementptr i8, ptr %pix1, i64 2
%8 = load i8, ptr %arrayidx20, align 1
%conv21 = zext i8 %8 to i32
%arrayidx22 = getelementptr i8, ptr %pix2, i64 2
%9 = load i8, ptr %arrayidx22, align 1
%conv23 = zext i8 %9 to i32
%sub24 = sub i32 %conv21, %conv23
%arrayidx25 = getelementptr i8, ptr %pix1, i64 6
%10 = load i8, ptr %arrayidx25, align 1
%conv26 = zext i8 %10 to i32
%arrayidx27 = getelementptr i8, ptr %pix2, i64 6
%11 = load i8, ptr %arrayidx27, align 1
%conv28 = zext i8 %11 to i32
%sub29 = sub i32 %conv26, %conv28
%shl30 = shl i32 %sub29, 16
%add31 = add i32 %shl30, %sub24
%arrayidx32 = getelementptr i8, ptr %pix1, i64 3
%12 = load i8, ptr %arrayidx32, align 1
%conv33 = zext i8 %12 to i32
%arrayidx34 = getelementptr i8, ptr %pix2, i64 3
%13 = load i8, ptr %arrayidx34, align 1
%conv35 = zext i8 %13 to i32
%sub36 = sub i32 %conv33, %conv35
%arrayidx37 = getelementptr i8, ptr %pix1, i64 7
%14 = load i8, ptr %arrayidx37, align 1
%conv38 = zext i8 %14 to i32
%arrayidx39 = getelementptr i8, ptr %pix2, i64 7
%15 = load i8, ptr %arrayidx39, align 1
%conv40 = zext i8 %15 to i32
%sub41 = sub i32 %conv38, %conv40
%shl42 = shl i32 %sub41, 16
%add43 = add i32 %shl42, %sub36
%add44 = add i32 %add19, %add
%sub45 = sub i32 %add, %add19
%add46 = add i32 %add43, %add31
%sub47 = sub i32 %add31, %add43
%add48 = add i32 %add46, %add44
%sub51 = sub i32 %add44, %add46
%add55 = add i32 %sub47, %sub45
%sub59 = sub i32 %sub45, %sub47
%add.ptr3 = getelementptr i8, ptr %pix1, i64 %idx.ext
%add.ptr644 = getelementptr i8, ptr %pix2, i64 %idx.ext63
%16 = load i8, ptr %add.ptr3, align 1
%conv.1 = zext i8 %16 to i32
%17 = load i8, ptr %add.ptr644, align 1
%conv2.1 = zext i8 %17 to i32
%sub.1 = sub i32 %conv.1, %conv2.1
%arrayidx3.1 = getelementptr i8, ptr %add.ptr3, i64 4
%18 = load i8, ptr %arrayidx3.1, align 1
%conv4.1 = zext i8 %18 to i32
%arrayidx5.1 = getelementptr i8, ptr %add.ptr644, i64 4
%19 = load i8, ptr %arrayidx5.1, align 1
%conv6.1 = zext i8 %19 to i32
%sub7.1 = sub i32 %conv4.1, %conv6.1
%shl.1 = shl i32 %sub7.1, 16
%add.1 = add i32 %shl.1, %sub.1
%arrayidx8.1 = getelementptr i8, ptr %add.ptr3, i64 1
%20 = load i8, ptr %arrayidx8.1, align 1
%conv9.1 = zext i8 %20 to i32
%arrayidx10.1 = getelementptr i8, ptr %add.ptr644, i64 1
%21 = load i8, ptr %arrayidx10.1, align 1
%conv11.1 = zext i8 %21 to i32
%sub12.1 = sub i32 %conv9.1, %conv11.1
%arrayidx13.1 = getelementptr i8, ptr %add.ptr3, i64 5
%22 = load i8, ptr %arrayidx13.1, align 1
%conv14.1 = zext i8 %22 to i32
%arrayidx15.1 = getelementptr i8, ptr %add.ptr644, i64 5
%23 = load i8, ptr %arrayidx15.1, align 1
%conv16.1 = zext i8 %23 to i32
%sub17.1 = sub i32 %conv14.1, %conv16.1
%shl18.1 = shl i32 %sub17.1, 16
%add19.1 = add i32 %shl18.1, %sub12.1
%arrayidx20.1 = getelementptr i8, ptr %add.ptr3, i64 2
%24 = load i8, ptr %arrayidx20.1, align 1
%conv21.1 = zext i8 %24 to i32
%arrayidx22.1 = getelementptr i8, ptr %add.ptr644, i64 2
%25 = load i8, ptr %arrayidx22.1, align 1
%conv23.1 = zext i8 %25 to i32
%sub24.1 = sub i32 %conv21.1, %conv23.1
%arrayidx25.1 = getelementptr i8, ptr %add.ptr3, i64 6
%26 = load i8, ptr %arrayidx25.1, align 1
%conv26.1 = zext i8 %26 to i32
%arrayidx27.1 = getelementptr i8, ptr %add.ptr644, i64 6
%27 = load i8, ptr %arrayidx27.1, align 1
%conv28.1 = zext i8 %27 to i32
%sub29.1 = sub i32 %conv26.1, %conv28.1
%shl30.1 = shl i32 %sub29.1, 16
%add31.1 = add i32 %shl30.1, %sub24.1
%arrayidx32.1 = getelementptr i8, ptr %add.ptr3, i64 3
%28 = load i8, ptr %arrayidx32.1, align 1
%conv33.1 = zext i8 %28 to i32
%arrayidx34.1 = getelementptr i8, ptr %add.ptr644, i64 3
%29 = load i8, ptr %arrayidx34.1, align 1
%conv35.1 = zext i8 %29 to i32
%sub36.1 = sub i32 %conv33.1, %conv35.1
%arrayidx37.1 = getelementptr i8, ptr %add.ptr3, i64 7
%30 = load i8, ptr %arrayidx37.1, align 1
%conv38.1 = zext i8 %30 to i32
%arrayidx39.1 = getelementptr i8, ptr %add.ptr644, i64 7
%31 = load i8, ptr %arrayidx39.1, align 1
%conv40.1 = zext i8 %31 to i32
%sub41.1 = sub i32 %conv38.1, %conv40.1
%shl42.1 = shl i32 %sub41.1, 16
%add43.1 = add i32 %shl42.1, %sub36.1
%add44.1 = add i32 %add19.1, %add.1
%sub45.1 = sub i32 %add.1, %add19.1
%add46.1 = add i32 %add43.1, %add31.1
%sub47.1 = sub i32 %add31.1, %add43.1
%add48.1 = add i32 %add46.1, %add44.1
%sub51.1 = sub i32 %add44.1, %add46.1
%add55.1 = add i32 %sub47.1, %sub45.1
%sub59.1 = sub i32 %sub45.1, %sub47.1
%add.ptr.1 = getelementptr i8, ptr %add.ptr, i64 %idx.ext
%add.ptr64.1 = getelementptr i8, ptr %add.ptr64, i64 %idx.ext63
%32 = load i8, ptr %add.ptr.1, align 1
%conv.2 = zext i8 %32 to i32
%33 = load i8, ptr %add.ptr64.1, align 1
%conv2.2 = zext i8 %33 to i32
%sub.2 = sub i32 %conv.2, %conv2.2
%arrayidx3.2 = getelementptr i8, ptr %add.ptr.1, i64 4
%34 = load i8, ptr %arrayidx3.2, align 1
%conv4.2 = zext i8 %34 to i32
%arrayidx5.2 = getelementptr i8, ptr %add.ptr64.1, i64 4
%35 = load i8, ptr %arrayidx5.2, align 1
%conv6.2 = zext i8 %35 to i32
%sub7.2 = sub i32 %conv4.2, %conv6.2
%shl.2 = shl i32 %sub7.2, 16
%add.2 = add i32 %shl.2, %sub.2
%arrayidx8.2 = getelementptr i8, ptr %add.ptr.1, i64 1
%36 = load i8, ptr %arrayidx8.2, align 1
%conv9.2 = zext i8 %36 to i32
%arrayidx10.2 = getelementptr i8, ptr %add.ptr64.1, i64 1
%37 = load i8, ptr %arrayidx10.2, align 1
%conv11.2 = zext i8 %37 to i32
%sub12.2 = sub i32 %conv9.2, %conv11.2
%arrayidx13.2 = getelementptr i8, ptr %add.ptr.1, i64 5
%38 = load i8, ptr %arrayidx13.2, align 1
%conv14.2 = zext i8 %38 to i32
%arrayidx15.2 = getelementptr i8, ptr %add.ptr64.1, i64 5
%39 = load i8, ptr %arrayidx15.2, align 1
%conv16.2 = zext i8 %39 to i32
%sub17.2 = sub i32 %conv14.2, %conv16.2
%shl18.2 = shl i32 %sub17.2, 16
%add19.2 = add i32 %shl18.2, %sub12.2
%arrayidx20.2 = getelementptr i8, ptr %add.ptr.1, i64 2
%40 = load i8, ptr %arrayidx20.2, align 1
%conv21.2 = zext i8 %40 to i32
%arrayidx22.2 = getelementptr i8, ptr %add.ptr64.1, i64 2
%41 = load i8, ptr %arrayidx22.2, align 1
%conv23.2 = zext i8 %41 to i32
%sub24.2 = sub i32 %conv21.2, %conv23.2
%arrayidx25.2 = getelementptr i8, ptr %add.ptr.1, i64 6
%42 = load i8, ptr %arrayidx25.2, align 1
%conv26.2 = zext i8 %42 to i32
%arrayidx27.2 = getelementptr i8, ptr %add.ptr64.1, i64 6
%43 = load i8, ptr %arrayidx27.2, align 1
%conv28.2 = zext i8 %43 to i32
%sub29.2 = sub i32 %conv26.2, %conv28.2
%shl30.2 = shl i32 %sub29.2, 16
%add31.2 = add i32 %shl30.2, %sub24.2
%arrayidx32.2 = getelementptr i8, ptr %add.ptr.1, i64 3
%44 = load i8, ptr %arrayidx32.2, align 1
%conv33.2 = zext i8 %44 to i32
%arrayidx34.2 = getelementptr i8, ptr %add.ptr64.1, i64 3
%45 = load i8, ptr %arrayidx34.2, align 1
%conv35.2 = zext i8 %45 to i32
%sub36.2 = sub i32 %conv33.2, %conv35.2
%arrayidx37.2 = getelementptr i8, ptr %add.ptr.1, i64 7
%46 = load i8, ptr %arrayidx37.2, align 1
%conv38.2 = zext i8 %46 to i32
%arrayidx39.2 = getelementptr i8, ptr %add.ptr64.1, i64 7
%47 = load i8, ptr %arrayidx39.2, align 1
%conv40.2 = zext i8 %47 to i32
%sub41.2 = sub i32 %conv38.2, %conv40.2
%shl42.2 = shl i32 %sub41.2, 16
%add43.2 = add i32 %shl42.2, %sub36.2
%add44.2 = add i32 %add19.2, %add.2
%sub45.2 = sub i32 %add.2, %add19.2
%add46.2 = add i32 %add43.2, %add31.2
%sub47.2 = sub i32 %add31.2, %add43.2
%add48.2 = add i32 %add46.2, %add44.2
%sub51.2 = sub i32 %add44.2, %add46.2
%add55.2 = add i32 %sub47.2, %sub45.2
%sub59.2 = sub i32 %sub45.2, %sub47.2
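; Row 3 (".3" values): same difference-and-butterfly pattern, but both base
; pointers are literally "ptr null" and some intermediate GEPs are gone -
; likely a by-product of test-case reduction; the shape of the computation,
; not the addresses, is what exercises the vectorizer here.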
%48 = load i8, ptr null, align 1
%conv.3 = zext i8 %48 to i32
%49 = load i8, ptr null, align 1
%conv2.3 = zext i8 %49 to i32
%sub.3 = sub i32 %conv.3, %conv2.3
%arrayidx3.3 = getelementptr i8, ptr null, i64 4
%50 = load i8, ptr %arrayidx3.3, align 1
%conv4.3 = zext i8 %50 to i32
%arrayidx5.3 = getelementptr i8, ptr null, i64 4
%51 = load i8, ptr %arrayidx5.3, align 1
%conv6.3 = zext i8 %51 to i32
%sub7.3 = sub i32 %conv4.3, %conv6.3
%shl.3 = shl i32 %sub7.3, 16
%add.3 = add i32 %shl.3, %sub.3
%arrayidx8.3 = getelementptr i8, ptr null, i64 1
%52 = load i8, ptr %arrayidx8.3, align 1
%conv9.3 = zext i8 %52 to i32
%arrayidx10.3 = getelementptr i8, ptr null, i64 1
%53 = load i8, ptr %arrayidx10.3, align 1
%conv11.3 = zext i8 %53 to i32
%sub12.3 = sub i32 %conv9.3, %conv11.3
%54 = load i8, ptr null, align 1
%conv14.3 = zext i8 %54 to i32
%arrayidx15.3 = getelementptr i8, ptr null, i64 5
%55 = load i8, ptr %arrayidx15.3, align 1
%conv16.3 = zext i8 %55 to i32
%sub17.3 = sub i32 %conv14.3, %conv16.3
%shl18.3 = shl i32 %sub17.3, 16
%add19.3 = add i32 %shl18.3, %sub12.3
%arrayidx20.3 = getelementptr i8, ptr null, i64 2
%56 = load i8, ptr %arrayidx20.3, align 1
%conv21.3 = zext i8 %56 to i32
%arrayidx22.3 = getelementptr i8, ptr null, i64 2
%57 = load i8, ptr %arrayidx22.3, align 1
%conv23.3 = zext i8 %57 to i32
%sub24.3 = sub i32 %conv21.3, %conv23.3
%58 = load i8, ptr null, align 1
%conv26.3 = zext i8 %58 to i32
%arrayidx27.3 = getelementptr i8, ptr null, i64 6
%59 = load i8, ptr %arrayidx27.3, align 1
%conv28.3 = zext i8 %59 to i32
%sub29.3 = sub i32 %conv26.3, %conv28.3
%shl30.3 = shl i32 %sub29.3, 16
%add31.3 = add i32 %shl30.3, %sub24.3
%arrayidx32.3 = getelementptr i8, ptr null, i64 3
%60 = load i8, ptr %arrayidx32.3, align 1
%conv33.3 = zext i8 %60 to i32
%arrayidx34.3 = getelementptr i8, ptr null, i64 3
%61 = load i8, ptr %arrayidx34.3, align 1
%conv35.3 = zext i8 %61 to i32
%sub36.3 = sub i32 %conv33.3, %conv35.3
%62 = load i8, ptr null, align 1
%conv38.3 = zext i8 %62 to i32
%arrayidx39.3 = getelementptr i8, ptr null, i64 7
%63 = load i8, ptr %arrayidx39.3, align 1
%conv40.3 = zext i8 %63 to i32
%sub41.3 = sub i32 %conv38.3, %conv40.3
%shl42.3 = shl i32 %sub41.3, 16
%add43.3 = add i32 %shl42.3, %sub36.3
%add44.3 = add i32 %add19.3, %add.3
%sub45.3 = sub i32 %add.3, %add19.3
%add46.3 = add i32 %add43.3, %add31.3
%sub47.3 = sub i32 %add31.3, %add43.3
%add48.3 = add i32 %add46.3, %add44.3
%sub51.3 = sub i32 %add44.3, %add46.3
%add55.3 = add i32 %sub47.3, %sub45.3
%sub59.3 = sub i32 %sub45.3, %sub47.3
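; Vertical pass over the per-row %add48* sums: an add/sub butterfly across the
; four rows, then each butterfly result is combined with a sign mask computed
; as ((x >> 15) & 0x10001) * 0xffff and xor'ed with x. This resembles the
; packed 16-bit absolute-value idiom (a + s) ^ s, though after reduction x is
; not always the butterfly result itself. The four xor results accumulate into
; %add113.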
%add78 = add i32 %add48.1, %add48
%sub86 = sub i32 %add48, %add48.1
%add94 = add i32 %add48.3, %add48.2
%sub102 = sub i32 %add48.2, %add48.3
%add103 = add i32 %add94, %add78
%sub104 = sub i32 %add78, %add94
%add105 = add i32 %sub102, %sub86
%sub106 = sub i32 %sub86, %sub102
%shr.i = lshr i32 %conv.3, 15
%and.i = and i32 %shr.i, 65537
%mul.i = mul i32 %and.i, 65535
%add.i = add i32 %mul.i, %add103
%xor.i = xor i32 %add.i, %conv.3
%shr.i49 = lshr i32 %add46.2, 15
%and.i50 = and i32 %shr.i49, 65537
%mul.i51 = mul i32 %and.i50, 65535
%add.i52 = add i32 %mul.i51, %add105
%xor.i53 = xor i32 %add.i52, %add46.2
%shr.i54 = lshr i32 %add46.1, 15
%and.i55 = and i32 %shr.i54, 65537
%mul.i56 = mul i32 %and.i55, 65535
%add.i57 = add i32 %mul.i56, %sub104
%xor.i58 = xor i32 %add.i57, %add46.1
%shr.i59 = lshr i32 %add46, 15
%and.i60 = and i32 %shr.i59, 65537
%mul.i61 = mul i32 %and.i60, 65535
%add.i62 = add i32 %mul.i61, %sub106
%xor.i63 = xor i32 %add.i62, %add46
%add110 = add i32 %xor.i53, %xor.i
%add112 = add i32 %add110, %xor.i58
%add113 = add i32 %add112, %xor.i63
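; Same vertical butterfly and sign-mask folding for the per-row %add55* terms;
; the running total %add113 is carried into %add108.1 and extended to %add113.1.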
%add78.1 = add i32 %add55.1, %add55
%sub86.1 = sub i32 %add55, %add55.1
%add94.1 = add i32 %add55.3, %add55.2
%sub102.1 = sub i32 %add55.2, %add55.3
%add103.1 = add i32 %add94.1, %add78.1
%sub104.1 = sub i32 %add78.1, %add94.1
%add105.1 = add i32 %sub102.1, %sub86.1
%sub106.1 = sub i32 %sub86.1, %sub102.1
%shr.i.1 = lshr i32 %conv9.2, 15
%and.i.1 = and i32 %shr.i.1, 65537
%mul.i.1 = mul i32 %and.i.1, 65535
%add.i.1 = add i32 %mul.i.1, %add103.1
%xor.i.1 = xor i32 %add.i.1, %conv9.2
%shr.i49.1 = lshr i32 %conv.2, 15
%and.i50.1 = and i32 %shr.i49.1, 65537
%mul.i51.1 = mul i32 %and.i50.1, 65535
%add.i52.1 = add i32 %mul.i51.1, %add105.1
%xor.i53.1 = xor i32 %add.i52.1, %conv.2
%shr.i54.1 = lshr i32 %sub47.1, 15
%and.i55.1 = and i32 %shr.i54.1, 65537
%mul.i56.1 = mul i32 %and.i55.1, 65535
%add.i57.1 = add i32 %mul.i56.1, %sub104.1
%xor.i58.1 = xor i32 %add.i57.1, %sub47.1
%shr.i59.1 = lshr i32 %sub47, 15
%and.i60.1 = and i32 %shr.i59.1, 65537
%mul.i61.1 = mul i32 %and.i60.1, 65535
%add.i62.1 = add i32 %mul.i61.1, %sub106.1
%xor.i63.1 = xor i32 %add.i62.1, %sub47
%add108.1 = add i32 %xor.i53.1, %add113
%add110.1 = add i32 %add108.1, %xor.i.1
%add112.1 = add i32 %add110.1, %xor.i58.1
%add113.1 = add i32 %add112.1, %xor.i63.1
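; Third vertical group: identical processing of the per-row %sub51* terms,
; accumulated on top of %add113.1 to give %add113.2.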
%add78.2 = add i32 %sub51.1, %sub51
%sub86.2 = sub i32 %sub51, %sub51.1
%add94.2 = add i32 %sub51.3, %sub51.2
%sub102.2 = sub i32 %sub51.2, %sub51.3
%add103.2 = add i32 %add94.2, %add78.2
%sub104.2 = sub i32 %add78.2, %add94.2
%add105.2 = add i32 %sub102.2, %sub86.2
%sub106.2 = sub i32 %sub86.2, %sub102.2
%shr.i.2 = lshr i32 %conv9.1, 15
%and.i.2 = and i32 %shr.i.2, 65537
%mul.i.2 = mul i32 %and.i.2, 65535
%add.i.2 = add i32 %mul.i.2, %add103.2
%xor.i.2 = xor i32 %add.i.2, %conv9.1
%shr.i49.2 = lshr i32 %conv.1, 15
%and.i50.2 = and i32 %shr.i49.2, 65537
%mul.i51.2 = mul i32 %and.i50.2, 65535
%add.i52.2 = add i32 %mul.i51.2, %add105.2
%xor.i53.2 = xor i32 %add.i52.2, %conv.1
%shr.i54.2 = lshr i32 %conv21.1, 15
%and.i55.2 = and i32 %shr.i54.2, 65537
%mul.i56.2 = mul i32 %and.i55.2, 65535
%add.i57.2 = add i32 %mul.i56.2, %sub104.2
%xor.i58.2 = xor i32 %add.i57.2, %conv21.1
%shr.i59.2 = lshr i32 %add44, 15
%and.i60.2 = and i32 %shr.i59.2, 65537
%mul.i61.2 = mul i32 %and.i60.2, 65535
%add.i62.2 = add i32 %mul.i61.2, %sub106.2
%xor.i63.2 = xor i32 %add.i62.2, %add44
%add108.2 = add i32 %xor.i53.2, %add113.1
%add110.2 = add i32 %add108.2, %xor.i.2
%add112.2 = add i32 %add110.2, %xor.i58.2
%add113.2 = add i32 %add112.2, %xor.i63.2
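; Fourth vertical group: the per-row %sub59* terms; the final accumulator
; %add113.3 is the function's return value.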
%add78.3 = add i32 %sub59.1, %sub59
%sub86.3 = sub i32 %sub59, %sub59.1
%add94.3 = add i32 %sub59.3, %sub59.2
%sub102.3 = sub i32 %sub59.2, %sub59.3
%add103.3 = add i32 %add94.3, %add78.3
%sub104.3 = sub i32 %add78.3, %add94.3
%add105.3 = add i32 %sub102.3, %sub86.3
%sub106.3 = sub i32 %sub86.3, %sub102.3
%shr.i.3 = lshr i32 %conv9, 15
%and.i.3 = and i32 %shr.i.3, 65537
%mul.i.3 = mul i32 %and.i.3, 65535
%add.i.3 = add i32 %mul.i.3, %add103.3
%xor.i.3 = xor i32 %add.i.3, %conv9
%shr.i49.3 = lshr i32 %conv, 15
%and.i50.3 = and i32 %shr.i49.3, 65537
%mul.i51.3 = mul i32 %and.i50.3, 65535
%add.i52.3 = add i32 %mul.i51.3, %add105.3
%xor.i53.3 = xor i32 %add.i52.3, %conv
%shr.i54.3 = lshr i32 %conv21, 15
%and.i55.3 = and i32 %shr.i54.3, 65537
%mul.i56.3 = mul i32 %and.i55.3, 65535
%add.i57.3 = add i32 %mul.i56.3, %sub104.3
%xor.i58.3 = xor i32 %add.i57.3, %conv21
%shr.i59.3 = lshr i32 %conv33, 15
%and.i60.3 = and i32 %shr.i59.3, 65537
%mul.i61.3 = mul i32 %and.i60.3, 65535
%add.i62.3 = add i32 %mul.i61.3, %sub106.3
%xor.i63.3 = xor i32 %add.i62.3, %conv33
%add108.3 = add i32 %xor.i53.3, %add113.2
%add110.3 = add i32 %add108.3, %xor.i.3
%add112.3 = add i32 %add110.3, %xor.i58.3
%add113.3 = add i32 %add112.3, %xor.i63.3
ret i32 %add113.3
}