clang-p2996/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll
Philip Reames 248be98418 Reapply "[RISCV][TTI] Add shuffle costing for masked slide lowering (#128537)"
With a fix for fully undef masks.  These can't reach the lowering code, but
can reach the costing code via e.g. SLP.
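
A minimal sketch of such a mask (hypothetical function, for illustration
only): every lane of the shuffle mask is poison, so the result folds to
poison and never survives to lowering, yet SLP's cost model may still ask
TTI to price the shuffle.

define <4 x i8> @fully_undef_mask(<4 x i8> %v) {
  ; Every mask lane is poison; only the cost model ever inspects this shape.
  %s = shufflevector <4 x i8> %v, <4 x i8> poison, <4 x i32> poison
  ret <4 x i8> %s
}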

This change adds the TTI costing corresponding to the recently added
isMaskedSlidePair lowering for vector shuffles. However, since the
existing costing code covered neither slideup, slidedown, nor the
(now removed) isElementRotate, the impact is broader in scope than
just the new lowering.
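
A minimal sketch of the shapes the new costing recognizes (hypothetical
functions, for illustration only): a single-source mask that shifts lanes
by a constant amount is one slide, and a two-source mask made of two
contiguous slid runs is a slide pair, which the backend can emit as a
vslidedown plus a masked vslideup.

define <4 x i32> @slide_down_one(<4 x i32> %v) {
  ; Lanes shift toward index 0 by one; lowers to a single vslidedown.vi.
  %s = shufflevector <4 x i32> %v, <4 x i32> poison, <4 x i32> <i32 1, i32 2, i32 3, i32 poison>
  ret <4 x i32> %s
}

define <4 x i32> @slide_pair(<4 x i32> %a, <4 x i32> %b) {
  ; Lanes 2..3 of %a followed by lanes 0..1 of %b: a vslidedown.vi of %a
  ; by 2 merged with a vslideup.vi of %b by 2 under a lane mask.
  %s = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
  ret <4 x i32> %s
}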

---------

Co-authored-by: Alexey Bataev <a.bataev@gmx.com>
Co-authored-by: Luke Lau <luke_lau@icloud.com>
2025-02-28 08:02:27 -08:00


; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt -S -mtriple riscv64-unknown-linux-gnu < %s --passes=slp-vectorizer -mattr=+v -slp-threshold=-20 | FileCheck %s
; RUN: opt -S -mtriple riscv64-unknown-linux-gnu < %s --passes=slp-vectorizer -mattr=+v -slp-threshold=-15 | FileCheck %s --check-prefix=THR15
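; Note on the RUN lines above: -slp-threshold sets the minimum gain SLP
; requires before vectorizing, so the negative values force vectorization of
; trees the cost model would otherwise reject; the two check prefixes record
; how the chosen vectorization shape differs between the two biases.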
define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.ptr, ptr %add.ptr64) {
; CHECK-LABEL: define i32 @test(
; CHECK-SAME: ptr [[PIX1:%.*]], ptr [[PIX2:%.*]], i64 [[IDX_EXT:%.*]], i64 [[IDX_EXT63:%.*]], ptr [[ADD_PTR:%.*]], ptr [[ADD_PTR64:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[PIX1]], i64 4
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr i8, ptr [[PIX2]], i64 4
; CHECK-NEXT: [[ADD_PTR3:%.*]] = getelementptr i8, ptr [[PIX1]], i64 [[IDX_EXT]]
; CHECK-NEXT: [[ADD_PTR644:%.*]] = getelementptr i8, ptr [[PIX2]], i64 [[IDX_EXT63]]
; CHECK-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 4
; CHECK-NEXT: [[ARRAYIDX5_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 4
; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr i8, ptr [[ADD_PTR]], i64 [[IDX_EXT]]
; CHECK-NEXT: [[ADD_PTR64_1:%.*]] = getelementptr i8, ptr [[ADD_PTR64]], i64 [[IDX_EXT63]]
; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 4
; CHECK-NEXT: [[ARRAYIDX5_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 4
; CHECK-NEXT: [[ARRAYIDX5_3:%.*]] = getelementptr i8, ptr null, i64 4
; CHECK-NEXT: [[TMP52:%.*]] = load i8, ptr null, align 1
; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr null, align 1
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[PIX1]], align 1
; CHECK-NEXT: [[TMP92:%.*]] = load <4 x i8>, ptr [[PIX2]], align 1
; CHECK-NEXT: [[TMP95:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1
; CHECK-NEXT: [[TMP98:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5]], align 1
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i8>, ptr [[ADD_PTR3]], align 1
; CHECK-NEXT: [[TMP132:%.*]] = load <4 x i8>, ptr [[ADD_PTR644]], align 1
; CHECK-NEXT: [[TMP135:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_1]], align 1
; CHECK-NEXT: [[TMP138:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_1]], align 1
; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i8>, ptr [[ADD_PTR_1]], align 1
; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i8>, ptr [[ADD_PTR64_1]], align 1
; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_2]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_2]], align 1
; CHECK-NEXT: [[TMP14:%.*]] = load <4 x i8>, ptr null, align 1
; CHECK-NEXT: [[TMP15:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> poison, <4 x i8> [[TMP10]], i64 0)
; CHECK-NEXT: [[TMP16:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP15]], <4 x i8> [[TMP14]], i64 4)
; CHECK-NEXT: [[TMP17:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP16]], <4 x i8> [[TMP2]], i64 8)
; CHECK-NEXT: [[TMP18:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP17]], <4 x i8> [[TMP6]], i64 12)
; CHECK-NEXT: [[TMP19:%.*]] = zext <16 x i8> [[TMP18]] to <16 x i32>
; CHECK-NEXT: [[TMP20:%.*]] = load <4 x i8>, ptr null, align 1
; CHECK-NEXT: [[TMP21:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> poison, <4 x i8> [[TMP11]], i64 0)
; CHECK-NEXT: [[TMP22:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP21]], <4 x i8> [[TMP20]], i64 4)
; CHECK-NEXT: [[TMP23:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP22]], <4 x i8> [[TMP92]], i64 8)
; CHECK-NEXT: [[TMP24:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP23]], <4 x i8> [[TMP132]], i64 12)
; CHECK-NEXT: [[TMP25:%.*]] = zext <16 x i8> [[TMP24]] to <16 x i32>
; CHECK-NEXT: [[TMP26:%.*]] = sub <16 x i32> [[TMP19]], [[TMP25]]
; CHECK-NEXT: [[TMP27:%.*]] = shufflevector <16 x i32> [[TMP26]], <16 x i32> poison, <16 x i32> <i32 3, i32 7, i32 15, i32 11, i32 2, i32 6, i32 14, i32 10, i32 1, i32 5, i32 13, i32 9, i32 0, i32 4, i32 12, i32 8>
; CHECK-NEXT: [[TMP28:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 null, i64 4, <2 x i1> splat (i1 true), i32 2)
; CHECK-NEXT: [[TMP29:%.*]] = shufflevector <2 x i8> [[TMP28]], <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
; CHECK-NEXT: [[TMP30:%.*]] = shufflevector <4 x i8> [[TMP12]], <4 x i8> [[TMP29]], <16 x i32> <i32 3, i32 4, i32 poison, i32 poison, i32 2, i32 poison, i32 poison, i32 poison, i32 1, i32 poison, i32 poison, i32 poison, i32 0, i32 5, i32 poison, i32 poison>
; CHECK-NEXT: [[TMP31:%.*]] = shufflevector <4 x i8> [[TMP135]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT: [[TMP32:%.*]] = shufflevector <16 x i8> [[TMP30]], <16 x i8> [[TMP31]], <16 x i32> <i32 0, i32 1, i32 19, i32 poison, i32 4, i32 poison, i32 18, i32 poison, i32 8, i32 poison, i32 17, i32 poison, i32 12, i32 13, i32 16, i32 poison>
; CHECK-NEXT: [[TMP33:%.*]] = shufflevector <4 x i8> [[TMP95]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT: [[TMP34:%.*]] = shufflevector <16 x i8> [[TMP32]], <16 x i8> [[TMP33]], <16 x i32> <i32 0, i32 1, i32 2, i32 19, i32 4, i32 poison, i32 6, i32 18, i32 8, i32 poison, i32 10, i32 17, i32 12, i32 13, i32 14, i32 16>
; CHECK-NEXT: [[TMP35:%.*]] = insertelement <16 x i8> [[TMP34]], i8 [[TMP3]], i32 5
; CHECK-NEXT: [[TMP36:%.*]] = insertelement <16 x i8> [[TMP35]], i8 [[TMP52]], i32 9
; CHECK-NEXT: [[TMP37:%.*]] = zext <16 x i8> [[TMP36]] to <16 x i32>
; CHECK-NEXT: [[TMP38:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_3]], align 1
; CHECK-NEXT: [[TMP39:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> poison, <4 x i8> [[TMP13]], i64 0)
; CHECK-NEXT: [[TMP40:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP39]], <4 x i8> [[TMP38]], i64 4)
; CHECK-NEXT: [[TMP41:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP40]], <4 x i8> [[TMP98]], i64 8)
; CHECK-NEXT: [[TMP42:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP41]], <4 x i8> [[TMP138]], i64 12)
; CHECK-NEXT: [[TMP43:%.*]] = zext <16 x i8> [[TMP42]] to <16 x i32>
; CHECK-NEXT: [[TMP44:%.*]] = shufflevector <16 x i32> [[TMP43]], <16 x i32> poison, <16 x i32> <i32 3, i32 7, i32 15, i32 11, i32 2, i32 6, i32 14, i32 10, i32 1, i32 5, i32 13, i32 9, i32 0, i32 4, i32 12, i32 8>
; CHECK-NEXT: [[TMP45:%.*]] = sub <16 x i32> [[TMP37]], [[TMP44]]
; CHECK-NEXT: [[TMP46:%.*]] = shl <16 x i32> [[TMP45]], splat (i32 16)
; CHECK-NEXT: [[TMP47:%.*]] = add <16 x i32> [[TMP46]], [[TMP27]]
; CHECK-NEXT: [[TMP48:%.*]] = shufflevector <16 x i32> [[TMP47]], <16 x i32> poison, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11>
; CHECK-NEXT: [[TMP49:%.*]] = add <16 x i32> [[TMP47]], [[TMP48]]
; CHECK-NEXT: [[TMP50:%.*]] = sub <16 x i32> [[TMP47]], [[TMP48]]
; CHECK-NEXT: [[TMP51:%.*]] = shufflevector <16 x i32> [[TMP49]], <16 x i32> [[TMP50]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[TMP70:%.*]] = shufflevector <16 x i32> [[TMP51]], <16 x i32> poison, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP53:%.*]] = add <16 x i32> [[TMP51]], [[TMP70]]
; CHECK-NEXT: [[TMP54:%.*]] = sub <16 x i32> [[TMP51]], [[TMP70]]
; CHECK-NEXT: [[TMP55:%.*]] = shufflevector <16 x i32> [[TMP53]], <16 x i32> [[TMP54]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[TMP56:%.*]] = shufflevector <16 x i32> [[TMP55]], <16 x i32> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
; CHECK-NEXT: [[TMP57:%.*]] = sub <16 x i32> [[TMP55]], [[TMP56]]
; CHECK-NEXT: [[TMP58:%.*]] = add <16 x i32> [[TMP55]], [[TMP56]]
; CHECK-NEXT: [[TMP59:%.*]] = shufflevector <16 x i32> [[TMP57]], <16 x i32> [[TMP58]], <16 x i32> <i32 0, i32 17, i32 18, i32 3, i32 4, i32 21, i32 22, i32 7, i32 8, i32 25, i32 26, i32 11, i32 12, i32 29, i32 30, i32 15>
; CHECK-NEXT: [[TMP60:%.*]] = shufflevector <16 x i32> [[TMP59]], <16 x i32> poison, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
; CHECK-NEXT: [[TMP61:%.*]] = add <16 x i32> [[TMP59]], [[TMP60]]
; CHECK-NEXT: [[TMP62:%.*]] = sub <16 x i32> [[TMP59]], [[TMP60]]
; CHECK-NEXT: [[TMP63:%.*]] = shufflevector <16 x i32> [[TMP61]], <16 x i32> [[TMP62]], <16 x i32> <i32 0, i32 1, i32 18, i32 19, i32 4, i32 5, i32 22, i32 23, i32 8, i32 9, i32 26, i32 27, i32 12, i32 13, i32 30, i32 31>
; CHECK-NEXT: [[TMP64:%.*]] = shufflevector <16 x i32> [[TMP51]], <16 x i32> [[TMP19]], <16 x i32> <i32 0, i32 20, i32 2, i32 3, i32 16, i32 17, i32 6, i32 7, i32 28, i32 29, i32 30, i32 11, i32 24, i32 25, i32 26, i32 27>
; CHECK-NEXT: [[TMP65:%.*]] = lshr <16 x i32> [[TMP64]], splat (i32 15)
; CHECK-NEXT: [[TMP66:%.*]] = and <16 x i32> [[TMP65]], splat (i32 65537)
; CHECK-NEXT: [[TMP67:%.*]] = mul <16 x i32> [[TMP66]], splat (i32 65535)
; CHECK-NEXT: [[TMP68:%.*]] = add <16 x i32> [[TMP67]], [[TMP63]]
; CHECK-NEXT: [[TMP69:%.*]] = xor <16 x i32> [[TMP68]], [[TMP64]]
; CHECK-NEXT: [[ADD113_3:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP69]])
; CHECK-NEXT: ret i32 [[ADD113_3]]
;
; THR15-LABEL: define i32 @test(
; THR15-SAME: ptr [[PIX1:%.*]], ptr [[PIX2:%.*]], i64 [[IDX_EXT:%.*]], i64 [[IDX_EXT63:%.*]], ptr [[ADD_PTR:%.*]], ptr [[ADD_PTR64:%.*]]) #[[ATTR0:[0-9]+]] {
; THR15-NEXT: entry:
; THR15-NEXT: [[ARRAYIDX3:%.*]] = getelementptr i8, ptr [[PIX1]], i64 4
; THR15-NEXT: [[ARRAYIDX5:%.*]] = getelementptr i8, ptr [[PIX2]], i64 4
; THR15-NEXT: [[ADD_PTR3:%.*]] = getelementptr i8, ptr [[PIX1]], i64 [[IDX_EXT]]
; THR15-NEXT: [[ADD_PTR644:%.*]] = getelementptr i8, ptr [[PIX2]], i64 [[IDX_EXT63]]
; THR15-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 4
; THR15-NEXT: [[ARRAYIDX5_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 4
; THR15-NEXT: [[ADD_PTR_1:%.*]] = getelementptr i8, ptr [[ADD_PTR]], i64 [[IDX_EXT]]
; THR15-NEXT: [[ADD_PTR64_1:%.*]] = getelementptr i8, ptr [[ADD_PTR64]], i64 [[IDX_EXT63]]
; THR15-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 4
; THR15-NEXT: [[ARRAYIDX5_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 4
; THR15-NEXT: [[ARRAYIDX5_3:%.*]] = getelementptr i8, ptr null, i64 4
; THR15-NEXT: [[TMP48:%.*]] = load i8, ptr null, align 1
; THR15-NEXT: [[TMP1:%.*]] = load i8, ptr null, align 1
; THR15-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[PIX1]], align 1
; THR15-NEXT: [[TMP143:%.*]] = load <4 x i8>, ptr [[PIX2]], align 1
; THR15-NEXT: [[TMP146:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3]], align 1
; THR15-NEXT: [[TMP147:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5]], align 1
; THR15-NEXT: [[TMP6:%.*]] = load <4 x i8>, ptr [[ADD_PTR3]], align 1
; THR15-NEXT: [[TMP148:%.*]] = load <4 x i8>, ptr [[ADD_PTR644]], align 1
; THR15-NEXT: [[TMP152:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_1]], align 1
; THR15-NEXT: [[TMP153:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_1]], align 1
; THR15-NEXT: [[TMP10:%.*]] = load <4 x i8>, ptr [[ADD_PTR_1]], align 1
; THR15-NEXT: [[TMP11:%.*]] = load <4 x i8>, ptr [[ADD_PTR64_1]], align 1
; THR15-NEXT: [[TMP12:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_2]], align 1
; THR15-NEXT: [[TMP13:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_2]], align 1
; THR15-NEXT: [[TMP14:%.*]] = load <4 x i8>, ptr null, align 1
; THR15-NEXT: [[TMP15:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> poison, <4 x i8> [[TMP10]], i64 0)
; THR15-NEXT: [[TMP16:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP15]], <4 x i8> [[TMP14]], i64 4)
; THR15-NEXT: [[TMP17:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP16]], <4 x i8> [[TMP2]], i64 8)
; THR15-NEXT: [[TMP18:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP17]], <4 x i8> [[TMP6]], i64 12)
; THR15-NEXT: [[TMP19:%.*]] = zext <16 x i8> [[TMP18]] to <16 x i32>
; THR15-NEXT: [[TMP20:%.*]] = load <4 x i8>, ptr null, align 1
; THR15-NEXT: [[TMP21:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> poison, <4 x i8> [[TMP11]], i64 0)
; THR15-NEXT: [[TMP22:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP21]], <4 x i8> [[TMP20]], i64 4)
; THR15-NEXT: [[TMP23:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP22]], <4 x i8> [[TMP143]], i64 8)
; THR15-NEXT: [[TMP24:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP23]], <4 x i8> [[TMP148]], i64 12)
; THR15-NEXT: [[TMP25:%.*]] = zext <16 x i8> [[TMP24]] to <16 x i32>
; THR15-NEXT: [[TMP26:%.*]] = sub <16 x i32> [[TMP19]], [[TMP25]]
; THR15-NEXT: [[TMP27:%.*]] = shufflevector <16 x i32> [[TMP26]], <16 x i32> poison, <16 x i32> <i32 3, i32 7, i32 15, i32 11, i32 2, i32 6, i32 14, i32 10, i32 1, i32 5, i32 13, i32 9, i32 0, i32 4, i32 12, i32 8>
; THR15-NEXT: [[TMP28:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 null, i64 4, <2 x i1> splat (i1 true), i32 2)
; THR15-NEXT: [[TMP29:%.*]] = shufflevector <2 x i8> [[TMP28]], <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
; THR15-NEXT: [[TMP30:%.*]] = shufflevector <4 x i8> [[TMP12]], <4 x i8> [[TMP29]], <16 x i32> <i32 3, i32 4, i32 poison, i32 poison, i32 2, i32 poison, i32 poison, i32 poison, i32 1, i32 poison, i32 poison, i32 poison, i32 0, i32 5, i32 poison, i32 poison>
; THR15-NEXT: [[TMP31:%.*]] = shufflevector <4 x i8> [[TMP152]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; THR15-NEXT: [[TMP32:%.*]] = shufflevector <16 x i8> [[TMP30]], <16 x i8> [[TMP31]], <16 x i32> <i32 0, i32 1, i32 19, i32 poison, i32 4, i32 poison, i32 18, i32 poison, i32 8, i32 poison, i32 17, i32 poison, i32 12, i32 13, i32 16, i32 poison>
; THR15-NEXT: [[TMP33:%.*]] = shufflevector <4 x i8> [[TMP146]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; THR15-NEXT: [[TMP34:%.*]] = shufflevector <16 x i8> [[TMP32]], <16 x i8> [[TMP33]], <16 x i32> <i32 0, i32 1, i32 2, i32 19, i32 4, i32 poison, i32 6, i32 18, i32 8, i32 poison, i32 10, i32 17, i32 12, i32 13, i32 14, i32 16>
; THR15-NEXT: [[TMP35:%.*]] = insertelement <16 x i8> [[TMP34]], i8 [[TMP1]], i32 5
; THR15-NEXT: [[TMP36:%.*]] = insertelement <16 x i8> [[TMP35]], i8 [[TMP48]], i32 9
; THR15-NEXT: [[TMP37:%.*]] = zext <16 x i8> [[TMP36]] to <16 x i32>
; THR15-NEXT: [[TMP38:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_3]], align 1
; THR15-NEXT: [[TMP39:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> poison, <4 x i8> [[TMP13]], i64 0)
; THR15-NEXT: [[TMP40:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP39]], <4 x i8> [[TMP38]], i64 4)
; THR15-NEXT: [[TMP41:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP40]], <4 x i8> [[TMP147]], i64 8)
; THR15-NEXT: [[TMP42:%.*]] = call <16 x i8> @llvm.vector.insert.v16i8.v4i8(<16 x i8> [[TMP41]], <4 x i8> [[TMP153]], i64 12)
; THR15-NEXT: [[TMP43:%.*]] = zext <16 x i8> [[TMP42]] to <16 x i32>
; THR15-NEXT: [[TMP44:%.*]] = shufflevector <16 x i32> [[TMP43]], <16 x i32> poison, <16 x i32> <i32 3, i32 7, i32 15, i32 11, i32 2, i32 6, i32 14, i32 10, i32 1, i32 5, i32 13, i32 9, i32 0, i32 4, i32 12, i32 8>
; THR15-NEXT: [[TMP45:%.*]] = sub <16 x i32> [[TMP37]], [[TMP44]]
; THR15-NEXT: [[TMP46:%.*]] = shl <16 x i32> [[TMP45]], splat (i32 16)
; THR15-NEXT: [[TMP47:%.*]] = add <16 x i32> [[TMP46]], [[TMP27]]
; THR15-NEXT: [[TMP70:%.*]] = shufflevector <16 x i32> [[TMP47]], <16 x i32> poison, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11>
; THR15-NEXT: [[TMP49:%.*]] = add <16 x i32> [[TMP47]], [[TMP70]]
; THR15-NEXT: [[TMP50:%.*]] = sub <16 x i32> [[TMP47]], [[TMP70]]
; THR15-NEXT: [[TMP51:%.*]] = shufflevector <16 x i32> [[TMP49]], <16 x i32> [[TMP50]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 28, i32 29, i32 30, i32 31>
; THR15-NEXT: [[TMP52:%.*]] = shufflevector <16 x i32> [[TMP51]], <16 x i32> poison, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; THR15-NEXT: [[TMP53:%.*]] = add <16 x i32> [[TMP51]], [[TMP52]]
; THR15-NEXT: [[TMP54:%.*]] = sub <16 x i32> [[TMP51]], [[TMP52]]
; THR15-NEXT: [[TMP55:%.*]] = shufflevector <16 x i32> [[TMP53]], <16 x i32> [[TMP54]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; THR15-NEXT: [[TMP56:%.*]] = shufflevector <16 x i32> [[TMP55]], <16 x i32> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
; THR15-NEXT: [[TMP57:%.*]] = sub <16 x i32> [[TMP55]], [[TMP56]]
; THR15-NEXT: [[TMP58:%.*]] = add <16 x i32> [[TMP55]], [[TMP56]]
; THR15-NEXT: [[TMP59:%.*]] = shufflevector <16 x i32> [[TMP57]], <16 x i32> [[TMP58]], <16 x i32> <i32 0, i32 17, i32 18, i32 3, i32 4, i32 21, i32 22, i32 7, i32 8, i32 25, i32 26, i32 11, i32 12, i32 29, i32 30, i32 15>
; THR15-NEXT: [[TMP60:%.*]] = shufflevector <16 x i32> [[TMP59]], <16 x i32> poison, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
; THR15-NEXT: [[TMP61:%.*]] = add <16 x i32> [[TMP59]], [[TMP60]]
; THR15-NEXT: [[TMP62:%.*]] = sub <16 x i32> [[TMP59]], [[TMP60]]
; THR15-NEXT: [[TMP63:%.*]] = shufflevector <16 x i32> [[TMP61]], <16 x i32> [[TMP62]], <16 x i32> <i32 0, i32 1, i32 18, i32 19, i32 4, i32 5, i32 22, i32 23, i32 8, i32 9, i32 26, i32 27, i32 12, i32 13, i32 30, i32 31>
; THR15-NEXT: [[TMP64:%.*]] = shufflevector <16 x i32> [[TMP51]], <16 x i32> [[TMP19]], <16 x i32> <i32 0, i32 20, i32 2, i32 3, i32 16, i32 17, i32 6, i32 7, i32 28, i32 29, i32 30, i32 11, i32 24, i32 25, i32 26, i32 27>
; THR15-NEXT: [[TMP65:%.*]] = lshr <16 x i32> [[TMP64]], splat (i32 15)
; THR15-NEXT: [[TMP66:%.*]] = and <16 x i32> [[TMP65]], splat (i32 65537)
; THR15-NEXT: [[TMP67:%.*]] = mul <16 x i32> [[TMP66]], splat (i32 65535)
; THR15-NEXT: [[TMP68:%.*]] = add <16 x i32> [[TMP67]], [[TMP63]]
; THR15-NEXT: [[TMP69:%.*]] = xor <16 x i32> [[TMP68]], [[TMP64]]
; THR15-NEXT: [[ADD113_3:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP69]])
; THR15-NEXT: ret i32 [[ADD113_3]]
;
entry:
%0 = load i8, ptr %pix1, align 1
%conv = zext i8 %0 to i32
%1 = load i8, ptr %pix2, align 1
%conv2 = zext i8 %1 to i32
%sub = sub i32 %conv, %conv2
%arrayidx3 = getelementptr i8, ptr %pix1, i64 4
%2 = load i8, ptr %arrayidx3, align 1
%conv4 = zext i8 %2 to i32
%arrayidx5 = getelementptr i8, ptr %pix2, i64 4
%3 = load i8, ptr %arrayidx5, align 1
%conv6 = zext i8 %3 to i32
%sub7 = sub i32 %conv4, %conv6
%shl = shl i32 %sub7, 16
%add = add i32 %shl, %sub
%arrayidx8 = getelementptr i8, ptr %pix1, i64 1
%4 = load i8, ptr %arrayidx8, align 1
%conv9 = zext i8 %4 to i32
%arrayidx10 = getelementptr i8, ptr %pix2, i64 1
%5 = load i8, ptr %arrayidx10, align 1
%conv11 = zext i8 %5 to i32
%sub12 = sub i32 %conv9, %conv11
%arrayidx13 = getelementptr i8, ptr %pix1, i64 5
%6 = load i8, ptr %arrayidx13, align 1
%conv14 = zext i8 %6 to i32
%arrayidx15 = getelementptr i8, ptr %pix2, i64 5
%7 = load i8, ptr %arrayidx15, align 1
%conv16 = zext i8 %7 to i32
%sub17 = sub i32 %conv14, %conv16
%shl18 = shl i32 %sub17, 16
%add19 = add i32 %shl18, %sub12
%arrayidx20 = getelementptr i8, ptr %pix1, i64 2
%8 = load i8, ptr %arrayidx20, align 1
%conv21 = zext i8 %8 to i32
%arrayidx22 = getelementptr i8, ptr %pix2, i64 2
%9 = load i8, ptr %arrayidx22, align 1
%conv23 = zext i8 %9 to i32
%sub24 = sub i32 %conv21, %conv23
%arrayidx25 = getelementptr i8, ptr %pix1, i64 6
%10 = load i8, ptr %arrayidx25, align 1
%conv26 = zext i8 %10 to i32
%arrayidx27 = getelementptr i8, ptr %pix2, i64 6
%11 = load i8, ptr %arrayidx27, align 1
%conv28 = zext i8 %11 to i32
%sub29 = sub i32 %conv26, %conv28
%shl30 = shl i32 %sub29, 16
%add31 = add i32 %shl30, %sub24
%arrayidx32 = getelementptr i8, ptr %pix1, i64 3
%12 = load i8, ptr %arrayidx32, align 1
%conv33 = zext i8 %12 to i32
%arrayidx34 = getelementptr i8, ptr %pix2, i64 3
%13 = load i8, ptr %arrayidx34, align 1
%conv35 = zext i8 %13 to i32
%sub36 = sub i32 %conv33, %conv35
%arrayidx37 = getelementptr i8, ptr %pix1, i64 7
%14 = load i8, ptr %arrayidx37, align 1
%conv38 = zext i8 %14 to i32
%arrayidx39 = getelementptr i8, ptr %pix2, i64 7
%15 = load i8, ptr %arrayidx39, align 1
%conv40 = zext i8 %15 to i32
%sub41 = sub i32 %conv38, %conv40
%shl42 = shl i32 %sub41, 16
%add43 = add i32 %shl42, %sub36
%add44 = add i32 %add19, %add
%sub45 = sub i32 %add, %add19
%add46 = add i32 %add43, %add31
%sub47 = sub i32 %add31, %add43
%add48 = add i32 %add46, %add44
%sub51 = sub i32 %add44, %add46
%add55 = add i32 %sub47, %sub45
%sub59 = sub i32 %sub45, %sub47
%add.ptr3 = getelementptr i8, ptr %pix1, i64 %idx.ext
%add.ptr644 = getelementptr i8, ptr %pix2, i64 %idx.ext63
%16 = load i8, ptr %add.ptr3, align 1
%conv.1 = zext i8 %16 to i32
%17 = load i8, ptr %add.ptr644, align 1
%conv2.1 = zext i8 %17 to i32
%sub.1 = sub i32 %conv.1, %conv2.1
%arrayidx3.1 = getelementptr i8, ptr %add.ptr3, i64 4
%18 = load i8, ptr %arrayidx3.1, align 1
%conv4.1 = zext i8 %18 to i32
%arrayidx5.1 = getelementptr i8, ptr %add.ptr644, i64 4
%19 = load i8, ptr %arrayidx5.1, align 1
%conv6.1 = zext i8 %19 to i32
%sub7.1 = sub i32 %conv4.1, %conv6.1
%shl.1 = shl i32 %sub7.1, 16
%add.1 = add i32 %shl.1, %sub.1
%arrayidx8.1 = getelementptr i8, ptr %add.ptr3, i64 1
%20 = load i8, ptr %arrayidx8.1, align 1
%conv9.1 = zext i8 %20 to i32
%arrayidx10.1 = getelementptr i8, ptr %add.ptr644, i64 1
%21 = load i8, ptr %arrayidx10.1, align 1
%conv11.1 = zext i8 %21 to i32
%sub12.1 = sub i32 %conv9.1, %conv11.1
%arrayidx13.1 = getelementptr i8, ptr %add.ptr3, i64 5
%22 = load i8, ptr %arrayidx13.1, align 1
%conv14.1 = zext i8 %22 to i32
%arrayidx15.1 = getelementptr i8, ptr %add.ptr644, i64 5
%23 = load i8, ptr %arrayidx15.1, align 1
%conv16.1 = zext i8 %23 to i32
%sub17.1 = sub i32 %conv14.1, %conv16.1
%shl18.1 = shl i32 %sub17.1, 16
%add19.1 = add i32 %shl18.1, %sub12.1
%arrayidx20.1 = getelementptr i8, ptr %add.ptr3, i64 2
%24 = load i8, ptr %arrayidx20.1, align 1
%conv21.1 = zext i8 %24 to i32
%arrayidx22.1 = getelementptr i8, ptr %add.ptr644, i64 2
%25 = load i8, ptr %arrayidx22.1, align 1
%conv23.1 = zext i8 %25 to i32
%sub24.1 = sub i32 %conv21.1, %conv23.1
%arrayidx25.1 = getelementptr i8, ptr %add.ptr3, i64 6
%26 = load i8, ptr %arrayidx25.1, align 1
%conv26.1 = zext i8 %26 to i32
%arrayidx27.1 = getelementptr i8, ptr %add.ptr644, i64 6
%27 = load i8, ptr %arrayidx27.1, align 1
%conv28.1 = zext i8 %27 to i32
%sub29.1 = sub i32 %conv26.1, %conv28.1
%shl30.1 = shl i32 %sub29.1, 16
%add31.1 = add i32 %shl30.1, %sub24.1
%arrayidx32.1 = getelementptr i8, ptr %add.ptr3, i64 3
%28 = load i8, ptr %arrayidx32.1, align 1
%conv33.1 = zext i8 %28 to i32
%arrayidx34.1 = getelementptr i8, ptr %add.ptr644, i64 3
%29 = load i8, ptr %arrayidx34.1, align 1
%conv35.1 = zext i8 %29 to i32
%sub36.1 = sub i32 %conv33.1, %conv35.1
%arrayidx37.1 = getelementptr i8, ptr %add.ptr3, i64 7
%30 = load i8, ptr %arrayidx37.1, align 1
%conv38.1 = zext i8 %30 to i32
%arrayidx39.1 = getelementptr i8, ptr %add.ptr644, i64 7
%31 = load i8, ptr %arrayidx39.1, align 1
%conv40.1 = zext i8 %31 to i32
%sub41.1 = sub i32 %conv38.1, %conv40.1
%shl42.1 = shl i32 %sub41.1, 16
%add43.1 = add i32 %shl42.1, %sub36.1
%add44.1 = add i32 %add19.1, %add.1
%sub45.1 = sub i32 %add.1, %add19.1
%add46.1 = add i32 %add43.1, %add31.1
%sub47.1 = sub i32 %add31.1, %add43.1
%add48.1 = add i32 %add46.1, %add44.1
%sub51.1 = sub i32 %add44.1, %add46.1
%add55.1 = add i32 %sub47.1, %sub45.1
%sub59.1 = sub i32 %sub45.1, %sub47.1
%add.ptr.1 = getelementptr i8, ptr %add.ptr, i64 %idx.ext
%add.ptr64.1 = getelementptr i8, ptr %add.ptr64, i64 %idx.ext63
%32 = load i8, ptr %add.ptr.1, align 1
%conv.2 = zext i8 %32 to i32
%33 = load i8, ptr %add.ptr64.1, align 1
%conv2.2 = zext i8 %33 to i32
%sub.2 = sub i32 %conv.2, %conv2.2
%arrayidx3.2 = getelementptr i8, ptr %add.ptr.1, i64 4
%34 = load i8, ptr %arrayidx3.2, align 1
%conv4.2 = zext i8 %34 to i32
%arrayidx5.2 = getelementptr i8, ptr %add.ptr64.1, i64 4
%35 = load i8, ptr %arrayidx5.2, align 1
%conv6.2 = zext i8 %35 to i32
%sub7.2 = sub i32 %conv4.2, %conv6.2
%shl.2 = shl i32 %sub7.2, 16
%add.2 = add i32 %shl.2, %sub.2
%arrayidx8.2 = getelementptr i8, ptr %add.ptr.1, i64 1
%36 = load i8, ptr %arrayidx8.2, align 1
%conv9.2 = zext i8 %36 to i32
%arrayidx10.2 = getelementptr i8, ptr %add.ptr64.1, i64 1
%37 = load i8, ptr %arrayidx10.2, align 1
%conv11.2 = zext i8 %37 to i32
%sub12.2 = sub i32 %conv9.2, %conv11.2
%arrayidx13.2 = getelementptr i8, ptr %add.ptr.1, i64 5
%38 = load i8, ptr %arrayidx13.2, align 1
%conv14.2 = zext i8 %38 to i32
%arrayidx15.2 = getelementptr i8, ptr %add.ptr64.1, i64 5
%39 = load i8, ptr %arrayidx15.2, align 1
%conv16.2 = zext i8 %39 to i32
%sub17.2 = sub i32 %conv14.2, %conv16.2
%shl18.2 = shl i32 %sub17.2, 16
%add19.2 = add i32 %shl18.2, %sub12.2
%arrayidx20.2 = getelementptr i8, ptr %add.ptr.1, i64 2
%40 = load i8, ptr %arrayidx20.2, align 1
%conv21.2 = zext i8 %40 to i32
%arrayidx22.2 = getelementptr i8, ptr %add.ptr64.1, i64 2
%41 = load i8, ptr %arrayidx22.2, align 1
%conv23.2 = zext i8 %41 to i32
%sub24.2 = sub i32 %conv21.2, %conv23.2
%arrayidx25.2 = getelementptr i8, ptr %add.ptr.1, i64 6
%42 = load i8, ptr %arrayidx25.2, align 1
%conv26.2 = zext i8 %42 to i32
%arrayidx27.2 = getelementptr i8, ptr %add.ptr64.1, i64 6
%43 = load i8, ptr %arrayidx27.2, align 1
%conv28.2 = zext i8 %43 to i32
%sub29.2 = sub i32 %conv26.2, %conv28.2
%shl30.2 = shl i32 %sub29.2, 16
%add31.2 = add i32 %shl30.2, %sub24.2
%arrayidx32.2 = getelementptr i8, ptr %add.ptr.1, i64 3
%44 = load i8, ptr %arrayidx32.2, align 1
%conv33.2 = zext i8 %44 to i32
%arrayidx34.2 = getelementptr i8, ptr %add.ptr64.1, i64 3
%45 = load i8, ptr %arrayidx34.2, align 1
%conv35.2 = zext i8 %45 to i32
%sub36.2 = sub i32 %conv33.2, %conv35.2
%arrayidx37.2 = getelementptr i8, ptr %add.ptr.1, i64 7
%46 = load i8, ptr %arrayidx37.2, align 1
%conv38.2 = zext i8 %46 to i32
%arrayidx39.2 = getelementptr i8, ptr %add.ptr64.1, i64 7
%47 = load i8, ptr %arrayidx39.2, align 1
%conv40.2 = zext i8 %47 to i32
%sub41.2 = sub i32 %conv38.2, %conv40.2
%shl42.2 = shl i32 %sub41.2, 16
%add43.2 = add i32 %shl42.2, %sub36.2
%add44.2 = add i32 %add19.2, %add.2
%sub45.2 = sub i32 %add.2, %add19.2
%add46.2 = add i32 %add43.2, %add31.2
%sub47.2 = sub i32 %add31.2, %add43.2
%add48.2 = add i32 %add46.2, %add44.2
%sub51.2 = sub i32 %add44.2, %add46.2
%add55.2 = add i32 %sub47.2, %sub45.2
%sub59.2 = sub i32 %sub45.2, %sub47.2
%48 = load i8, ptr null, align 1
%conv.3 = zext i8 %48 to i32
%49 = load i8, ptr null, align 1
%conv2.3 = zext i8 %49 to i32
%sub.3 = sub i32 %conv.3, %conv2.3
%arrayidx3.3 = getelementptr i8, ptr null, i64 4
%50 = load i8, ptr %arrayidx3.3, align 1
%conv4.3 = zext i8 %50 to i32
%arrayidx5.3 = getelementptr i8, ptr null, i64 4
%51 = load i8, ptr %arrayidx5.3, align 1
%conv6.3 = zext i8 %51 to i32
%sub7.3 = sub i32 %conv4.3, %conv6.3
%shl.3 = shl i32 %sub7.3, 16
%add.3 = add i32 %shl.3, %sub.3
%arrayidx8.3 = getelementptr i8, ptr null, i64 1
%52 = load i8, ptr %arrayidx8.3, align 1
%conv9.3 = zext i8 %52 to i32
%arrayidx10.3 = getelementptr i8, ptr null, i64 1
%53 = load i8, ptr %arrayidx10.3, align 1
%conv11.3 = zext i8 %53 to i32
%sub12.3 = sub i32 %conv9.3, %conv11.3
%54 = load i8, ptr null, align 1
%conv14.3 = zext i8 %54 to i32
%arrayidx15.3 = getelementptr i8, ptr null, i64 5
%55 = load i8, ptr %arrayidx15.3, align 1
%conv16.3 = zext i8 %55 to i32
%sub17.3 = sub i32 %conv14.3, %conv16.3
%shl18.3 = shl i32 %sub17.3, 16
%add19.3 = add i32 %shl18.3, %sub12.3
%arrayidx20.3 = getelementptr i8, ptr null, i64 2
%56 = load i8, ptr %arrayidx20.3, align 1
%conv21.3 = zext i8 %56 to i32
%arrayidx22.3 = getelementptr i8, ptr null, i64 2
%57 = load i8, ptr %arrayidx22.3, align 1
%conv23.3 = zext i8 %57 to i32
%sub24.3 = sub i32 %conv21.3, %conv23.3
%58 = load i8, ptr null, align 1
%conv26.3 = zext i8 %58 to i32
%arrayidx27.3 = getelementptr i8, ptr null, i64 6
%59 = load i8, ptr %arrayidx27.3, align 1
%conv28.3 = zext i8 %59 to i32
%sub29.3 = sub i32 %conv26.3, %conv28.3
%shl30.3 = shl i32 %sub29.3, 16
%add31.3 = add i32 %shl30.3, %sub24.3
%arrayidx32.3 = getelementptr i8, ptr null, i64 3
%60 = load i8, ptr %arrayidx32.3, align 1
%conv33.3 = zext i8 %60 to i32
%arrayidx34.3 = getelementptr i8, ptr null, i64 3
%61 = load i8, ptr %arrayidx34.3, align 1
%conv35.3 = zext i8 %61 to i32
%sub36.3 = sub i32 %conv33.3, %conv35.3
%62 = load i8, ptr null, align 1
%conv38.3 = zext i8 %62 to i32
%arrayidx39.3 = getelementptr i8, ptr null, i64 7
%63 = load i8, ptr %arrayidx39.3, align 1
%conv40.3 = zext i8 %63 to i32
%sub41.3 = sub i32 %conv38.3, %conv40.3
%shl42.3 = shl i32 %sub41.3, 16
%add43.3 = add i32 %shl42.3, %sub36.3
%add44.3 = add i32 %add19.3, %add.3
%sub45.3 = sub i32 %add.3, %add19.3
%add46.3 = add i32 %add43.3, %add31.3
%sub47.3 = sub i32 %add31.3, %add43.3
%add48.3 = add i32 %add46.3, %add44.3
%sub51.3 = sub i32 %add44.3, %add46.3
%add55.3 = add i32 %sub47.3, %sub45.3
%sub59.3 = sub i32 %sub45.3, %sub47.3
%add78 = add i32 %add48.1, %add48
%sub86 = sub i32 %add48, %add48.1
%add94 = add i32 %add48.3, %add48.2
%sub102 = sub i32 %add48.2, %add48.3
%add103 = add i32 %add94, %add78
%sub104 = sub i32 %add78, %add94
%add105 = add i32 %sub102, %sub86
%sub106 = sub i32 %sub86, %sub102
%shr.i = lshr i32 %conv.3, 15
%and.i = and i32 %shr.i, 65537
%mul.i = mul i32 %and.i, 65535
%add.i = add i32 %mul.i, %add103
%xor.i = xor i32 %add.i, %conv.3
%shr.i49 = lshr i32 %add46.2, 15
%and.i50 = and i32 %shr.i49, 65537
%mul.i51 = mul i32 %and.i50, 65535
%add.i52 = add i32 %mul.i51, %add105
%xor.i53 = xor i32 %add.i52, %add46.2
%shr.i54 = lshr i32 %add46.1, 15
%and.i55 = and i32 %shr.i54, 65537
%mul.i56 = mul i32 %and.i55, 65535
%add.i57 = add i32 %mul.i56, %sub104
%xor.i58 = xor i32 %add.i57, %add46.1
%shr.i59 = lshr i32 %add46, 15
%and.i60 = and i32 %shr.i59, 65537
%mul.i61 = mul i32 %and.i60, 65535
%add.i62 = add i32 %mul.i61, %sub106
%xor.i63 = xor i32 %add.i62, %add46
%add110 = add i32 %xor.i53, %xor.i
%add112 = add i32 %add110, %xor.i58
%add113 = add i32 %add112, %xor.i63
%add78.1 = add i32 %add55.1, %add55
%sub86.1 = sub i32 %add55, %add55.1
%add94.1 = add i32 %add55.3, %add55.2
%sub102.1 = sub i32 %add55.2, %add55.3
%add103.1 = add i32 %add94.1, %add78.1
%sub104.1 = sub i32 %add78.1, %add94.1
%add105.1 = add i32 %sub102.1, %sub86.1
%sub106.1 = sub i32 %sub86.1, %sub102.1
%shr.i.1 = lshr i32 %conv9.2, 15
%and.i.1 = and i32 %shr.i.1, 65537
%mul.i.1 = mul i32 %and.i.1, 65535
%add.i.1 = add i32 %mul.i.1, %add103.1
%xor.i.1 = xor i32 %add.i.1, %conv9.2
%shr.i49.1 = lshr i32 %conv.2, 15
%and.i50.1 = and i32 %shr.i49.1, 65537
%mul.i51.1 = mul i32 %and.i50.1, 65535
%add.i52.1 = add i32 %mul.i51.1, %add105.1
%xor.i53.1 = xor i32 %add.i52.1, %conv.2
%shr.i54.1 = lshr i32 %sub47.1, 15
%and.i55.1 = and i32 %shr.i54.1, 65537
%mul.i56.1 = mul i32 %and.i55.1, 65535
%add.i57.1 = add i32 %mul.i56.1, %sub104.1
%xor.i58.1 = xor i32 %add.i57.1, %sub47.1
%shr.i59.1 = lshr i32 %sub47, 15
%and.i60.1 = and i32 %shr.i59.1, 65537
%mul.i61.1 = mul i32 %and.i60.1, 65535
%add.i62.1 = add i32 %mul.i61.1, %sub106.1
%xor.i63.1 = xor i32 %add.i62.1, %sub47
%add108.1 = add i32 %xor.i53.1, %add113
%add110.1 = add i32 %add108.1, %xor.i.1
%add112.1 = add i32 %add110.1, %xor.i58.1
%add113.1 = add i32 %add112.1, %xor.i63.1
%add78.2 = add i32 %sub51.1, %sub51
%sub86.2 = sub i32 %sub51, %sub51.1
%add94.2 = add i32 %sub51.3, %sub51.2
%sub102.2 = sub i32 %sub51.2, %sub51.3
%add103.2 = add i32 %add94.2, %add78.2
%sub104.2 = sub i32 %add78.2, %add94.2
%add105.2 = add i32 %sub102.2, %sub86.2
%sub106.2 = sub i32 %sub86.2, %sub102.2
%shr.i.2 = lshr i32 %conv9.1, 15
%and.i.2 = and i32 %shr.i.2, 65537
%mul.i.2 = mul i32 %and.i.2, 65535
%add.i.2 = add i32 %mul.i.2, %add103.2
%xor.i.2 = xor i32 %add.i.2, %conv9.1
%shr.i49.2 = lshr i32 %conv.1, 15
%and.i50.2 = and i32 %shr.i49.2, 65537
%mul.i51.2 = mul i32 %and.i50.2, 65535
%add.i52.2 = add i32 %mul.i51.2, %add105.2
%xor.i53.2 = xor i32 %add.i52.2, %conv.1
%shr.i54.2 = lshr i32 %conv21.1, 15
%and.i55.2 = and i32 %shr.i54.2, 65537
%mul.i56.2 = mul i32 %and.i55.2, 65535
%add.i57.2 = add i32 %mul.i56.2, %sub104.2
%xor.i58.2 = xor i32 %add.i57.2, %conv21.1
%shr.i59.2 = lshr i32 %add44, 15
%and.i60.2 = and i32 %shr.i59.2, 65537
%mul.i61.2 = mul i32 %and.i60.2, 65535
%add.i62.2 = add i32 %mul.i61.2, %sub106.2
%xor.i63.2 = xor i32 %add.i62.2, %add44
%add108.2 = add i32 %xor.i53.2, %add113.1
%add110.2 = add i32 %add108.2, %xor.i.2
%add112.2 = add i32 %add110.2, %xor.i58.2
%add113.2 = add i32 %add112.2, %xor.i63.2
%add78.3 = add i32 %sub59.1, %sub59
%sub86.3 = sub i32 %sub59, %sub59.1
%add94.3 = add i32 %sub59.3, %sub59.2
%sub102.3 = sub i32 %sub59.2, %sub59.3
%add103.3 = add i32 %add94.3, %add78.3
%sub104.3 = sub i32 %add78.3, %add94.3
%add105.3 = add i32 %sub102.3, %sub86.3
%sub106.3 = sub i32 %sub86.3, %sub102.3
%shr.i.3 = lshr i32 %conv9, 15
%and.i.3 = and i32 %shr.i.3, 65537
%mul.i.3 = mul i32 %and.i.3, 65535
%add.i.3 = add i32 %mul.i.3, %add103.3
%xor.i.3 = xor i32 %add.i.3, %conv9
%shr.i49.3 = lshr i32 %conv, 15
%and.i50.3 = and i32 %shr.i49.3, 65537
%mul.i51.3 = mul i32 %and.i50.3, 65535
%add.i52.3 = add i32 %mul.i51.3, %add105.3
%xor.i53.3 = xor i32 %add.i52.3, %conv
%shr.i54.3 = lshr i32 %conv21, 15
%and.i55.3 = and i32 %shr.i54.3, 65537
%mul.i56.3 = mul i32 %and.i55.3, 65535
%add.i57.3 = add i32 %mul.i56.3, %sub104.3
%xor.i58.3 = xor i32 %add.i57.3, %conv21
%shr.i59.3 = lshr i32 %conv33, 15
%and.i60.3 = and i32 %shr.i59.3, 65537
%mul.i61.3 = mul i32 %and.i60.3, 65535
%add.i62.3 = add i32 %mul.i61.3, %sub106.3
%xor.i63.3 = xor i32 %add.i62.3, %conv33
%add108.3 = add i32 %xor.i53.3, %add113.2
%add110.3 = add i32 %add108.3, %xor.i.3
%add112.3 = add i32 %add110.3, %xor.i58.3
%add113.3 = add i32 %add112.3, %xor.i63.3
ret i32 %add113.3
}