
Commit 82511af

[LoopUnroll] Introduce parallel reduction phis when unrolling.
When partially or runtime unrolling loops with reductions, the reductions are currently performed in order in the loop, negating most of the benefit of unrolling such loops. This patch extends unrolling code-gen to keep a parallel reduction phi per unrolled iteration and to combine the partial results into the final value after the loop. On out-of-order CPUs, this allows multiple reduction chains to execute in parallel.

For now, the transformation is restricted to cases where we unroll a small number of iterations (hard-coded to 4, but this should maybe be capped via TTI depending on the available execution units), to avoid introducing an excessive number of parallel phis. It also requires single-block loops where the unrolled iterations are known not to exit the loop (either due to runtime unrolling or partial unrolling). This ensures that the unrolled loop is a single basic block with a single exit block, where we can place the final reduction computation.

The initial implementation also only supports parallelizing loops with a single reduction, and only integer reductions. Those restrictions are just to keep the initial implementation simpler and can easily be lifted as follow-ups.

With corresponding AArch64 unrolling preferences in TTI, which I will also share soon, this triggers in ~300 loops across a wide range of workloads, including LLVM itself, ffmpeg, av1aom, sqlite, blender, brotli, zstd and more.
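To illustrate the idea, here is a hand-written IR sketch (not one of the patch's tests) of a sum reduction unrolled by 2, assuming the trip count is a multiple of 2: each unrolled iteration feeds its own accumulator phi, the cloned phi starts at the add identity 0, and a single add in the exit block combines the partial sums.

define i32 @sum_unrolled_by_2(ptr %a, i64 %n) {
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next.1, %loop ]
  %rdx = phi i32 [ 0, %entry ], [ %rdx.next, %loop ]
  %rdx.1 = phi i32 [ 0, %entry ], [ %rdx.next.1, %loop ]   ; parallel accumulator, starts at identity 0
  %gep = getelementptr inbounds i32, ptr %a, i64 %iv
  %l0 = load i32, ptr %gep, align 4
  %rdx.next = add i32 %rdx, %l0
  %iv.next = add i64 %iv, 1
  %gep.1 = getelementptr inbounds i32, ptr %a, i64 %iv.next
  %l1 = load i32, ptr %gep.1, align 4
  %rdx.next.1 = add i32 %rdx.1, %l1                        ; independent reduction chain
  %iv.next.1 = add i64 %iv, 2
  %ec = icmp eq i64 %iv.next.1, %n
  br i1 %ec, label %exit, label %loop

exit:
  %bin.rdx = add i32 %rdx.next.1, %rdx.next                ; combine partial sums once
  ret i32 %bin.rdx
}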
1 parent e138c95 commit 82511af

File tree: 8 files changed, +341 -134 lines changed


llvm/include/llvm/Transforms/Utils/UnrollLoop.h

Lines changed: 3 additions & 0 deletions
@@ -163,6 +163,9 @@ LLVM_ABI bool computeUnrollCount(
     TargetTransformInfo::UnrollingPreferences &UP,
     TargetTransformInfo::PeelingPreferences &PP, bool &UseUpperBound);
 
+LLVM_ABI std::optional<RecurrenceDescriptor>
+canParallelizeReductionWhenUnrolling(PHINode &Phi, Loop *L,
+                                     ScalarEvolution *SE);
 } // end namespace llvm
 
 #endif // LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H

llvm/lib/Transforms/Utils/LoopUnroll.cpp

Lines changed: 127 additions & 0 deletions
@@ -41,6 +41,7 @@
 #include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
@@ -660,6 +661,38 @@ llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
     OrigPHINode.push_back(cast<PHINode>(I));
   }
 
+  // Collect phi nodes for reductions for which we can introduce multiple
+  // parallel reduction phis and compute the final reduction result after the
+  // loop. This requires a single exit block after unrolling. This is ensured by
+  // restricting to single-block loops where the unrolled iterations are known
+  // to not exit.
+  DenseMap<PHINode *, RecurrenceDescriptor> Reductions;
+  bool CanAddAdditionalAccumulators =
+      !CompletelyUnroll && L->getNumBlocks() == 1 &&
+      (ULO.Runtime ||
+       (ExitInfos.contains(Header) && ((ExitInfos[Header].TripCount != 0 &&
+                                        ExitInfos[Header].BreakoutTrip == 0))));
+
+  // Limit parallelizing reductions to unroll counts of 4 or less for now.
+  // TODO: The number of parallel reductions should depend on the number of
+  // execution units. We also don't have to add a parallel reduction phi per
+  // unrolled iteration, but could for example add a parallel phi for every 2
+  // unrolled iterations.
+  if (CanAddAdditionalAccumulators && ULO.Count <= 4) {
+    for (PHINode &Phi : Header->phis()) {
+      auto RdxDesc = canParallelizeReductionWhenUnrolling(Phi, L, SE);
+      if (!RdxDesc)
+        continue;
+
+      // Only handle duplicate phis for a single reduction for now.
+      // TODO: Handle any number of reductions
+      if (!Reductions.empty())
+        continue;
+
+      Reductions[&Phi] = *RdxDesc;
+    }
+  }
+
   std::vector<BasicBlock *> Headers;
   std::vector<BasicBlock *> Latches;
   Headers.push_back(Header);
@@ -710,6 +743,7 @@ llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
   // latch. This is a reasonable default placement if we don't have block
   // frequencies, and if we do, well the layout will be adjusted later.
   auto BlockInsertPt = std::next(LatchBlock->getIterator());
+  SmallVector<Value *> PartialReductions;
   for (unsigned It = 1; It != ULO.Count; ++It) {
     SmallVector<BasicBlock *, 8> NewBlocks;
     SmallDenseMap<const Loop *, Loop *, 4> NewLoops;
@@ -733,6 +767,31 @@ llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
     for (PHINode *OrigPHI : OrigPHINode) {
       PHINode *NewPHI = cast<PHINode>(VMap[OrigPHI]);
       Value *InVal = NewPHI->getIncomingValueForBlock(LatchBlock);
+
+      // Use cloned phis as parallel phis for partial reductions, which will
+      // get combined to the final reduction result after the loop.
+      if (Reductions.contains(OrigPHI)) {
+        // Collect partial reduction results.
+        if (PartialReductions.empty())
+          PartialReductions.push_back(InVal);
+        PartialReductions.push_back(VMap[InVal]);
+
+        // Update the start value for the cloned phis to use the identity
+        // value for the reduction.
+        const RecurrenceDescriptor &RdxDesc = Reductions[OrigPHI];
+        NewPHI->setIncomingValueForBlock(
+            L->getLoopPreheader(),
+            getRecurrenceIdentity(RdxDesc.getRecurrenceKind(),
+                                  OrigPHI->getType(),
+                                  RdxDesc.getFastMathFlags()));
+
+        // Update NewPHI to use the cloned value for the iteration and move
+        // to header.
+        NewPHI->replaceUsesOfWith(InVal, VMap[InVal]);
+        NewPHI->moveBefore(OrigPHI->getIterator());
+        continue;
+      }
+
       if (Instruction *InValI = dyn_cast<Instruction>(InVal))
         if (It > 1 && L->contains(InValI))
           InVal = LastValueMap[InValI];
@@ -832,7 +891,11 @@ llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
       PN->replaceAllUsesWith(PN->getIncomingValueForBlock(Preheader));
       PN->eraseFromParent();
     } else if (ULO.Count > 1) {
+      if (Reductions.contains(PN))
+        continue;
+
       Value *InVal = PN->removeIncomingValue(LatchBlock, false);
+
       // If this value was defined in the loop, take the value defined by the
       // last iteration of the loop.
       if (Instruction *InValI = dyn_cast<Instruction>(InVal)) {
@@ -1010,6 +1073,35 @@ llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
     }
   }
 
+  // If there are partial reductions, create code in the exit block to compute
+  // the final result and update users of the final result.
+  if (!PartialReductions.empty()) {
+    BasicBlock *ExitBlock = L->getExitBlock();
+    assert(ExitBlock &&
+           "Can only introduce parallel reduction phis with single exit block");
+    assert(Reductions.size() == 1 &&
+           "currently only a single reduction is supported");
+    Value *FinalRdxValue = PartialReductions.back();
+    Value *RdxResult = nullptr;
+    for (PHINode &Phi : ExitBlock->phis()) {
+      if (Phi.getIncomingValueForBlock(L->getLoopLatch()) != FinalRdxValue)
+        continue;
+      if (!RdxResult) {
+        RdxResult = PartialReductions.front();
+        IRBuilder Builder(ExitBlock, ExitBlock->getFirstNonPHIIt());
+        RecurKind RK = Reductions.begin()->second.getRecurrenceKind();
+        for (Value *RdxPart : drop_begin(PartialReductions)) {
+          RdxResult = Builder.CreateBinOp(
+              (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(RK),
+              RdxPart, RdxResult, "bin.rdx");
+        }
+        NeedToFixLCSSA = true;
+      }
+      Phi.replaceAllUsesWith(RdxResult);
+      continue;
+    }
+  }
+
   if (DTUToUse) {
     // Apply updates to the DomTree.
     DT = &DTU.getDomTree();
@@ -1111,3 +1203,38 @@ MDNode *llvm::GetUnrollMetadata(MDNode *LoopID, StringRef Name) {
   }
   return nullptr;
 }
+
+std::optional<RecurrenceDescriptor>
+llvm::canParallelizeReductionWhenUnrolling(PHINode &Phi, Loop *L,
+                                           ScalarEvolution *SE) {
+  RecurrenceDescriptor RedDes;
+  if (!RecurrenceDescriptor::isReductionPHI(&Phi, L, RedDes,
+                                            /*DemandedBits=*/nullptr,
+                                            /*AC=*/nullptr, /*DT=*/nullptr, SE))
+    return std::nullopt;
+  RecurKind RK = RedDes.getRecurrenceKind();
+  // Skip unsupported reductions.
+  // TODO: Handle additional reductions, including FP and min-max
+  // reductions.
+  if (!RecurrenceDescriptor::isIntegerRecurrenceKind(RK) ||
+      RecurrenceDescriptor::isAnyOfRecurrenceKind(RK) ||
+      RecurrenceDescriptor::isFindIVRecurrenceKind(RK) ||
+      RecurrenceDescriptor::isMinMaxRecurrenceKind(RK))
+    return std::nullopt;
+
+  // Don't unroll reductions with constant ops; those can be folded to a
+  // single induction update.
+  if (any_of(cast<Instruction>(Phi.getIncomingValueForBlock(L->getLoopLatch()))
+                 ->operands(),
+             IsaPred<Constant>))
+    return std::nullopt;
+
+  BasicBlock *Latch = L->getLoopLatch();
+  if (!Latch ||
+      !is_contained(
+          cast<Instruction>(Phi.getIncomingValueForBlock(Latch))->operands(),
+          &Phi))
+    return std::nullopt;
+
+  return RedDes;
+}
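For reference, here is a hand-written example (not one of the patch's tests) of a reduction this helper would reject: the latch update only adds a constant, so the whole chain folds to a single update and gains nothing from parallel accumulators.

define i32 @sum_of_constants(i64 %n) {
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %rdx = phi i32 [ 0, %entry ], [ %rdx.next, %loop ]
  %rdx.next = add i32 %rdx, 3          ; constant operand -> rejected
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv.next, %n
  br i1 %ec, label %exit, label %loop

exit:
  ret i32 %rdx.next
}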

llvm/test/Transforms/LoopUnroll/AArch64/apple-unrolling.ll

Lines changed: 31 additions & 17 deletions
@@ -603,27 +603,33 @@ define i32 @test_add_reduction_unroll_partial(ptr %a, i64 noundef %n) {
 ; OTHER-NEXT: br label %[[LOOP:.*]]
 ; OTHER: [[LOOP]]:
 ; OTHER-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_3:%.*]], %[[LOOP]] ]
-; OTHER-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP]] ]
+; OTHER-NEXT: [[RDX_1:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT_1:%.*]], %[[LOOP]] ]
+; OTHER-NEXT: [[RDX_2:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT_2:%.*]], %[[LOOP]] ]
+; OTHER-NEXT: [[RDX_3:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP]] ]
+; OTHER-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT:%.*]], %[[LOOP]] ]
 ; OTHER-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]]
 ; OTHER-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_A]], align 2
-; OTHER-NEXT: [[RDX_NEXT:%.*]] = add nuw nsw i32 [[RDX]], [[TMP0]]
+; OTHER-NEXT: [[RDX_NEXT]] = add nuw nsw i32 [[RDX]], [[TMP0]]
 ; OTHER-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1
 ; OTHER-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT]]
 ; OTHER-NEXT: [[TMP1:%.*]] = load i32, ptr [[GEP_A_1]], align 2
-; OTHER-NEXT: [[RDX_2:%.*]] = add nuw nsw i32 [[RDX_NEXT]], [[TMP1]]
+; OTHER-NEXT: [[RDX_NEXT_1]] = add nuw nsw i32 [[RDX_1]], [[TMP1]]
 ; OTHER-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2
 ; OTHER-NEXT: [[GEP_A_2:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT_1]]
 ; OTHER-NEXT: [[TMP2:%.*]] = load i32, ptr [[GEP_A_2]], align 2
-; OTHER-NEXT: [[RDX_NEXT_2:%.*]] = add nuw nsw i32 [[RDX_2]], [[TMP2]]
+; OTHER-NEXT: [[RDX_NEXT_2]] = add nuw nsw i32 [[RDX_2]], [[TMP2]]
 ; OTHER-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3
 ; OTHER-NEXT: [[GEP_A_3:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT_2]]
 ; OTHER-NEXT: [[TMP3:%.*]] = load i32, ptr [[GEP_A_3]], align 2
-; OTHER-NEXT: [[RDX_NEXT_3]] = add nuw nsw i32 [[RDX_NEXT_2]], [[TMP3]]
+; OTHER-NEXT: [[RDX_NEXT_3]] = add nuw nsw i32 [[RDX_3]], [[TMP3]]
 ; OTHER-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4
 ; OTHER-NEXT: [[EC_3:%.*]] = icmp eq i64 [[IV_NEXT_3]], 1024
 ; OTHER-NEXT: br i1 [[EC_3]], label %[[EXIT:.*]], label %[[LOOP]]
 ; OTHER: [[EXIT]]:
-; OTHER-NEXT: [[BIN_RDX2:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ]
+; OTHER-NEXT: [[RES:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ]
+; OTHER-NEXT: [[BIN_RDX:%.*]] = add i32 [[RDX_NEXT_1]], [[RDX_NEXT]]
+; OTHER-NEXT: [[BIN_RDX1:%.*]] = add i32 [[RDX_NEXT_2]], [[BIN_RDX]]
+; OTHER-NEXT: [[BIN_RDX2:%.*]] = add i32 [[RDX_NEXT_3]], [[BIN_RDX1]]
 ; OTHER-NEXT: ret i32 [[BIN_RDX2]]
 ;
 entry:
@@ -747,23 +753,25 @@ define i32 @test_add_and_mul_reduction_unroll_partial(ptr %a, i64 noundef %n) {
 ; OTHER-NEXT: br label %[[LOOP:.*]]
 ; OTHER: [[LOOP]]:
 ; OTHER-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_1:%.*]], %[[LOOP]] ]
-; OTHER-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT_1:%.*]], %[[LOOP]] ]
+; OTHER-NEXT: [[RDX_1:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT_1:%.*]], %[[LOOP]] ]
+; OTHER-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT:%.*]], %[[LOOP]] ]
 ; OTHER-NEXT: [[RDX_2:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_2_NEXT_1:%.*]], %[[LOOP]] ]
 ; OTHER-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]]
 ; OTHER-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_A]], align 2
-; OTHER-NEXT: [[RDX_NEXT:%.*]] = add nuw nsw i32 [[RDX]], [[TMP0]]
+; OTHER-NEXT: [[RDX_NEXT]] = add nuw nsw i32 [[RDX]], [[TMP0]]
 ; OTHER-NEXT: [[RDX_2_NEXT:%.*]] = mul i32 [[RDX_2]], [[TMP0]]
 ; OTHER-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1
 ; OTHER-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT]]
 ; OTHER-NEXT: [[TMP1:%.*]] = load i32, ptr [[GEP_A_1]], align 2
-; OTHER-NEXT: [[RDX_NEXT_1]] = add nuw nsw i32 [[RDX_NEXT]], [[TMP1]]
+; OTHER-NEXT: [[RDX_NEXT_1]] = add nuw nsw i32 [[RDX_1]], [[TMP1]]
 ; OTHER-NEXT: [[RDX_2_NEXT_1]] = mul i32 [[RDX_2_NEXT]], [[TMP1]]
 ; OTHER-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2
 ; OTHER-NEXT: [[EC_1:%.*]] = icmp eq i64 [[IV_NEXT_1]], 1024
 ; OTHER-NEXT: br i1 [[EC_1]], label %[[EXIT:.*]], label %[[LOOP]]
 ; OTHER: [[EXIT]]:
-; OTHER-NEXT: [[BIN_RDX:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ]
+; OTHER-NEXT: [[RES_1:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ]
 ; OTHER-NEXT: [[RES_2:%.*]] = phi i32 [ [[RDX_2_NEXT_1]], %[[LOOP]] ]
+; OTHER-NEXT: [[BIN_RDX:%.*]] = add i32 [[RDX_NEXT_1]], [[RDX_NEXT]]
 ; OTHER-NEXT: [[SUM:%.*]] = add i32 [[BIN_RDX]], [[RES_2]]
 ; OTHER-NEXT: ret i32 [[SUM]]
 ;
@@ -820,23 +828,26 @@ define i32 @test_add_reduction_runtime(ptr %a, i64 noundef %n) {
 ; OTHER-NEXT: br label %[[LOOP:.*]]
 ; OTHER: [[LOOP]]:
 ; OTHER-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[IV_NEXT_3:%.*]], %[[LOOP]] ]
-; OTHER-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY_NEW]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP]] ]
+; OTHER-NEXT: [[RDX_1:%.*]] = phi i32 [ 0, %[[ENTRY_NEW]] ], [ [[RDX_NEXT_1:%.*]], %[[LOOP]] ]
+; OTHER-NEXT: [[RDX_2:%.*]] = phi i32 [ 0, %[[ENTRY_NEW]] ], [ [[RDX_NEXT_2:%.*]], %[[LOOP]] ]
+; OTHER-NEXT: [[RDX_3:%.*]] = phi i32 [ 0, %[[ENTRY_NEW]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP]] ]
+; OTHER-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY_NEW]] ], [ [[RDX_NEXT:%.*]], %[[LOOP]] ]
 ; OTHER-NEXT: [[NITER:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[NITER_NEXT_3:%.*]], %[[LOOP]] ]
 ; OTHER-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]]
 ; OTHER-NEXT: [[TMP2:%.*]] = load i32, ptr [[GEP_A]], align 2
-; OTHER-NEXT: [[RDX_NEXT:%.*]] = add nuw nsw i32 [[RDX]], [[TMP2]]
+; OTHER-NEXT: [[RDX_NEXT]] = add nuw nsw i32 [[RDX]], [[TMP2]]
 ; OTHER-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1
 ; OTHER-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT]]
 ; OTHER-NEXT: [[TMP3:%.*]] = load i32, ptr [[GEP_A_1]], align 2
-; OTHER-NEXT: [[RDX_2:%.*]] = add nuw nsw i32 [[RDX_NEXT]], [[TMP3]]
+; OTHER-NEXT: [[RDX_NEXT_1]] = add nuw nsw i32 [[RDX_1]], [[TMP3]]
 ; OTHER-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2
 ; OTHER-NEXT: [[GEP_A_2:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT_1]]
 ; OTHER-NEXT: [[TMP4:%.*]] = load i32, ptr [[GEP_A_2]], align 2
-; OTHER-NEXT: [[RDX_NEXT_2:%.*]] = add nuw nsw i32 [[RDX_2]], [[TMP4]]
+; OTHER-NEXT: [[RDX_NEXT_2]] = add nuw nsw i32 [[RDX_2]], [[TMP4]]
 ; OTHER-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3
 ; OTHER-NEXT: [[GEP_A_3:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT_2]]
 ; OTHER-NEXT: [[TMP5:%.*]] = load i32, ptr [[GEP_A_3]], align 2
-; OTHER-NEXT: [[RDX_NEXT_3]] = add nuw nsw i32 [[RDX_NEXT_2]], [[TMP5]]
+; OTHER-NEXT: [[RDX_NEXT_3]] = add nuw nsw i32 [[RDX_3]], [[TMP5]]
 ; OTHER-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4
 ; OTHER-NEXT: [[NITER_NEXT_3]] = add i64 [[NITER]], 4
 ; OTHER-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i64 [[NITER_NEXT_3]], [[UNROLL_ITER]]
@@ -845,11 +856,14 @@ define i32 @test_add_reduction_runtime(ptr %a, i64 noundef %n) {
 ; OTHER-NEXT: [[RES_PH_PH:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ]
 ; OTHER-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_3]], %[[LOOP]] ]
 ; OTHER-NEXT: [[RDX_UNR_PH:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ]
+; OTHER-NEXT: [[BIN_RDX:%.*]] = add i32 [[RDX_NEXT_1]], [[RDX_NEXT]]
+; OTHER-NEXT: [[BIN_RDX2:%.*]] = add i32 [[RDX_NEXT_2]], [[BIN_RDX]]
+; OTHER-NEXT: [[BIN_RDX3:%.*]] = add i32 [[RDX_NEXT_3]], [[BIN_RDX2]]
 ; OTHER-NEXT: br label %[[EXIT_UNR_LCSSA]]
 ; OTHER: [[EXIT_UNR_LCSSA]]:
-; OTHER-NEXT: [[RES_PH:%.*]] = phi i32 [ poison, %[[ENTRY]] ], [ [[RES_PH_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ]
+; OTHER-NEXT: [[RES_PH:%.*]] = phi i32 [ poison, %[[ENTRY]] ], [ [[BIN_RDX3]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ]
 ; OTHER-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ]
-; OTHER-NEXT: [[RDX_UNR:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ]
+; OTHER-NEXT: [[RDX_UNR:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[BIN_RDX3]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ]
 ; OTHER-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0
 ; OTHER-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[EXIT:.*]]
 ; OTHER: [[LOOP_EPIL_PREHEADER]]:
