| File: | src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp |
| Warning: | line 198, column 28: Called C++ object pointer is null |
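For context, a minimal sketch of the pattern behind the reported warning (see lines 196-198 in the listing below). This is an illustration only, assuming a release-style build in which assert() compiles to a no-op, so the analyzer does not treat the assertion as a null check:

    // dyn_cast<> yields null when the terminator is not a BranchInst.
    BranchInst *BI = dyn_cast<BranchInst>(Header->getTerminator());
    // Compiled out under NDEBUG, so it does not guard the pointer for the analyzer.
    assert(BI && BI->isConditional() && "need header with conditional exit");
    // Line 198: the analyzer reports "Called C++ object pointer is null" here.
    BasicBlock *HeaderExit = BI->getSuccessor(0);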
  1 | //===----------------- LoopRotationUtils.cpp -----------------------------===//
  2 | //
  3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4 | // See https://llvm.org/LICENSE.txt for license information.
  5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6 | //
  7 | //===----------------------------------------------------------------------===//
  8 | //
  9 | // This file provides utilities to convert a loop into a loop with bottom test.
 10 | //
 11 | //===----------------------------------------------------------------------===//
 12 |
 13 | #include "llvm/Transforms/Utils/LoopRotationUtils.h"
 14 | #include "llvm/ADT/Statistic.h"
 15 | #include "llvm/Analysis/AssumptionCache.h"
 16 | #include "llvm/Analysis/BasicAliasAnalysis.h"
 17 | #include "llvm/Analysis/CodeMetrics.h"
 18 | #include "llvm/Analysis/DomTreeUpdater.h"
 19 | #include "llvm/Analysis/GlobalsModRef.h"
 20 | #include "llvm/Analysis/InstructionSimplify.h"
 21 | #include "llvm/Analysis/LoopPass.h"
 22 | #include "llvm/Analysis/MemorySSA.h"
 23 | #include "llvm/Analysis/MemorySSAUpdater.h"
 24 | #include "llvm/Analysis/ScalarEvolution.h"
 25 | #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
 26 | #include "llvm/Analysis/TargetTransformInfo.h"
 27 | #include "llvm/Analysis/ValueTracking.h"
 28 | #include "llvm/IR/CFG.h"
 29 | #include "llvm/IR/DebugInfo.h"
 30 | #include "llvm/IR/Dominators.h"
 31 | #include "llvm/IR/Function.h"
 32 | #include "llvm/IR/IntrinsicInst.h"
 33 | #include "llvm/IR/Module.h"
 34 | #include "llvm/Support/CommandLine.h"
 35 | #include "llvm/Support/Debug.h"
 36 | #include "llvm/Support/raw_ostream.h"
 37 | #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 38 | #include "llvm/Transforms/Utils/Cloning.h"
 39 | #include "llvm/Transforms/Utils/Local.h"
 40 | #include "llvm/Transforms/Utils/LoopUtils.h"
 41 | #include "llvm/Transforms/Utils/SSAUpdater.h"
 42 | #include "llvm/Transforms/Utils/ValueMapper.h"
 43 | using namespace llvm;
 44 |
 45 | #define DEBUG_TYPE "loop-rotate"
 46 |
 47 | STATISTIC(NumNotRotatedDueToHeaderSize,
 48 |           "Number of loops not rotated due to the header size");
 49 | STATISTIC(NumInstrsHoisted,
 50 |           "Number of instructions hoisted into loop preheader");
 51 | STATISTIC(NumInstrsDuplicated,
 52 |           "Number of instructions cloned into loop preheader");
 53 | STATISTIC(NumRotated, "Number of loops rotated");
 54 |
 55 | static cl::opt<bool>
 56 |     MultiRotate("loop-rotate-multi", cl::init(false), cl::Hidden,
 57 |                 cl::desc("Allow loop rotation multiple times in order to reach "
 58 |                          "a better latch exit"));
 59 |
 60 | namespace {
 61 | /// A simple loop rotation transformation.
 62 | class LoopRotate {
 63 |   const unsigned MaxHeaderSize;
 64 |   LoopInfo *LI;
 65 |   const TargetTransformInfo *TTI;
 66 |   AssumptionCache *AC;
 67 |   DominatorTree *DT;
 68 |   ScalarEvolution *SE;
 69 |   MemorySSAUpdater *MSSAU;
 70 |   const SimplifyQuery &SQ;
 71 |   bool RotationOnly;
 72 |   bool IsUtilMode;
 73 |   bool PrepareForLTO;
 74 |
 75 | public:
 76 |   LoopRotate(unsigned MaxHeaderSize, LoopInfo *LI,
 77 |              const TargetTransformInfo *TTI, AssumptionCache *AC,
 78 |              DominatorTree *DT, ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
 79 |              const SimplifyQuery &SQ, bool RotationOnly, bool IsUtilMode,
 80 |              bool PrepareForLTO)
 81 |       : MaxHeaderSize(MaxHeaderSize), LI(LI), TTI(TTI), AC(AC), DT(DT), SE(SE),
 82 |         MSSAU(MSSAU), SQ(SQ), RotationOnly(RotationOnly),
 83 |         IsUtilMode(IsUtilMode), PrepareForLTO(PrepareForLTO) {}
 84 |   bool processLoop(Loop *L);
 85 |
 86 | private:
 87 |   bool rotateLoop(Loop *L, bool SimplifiedLatch);
 88 |   bool simplifyLoopLatch(Loop *L);
 89 | };
 90 | } // end anonymous namespace
 91 |
 92 | /// Insert (K, V) pair into the ValueToValueMap, and verify the key did not
 93 | /// previously exist in the map, and the value was inserted.
 94 | static void InsertNewValueIntoMap(ValueToValueMapTy &VM, Value *K, Value *V) {
 95 |   bool Inserted = VM.insert({K, V}).second;
 96 |   assert(Inserted);
 97 |   (void)Inserted;
 98 | }
 99 | /// RewriteUsesOfClonedInstructions - We just cloned the instructions from the
100 | /// old header into the preheader. If there were uses of the values produced by
101 | /// these instruction that were outside of the loop, we have to insert PHI nodes
102 | /// to merge the two values. Do this now.
103 | static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
104 |                                             BasicBlock *OrigPreheader,
105 |                                             ValueToValueMapTy &ValueMap,
106 |                                 SmallVectorImpl<PHINode*> *InsertedPHIs) {
107 |   // Remove PHI node entries that are no longer live.
108 |   BasicBlock::iterator I, E = OrigHeader->end();
109 |   for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
110 |     PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreheader));
111 |
112 |   // Now fix up users of the instructions in OrigHeader, inserting PHI nodes
113 |   // as necessary.
114 |   SSAUpdater SSA(InsertedPHIs);
115 |   for (I = OrigHeader->begin(); I != E; ++I) {
116 |     Value *OrigHeaderVal = &*I;
117 |
118 |     // If there are no uses of the value (e.g. because it returns void), there
119 |     // is nothing to rewrite.
120 |     if (OrigHeaderVal->use_empty())
121 |       continue;
122 |
123 |     Value *OrigPreHeaderVal = ValueMap.lookup(OrigHeaderVal);
124 |
125 |     // The value now exits in two versions: the initial value in the preheader
126 |     // and the loop "next" value in the original header.
127 |     SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
128 |     SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
129 |     SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal);
130 |
131 |     // Visit each use of the OrigHeader instruction.
132 |     for (Value::use_iterator UI = OrigHeaderVal->use_begin(),
133 |                              UE = OrigHeaderVal->use_end();
134 |          UI != UE;) {
135 |       // Grab the use before incrementing the iterator.
136 |       Use &U = *UI;
137 |
138 |       // Increment the iterator before removing the use from the list.
139 |       ++UI;
140 |
141 |       // SSAUpdater can't handle a non-PHI use in the same block as an
142 |       // earlier def. We can easily handle those cases manually.
143 |       Instruction *UserInst = cast<Instruction>(U.getUser());
144 |       if (!isa<PHINode>(UserInst)) {
145 |         BasicBlock *UserBB = UserInst->getParent();
146 |
147 |         // The original users in the OrigHeader are already using the
148 |         // original definitions.
149 |         if (UserBB == OrigHeader)
150 |           continue;
151 |
152 |         // Users in the OrigPreHeader need to use the value to which the
153 |         // original definitions are mapped.
154 |         if (UserBB == OrigPreheader) {
155 |           U = OrigPreHeaderVal;
156 |           continue;
157 |         }
158 |       }
159 |
160 |       // Anything else can be handled by SSAUpdater.
161 |       SSA.RewriteUse(U);
162 |     }
163 |
164 |     // Replace MetadataAsValue(ValueAsMetadata(OrigHeaderVal)) uses in debug
165 |     // intrinsics.
166 |     SmallVector<DbgValueInst *, 1> DbgValues;
167 |     llvm::findDbgValues(DbgValues, OrigHeaderVal);
168 |     for (auto &DbgValue : DbgValues) {
169 |       // The original users in the OrigHeader are already using the original
170 |       // definitions.
171 |       BasicBlock *UserBB = DbgValue->getParent();
172 |       if (UserBB == OrigHeader)
173 |         continue;
174 |
175 |       // Users in the OrigPreHeader need to use the value to which the
176 |       // original definitions are mapped and anything else can be handled by
177 |       // the SSAUpdater. To avoid adding PHINodes, check if the value is
178 |       // available in UserBB, if not substitute undef.
179 |       Value *NewVal;
180 |       if (UserBB == OrigPreheader)
181 |         NewVal = OrigPreHeaderVal;
182 |       else if (SSA.HasValueForBlock(UserBB))
183 |         NewVal = SSA.GetValueInMiddleOfBlock(UserBB);
184 |       else
185 |         NewVal = UndefValue::get(OrigHeaderVal->getType());
186 |       DbgValue->replaceVariableLocationOp(OrigHeaderVal, NewVal);
187 |     }
188 |   }
189 | }
190 |
191 | // Assuming both header and latch are exiting, look for a phi which is only
192 | // used outside the loop (via a LCSSA phi) in the exit from the header.
193 | // This means that rotating the loop can remove the phi.
194 | static bool profitableToRotateLoopExitingLatch(Loop *L) {
195 |   BasicBlock *Header = L->getHeader();
196 |   BranchInst *BI = dyn_cast<BranchInst>(Header->getTerminator());
197 |   assert(BI && BI->isConditional() && "need header with conditional exit");
198 |   BasicBlock *HeaderExit = BI->getSuccessor(0);
199 |   if (L->contains(HeaderExit))
200 |     HeaderExit = BI->getSuccessor(1);
201 |
202 |   for (auto &Phi : Header->phis()) {
203 |     // Look for uses of this phi in the loop/via exits other than the header.
204 |     if (llvm::any_of(Phi.users(), [HeaderExit](const User *U) {
205 |           return cast<Instruction>(U)->getParent() != HeaderExit;
206 |         }))
207 |       continue;
208 |     return true;
209 |   }
210 |   return false;
211 | }
212 |
213 | // Check that latch exit is deoptimizing (which means - very unlikely to happen)
214 | // and there is another exit from the loop which is non-deoptimizing.
215 | // If we rotate latch to that exit our loop has a better chance of being fully
216 | // canonical.
217 | //
218 | // It can give false positives in some rare cases.
219 | static bool canRotateDeoptimizingLatchExit(Loop *L) {
220 |   BasicBlock *Latch = L->getLoopLatch();
221 |   assert(Latch && "need latch");
222 |   BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
223 |   // Need normal exiting latch.
224 |   if (!BI || !BI->isConditional())
225 |     return false;
226 |
227 |   BasicBlock *Exit = BI->getSuccessor(1);
228 |   if (L->contains(Exit))
229 |     Exit = BI->getSuccessor(0);
230 |
231 |   // Latch exit is non-deoptimizing, no need to rotate.
232 |   if (!Exit->getPostdominatingDeoptimizeCall())
233 |     return false;
234 |
235 |   SmallVector<BasicBlock *, 4> Exits;
236 |   L->getUniqueExitBlocks(Exits);
237 |   if (!Exits.empty()) {
238 |     // There is at least one non-deoptimizing exit.
239 |     //
240 |     // Note, that BasicBlock::getPostdominatingDeoptimizeCall is not exact,
241 |     // as it can conservatively return false for deoptimizing exits with
242 |     // complex enough control flow down to deoptimize call.
243 |     //
244 |     // That means here we can report success for a case where
245 |     // all exits are deoptimizing but one of them has complex enough
246 |     // control flow (e.g. with loops).
247 |     //
248 |     // That should be a very rare case and false positives for this function
249 |     // have compile-time effect only.
250 |     return any_of(Exits, [](const BasicBlock *BB) {
251 |       return !BB->getPostdominatingDeoptimizeCall();
252 |     });
253 |   }
254 |   return false;
255 | }
256 |
257 | /// Rotate loop LP. Return true if the loop is rotated.
258 | ///
259 | /// \param SimplifiedLatch is true if the latch was just folded into the final
260 | /// loop exit. In this case we may want to rotate even though the new latch is
261 | /// now an exiting branch. This rotation would have happened had the latch not
262 | /// been simplified. However, if SimplifiedLatch is false, then we avoid
263 | /// rotating loops in which the latch exits to avoid excessive or endless
264 | /// rotation. LoopRotate should be repeatable and converge to a canonical
265 | /// form. This property is satisfied because simplifying the loop latch can only
266 | /// happen once across multiple invocations of the LoopRotate pass.
267 | ///
268 | /// If -loop-rotate-multi is enabled we can do multiple rotations in one go
269 | /// so to reach a suitable (non-deoptimizing) exit.
270 | bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
271 |   // If the loop has only one block then there is not much to rotate.
272 |   if (L->getBlocks().size() == 1)
273 |     return false;
274 |
275 |   bool Rotated = false;
276 |   do {
277 |     BasicBlock *OrigHeader = L->getHeader();
278 |     BasicBlock *OrigLatch = L->getLoopLatch();
279 |
280 |     BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
281 |     if (!BI || BI->isUnconditional())
282 |       return Rotated;
283 |
284 |     // If the loop header is not one of the loop exiting blocks then
285 |     // either this loop is already rotated or it is not
286 |     // suitable for loop rotation transformations.
287 |     if (!L->isLoopExiting(OrigHeader))
288 |       return Rotated;
289 |
290 |     // If the loop latch already contains a branch that leaves the loop then the
291 |     // loop is already rotated.
292 |     if (!OrigLatch)
293 |       return Rotated;
294 |
295 |     // Rotate if either the loop latch does *not* exit the loop, or if the loop
296 |     // latch was just simplified. Or if we think it will be profitable.
297 |     if (L->isLoopExiting(OrigLatch) && !SimplifiedLatch && IsUtilMode == false &&
298 |         !profitableToRotateLoopExitingLatch(L) &&
299 |         !canRotateDeoptimizingLatchExit(L))
300 |       return Rotated;
301 |
302 |     // Check size of original header and reject loop if it is very big or we can't
303 |     // duplicate blocks inside it.
304 |     {
305 |       SmallPtrSet<const Value *, 32> EphValues;
306 |       CodeMetrics::collectEphemeralValues(L, AC, EphValues);
307 |
308 |       CodeMetrics Metrics;
309 |       Metrics.analyzeBasicBlock(OrigHeader, *TTI, EphValues, PrepareForLTO);
310 |       if (Metrics.notDuplicatable) {
311 |         LLVM_DEBUG(
312 |             dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable"
313 |                    << " instructions: ";
314 |             L->dump());
315 |         return Rotated;
316 |       }
317 |       if (Metrics.convergent) {
318 |         LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains convergent "
319 |                              "instructions: ";
320 |                    L->dump());
321 |         return Rotated;
322 |       }
323 |       if (Metrics.NumInsts > MaxHeaderSize) {
324 |         LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains "
325 |                           << Metrics.NumInsts
326 |                           << " instructions, which is more than the threshold ("
327 |                           << MaxHeaderSize << " instructions): ";
328 |                    L->dump());
329 |         ++NumNotRotatedDueToHeaderSize;
330 |         return Rotated;
331 |       }
332 |
333 |       // When preparing for LTO, avoid rotating loops with calls that could be
334 |       // inlined during the LTO stage.
335 |       if (PrepareForLTO && Metrics.NumInlineCandidates > 0)
336 |         return Rotated;
337 |     }
338 |
339 |     // Now, this loop is suitable for rotation.
340 |     BasicBlock *OrigPreheader = L->getLoopPreheader();
341 |
342 |     // If the loop could not be converted to canonical form, it must have an
343 |     // indirectbr in it, just give up.
344 |     if (!OrigPreheader || !L->hasDedicatedExits())
345 |       return Rotated;
346 |
347 |     // Anything ScalarEvolution may know about this loop or the PHI nodes
348 |     // in its header will soon be invalidated. We should also invalidate
349 |     // all outer loops because insertion and deletion of blocks that happens
350 |     // during the rotation may violate invariants related to backedge taken
351 |     // infos in them.
352 |     if (SE)
353 |       SE->forgetTopmostLoop(L);
354 |
355 |     LLVM_DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());
356 |     if (MSSAU && VerifyMemorySSA)
357 |       MSSAU->getMemorySSA()->verifyMemorySSA();
358 |
359 |     // Find new Loop header. NewHeader is a Header's one and only successor
360 |     // that is inside loop. Header's other successor is outside the
361 |     // loop. Otherwise loop is not suitable for rotation.
362 |     BasicBlock *Exit = BI->getSuccessor(0);
363 |     BasicBlock *NewHeader = BI->getSuccessor(1);
364 |     if (L->contains(Exit))
365 |       std::swap(Exit, NewHeader);
366 |     assert(NewHeader && "Unable to determine new loop header");
367 |     assert(L->contains(NewHeader) && !L->contains(Exit) &&
368 |            "Unable to determine loop header and exit blocks");
369 |
370 |     // This code assumes that the new header has exactly one predecessor.
371 |     // Remove any single-entry PHI nodes in it.
372 |     assert(NewHeader->getSinglePredecessor() &&
373 |            "New header doesn't have one pred!");
374 |     FoldSingleEntryPHINodes(NewHeader);
375 |
376 |     // Begin by walking OrigHeader and populating ValueMap with an entry for
377 |     // each Instruction.
378 |     BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end();
379 |     ValueToValueMapTy ValueMap, ValueMapMSSA;
380 |
381 |     // For PHI nodes, the value available in OldPreHeader is just the
382 |     // incoming value from OldPreHeader.
383 |     for (; PHINode *PN = dyn_cast<PHINode>(I); ++I)
384 |       InsertNewValueIntoMap(ValueMap, PN,
385 |                             PN->getIncomingValueForBlock(OrigPreheader));
386 |
387 |     // For the rest of the instructions, either hoist to the OrigPreheader if
388 |     // possible or create a clone in the OldPreHeader if not.
389 |     Instruction *LoopEntryBranch = OrigPreheader->getTerminator();
390 |
391 |     // Record all debug intrinsics preceding LoopEntryBranch to avoid
392 |     // duplication.
393 |     using DbgIntrinsicHash =
394 |         std::pair<std::pair<hash_code, DILocalVariable *>, DIExpression *>;
395 |     auto makeHash = [](DbgVariableIntrinsic *D) -> DbgIntrinsicHash {
396 |       auto VarLocOps = D->location_ops();
397 |       return {{hash_combine_range(VarLocOps.begin(), VarLocOps.end()),
398 |                D->getVariable()},
399 |               D->getExpression()};
400 |     };
401 |     SmallDenseSet<DbgIntrinsicHash, 8> DbgIntrinsics;
402 |     for (auto I = std::next(OrigPreheader->rbegin()), E = OrigPreheader->rend();
403 |          I != E; ++I) {
404 |       if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&*I))
405 |         DbgIntrinsics.insert(makeHash(DII));
406 |       else
407 |         break;
408 |     }
409 |
410 |     // Remember the local noalias scope declarations in the header. After the
411 |     // rotation, they must be duplicated and the scope must be cloned. This
412 |     // avoids unwanted interaction across iterations.
413 |     SmallVector<NoAliasScopeDeclInst *, 6> NoAliasDeclInstructions;
414 |     for (Instruction &I : *OrigHeader)
415 |       if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
416 |         NoAliasDeclInstructions.push_back(Decl);
417 |
418 |     while (I != E) {
419 |       Instruction *Inst = &*I++;
420 |
421 |       // If the instruction's operands are invariant and it doesn't read or write
422 |       // memory, then it is safe to hoist. Doing this doesn't change the order of
423 |       // execution in the preheader, but does prevent the instruction from
424 |       // executing in each iteration of the loop. This means it is safe to hoist
425 |       // something that might trap, but isn't safe to hoist something that reads
426 |       // memory (without proving that the loop doesn't write).
427 |       if (L->hasLoopInvariantOperands(Inst) && !Inst->mayReadFromMemory() &&
428 |           !Inst->mayWriteToMemory() && !Inst->isTerminator() &&
429 |           !isa<DbgInfoIntrinsic>(Inst) && !isa<AllocaInst>(Inst)) {
430 |         Inst->moveBefore(LoopEntryBranch);
431 |         ++NumInstrsHoisted;
432 |         continue;
433 |       }
434 |
435 |       // Otherwise, create a duplicate of the instruction.
436 |       Instruction *C = Inst->clone();
437 |       ++NumInstrsDuplicated;
438 |
439 |       // Eagerly remap the operands of the instruction.
440 |       RemapInstruction(C, ValueMap,
441 |                        RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
442 |
443 |       // Avoid inserting the same intrinsic twice.
444 |       if (auto *DII = dyn_cast<DbgVariableIntrinsic>(C))
445 |         if (DbgIntrinsics.count(makeHash(DII))) {
446 |           C->deleteValue();
447 |           continue;
448 |         }
449 |
450 |       // With the operands remapped, see if the instruction constant folds or is
451 |       // otherwise simplifyable. This commonly occurs because the entry from PHI
452 |       // nodes allows icmps and other instructions to fold.
453 |       Value *V = SimplifyInstruction(C, SQ);
454 |       if (V && LI->replacementPreservesLCSSAForm(C, V)) {
455 |         // If so, then delete the temporary instruction and stick the folded value
456 |         // in the map.
457 |         InsertNewValueIntoMap(ValueMap, Inst, V);
458 |         if (!C->mayHaveSideEffects()) {
459 |           C->deleteValue();
460 |           C = nullptr;
461 |         }
462 |       } else {
463 |         InsertNewValueIntoMap(ValueMap, Inst, C);
464 |       }
465 |       if (C) {
466 |         // Otherwise, stick the new instruction into the new block!
467 |         C->setName(Inst->getName());
468 |         C->insertBefore(LoopEntryBranch);
469 |
470 |         if (auto *II = dyn_cast<AssumeInst>(C))
471 |           AC->registerAssumption(II);
472 |         // MemorySSA cares whether the cloned instruction was inserted or not, and
473 |         // not whether it can be remapped to a simplified value.
474 |         if (MSSAU)
475 |           InsertNewValueIntoMap(ValueMapMSSA, Inst, C);
476 |       }
477 |     }
478 |
479 |     if (!NoAliasDeclInstructions.empty()) {
480 |       // There are noalias scope declarations:
481 |       // (general):
482 |       // Original:    OrigPre              { OrigHeader NewHeader ... Latch }
483 |       // after:      (OrigPre+OrigHeader') { NewHeader ... Latch OrigHeader }
484 |       //
485 |       // with D: llvm.experimental.noalias.scope.decl,
486 |       //      U: !noalias or !alias.scope depending on D
487 |       //      ... { D U1 U2 }   can transform into:
488 |       // (0) : ... { D U1 U2 }        // no relevant rotation for this part
489 |       // (1) : ... D' { U1 U2 D }     // D is part of OrigHeader
490 |       // (2) : ... D' U1' { U2 D U1 } // D, U1 are part of OrigHeader
491 |       //
492 |       // We now want to transform:
493 |       // (1) -> : ... D' { D U1 U2 D'' }
494 |       // (2) -> : ... D' U1' { D U2 D'' U1'' }
495 |       // D: original llvm.experimental.noalias.scope.decl
496 |       // D', U1': duplicate with replaced scopes
497 |       // D'', U1'': different duplicate with replaced scopes
498 |       // This ensures a safe fallback to 'may_alias' introduced by the rotate,
499 |       // as U1'' and U1' scopes will not be compatible wrt to the local restrict
500 |
501 |       // Clone the llvm.experimental.noalias.decl again for the NewHeader.
502 |       Instruction *NewHeaderInsertionPoint = &(*NewHeader->getFirstNonPHI());
503 |       for (NoAliasScopeDeclInst *NAD : NoAliasDeclInstructions) {
504 |         LLVM_DEBUG(dbgs() << "  Cloning llvm.experimental.noalias.scope.decl:"
505 |                           << *NAD << "\n");
506 |         Instruction *NewNAD = NAD->clone();
507 |         NewNAD->insertBefore(NewHeaderInsertionPoint);
508 |       }
509 |
510 |       // Scopes must now be duplicated, once for OrigHeader and once for
511 |       // OrigPreHeader'.
512 |       {
513 |         auto &Context = NewHeader->getContext();
514 |
515 |         SmallVector<MDNode *, 8> NoAliasDeclScopes;
516 |         for (NoAliasScopeDeclInst *NAD : NoAliasDeclInstructions)
517 |           NoAliasDeclScopes.push_back(NAD->getScopeList());
518 |
519 |         LLVM_DEBUG(dbgs() << "  Updating OrigHeader scopes\n");
520 |         cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, {OrigHeader}, Context,
521 |                                    "h.rot");
522 |         LLVM_DEBUG(OrigHeader->dump());
523 |
524 |         // Keep the compile time impact low by only adapting the inserted block
525 |         // of instructions in the OrigPreHeader. This might result in slightly
526 |         // more aliasing between these instructions and those that were already
527 |         // present, but it will be much faster when the original PreHeader is
528 |         // large.
529 |         LLVM_DEBUG(dbgs() << "  Updating part of OrigPreheader scopes\n");
530 |         auto *FirstDecl =
531 |             cast<Instruction>(ValueMap[*NoAliasDeclInstructions.begin()]);
532 |         auto *LastInst = &OrigPreheader->back();
533 |         cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, FirstDecl, LastInst,
534 |                                    Context, "pre.rot");
535 |         LLVM_DEBUG(OrigPreheader->dump());
536 |
537 |         LLVM_DEBUG(dbgs() << "  Updated NewHeader:\n");
538 |         LLVM_DEBUG(NewHeader->dump());
539 |       }
540 |     }
541 |
542 |     // Along with all the other instructions, we just cloned OrigHeader's
543 |     // terminator into OrigPreHeader. Fix up the PHI nodes in each of OrigHeader's
544 |     // successors by duplicating their incoming values for OrigHeader.
545 |     for (BasicBlock *SuccBB : successors(OrigHeader))
546 |       for (BasicBlock::iterator BI = SuccBB->begin();
547 |            PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
548 |         PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreheader);
549 |
550 |     // Now that OrigPreHeader has a clone of OrigHeader's terminator, remove
551 |     // OrigPreHeader's old terminator (the original branch into the loop), and
552 |     // remove the corresponding incoming values from the PHI nodes in OrigHeader.
553 |     LoopEntryBranch->eraseFromParent();
554 |
555 |     // Update MemorySSA before the rewrite call below changes the 1:1
556 |     // instruction:cloned_instruction_or_value mapping.
557 |     if (MSSAU) {
558 |       InsertNewValueIntoMap(ValueMapMSSA, OrigHeader, OrigPreheader);
559 |       MSSAU->updateForClonedBlockIntoPred(OrigHeader, OrigPreheader,
560 |                                           ValueMapMSSA);
561 |     }
562 |
563 |     SmallVector<PHINode*, 2> InsertedPHIs;
564 |     // If there were any uses of instructions in the duplicated block outside the
565 |     // loop, update them, inserting PHI nodes as required
566 |     RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap,
567 |                                     &InsertedPHIs);
568 |
569 |     // Attach dbg.value intrinsics to the new phis if that phi uses a value that
570 |     // previously had debug metadata attached. This keeps the debug info
571 |     // up-to-date in the loop body.
572 |     if (!InsertedPHIs.empty())
573 |       insertDebugValuesForPHIs(OrigHeader, InsertedPHIs);
574 |
575 |     // NewHeader is now the header of the loop.
576 |     L->moveToHeader(NewHeader);
577 |     assert(L->getHeader() == NewHeader && "Latch block is our new header");
578 |
579 |     // Inform DT about changes to the CFG.
580 |     if (DT) {
581 |       // The OrigPreheader branches to the NewHeader and Exit now. Then, inform
582 |       // the DT about the removed edge to the OrigHeader (that got removed).
583 |       SmallVector<DominatorTree::UpdateType, 3> Updates;
584 |       Updates.push_back({DominatorTree::Insert, OrigPreheader, Exit});
585 |       Updates.push_back({DominatorTree::Insert, OrigPreheader, NewHeader});
586 |       Updates.push_back({DominatorTree::Delete, OrigPreheader, OrigHeader});
587 |
588 |       if (MSSAU) {
589 |         MSSAU->applyUpdates(Updates, *DT, /*UpdateDT=*/true);
590 |         if (VerifyMemorySSA)
591 |           MSSAU->getMemorySSA()->verifyMemorySSA();
592 |       } else {
593 |         DT->applyUpdates(Updates);
594 |       }
595 |     }
596 |
597 |     // At this point, we've finished our major CFG changes. As part of cloning
598 |     // the loop into the preheader we've simplified instructions and the
599 |     // duplicated conditional branch may now be branching on a constant. If it is
600 |     // branching on a constant and if that constant means that we enter the loop,
601 |     // then we fold away the cond branch to an uncond branch. This simplifies the
602 |     // loop in cases important for nested loops, and it also means we don't have
603 |     // to split as many edges.
604 |     BranchInst *PHBI = cast<BranchInst>(OrigPreheader->getTerminator());
605 |     assert(PHBI->isConditional() && "Should be clone of BI condbr!");
606 |     if (!isa<ConstantInt>(PHBI->getCondition()) ||
607 |         PHBI->getSuccessor(cast<ConstantInt>(PHBI->getCondition())->isZero()) !=
608 |             NewHeader) {
609 |       // The conditional branch can't be folded, handle the general case.
610 |       // Split edges as necessary to preserve LoopSimplify form.
611 |
612 |       // Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and
613 |       // thus is not a preheader anymore.
614 |       // Split the edge to form a real preheader.
615 |       BasicBlock *NewPH = SplitCriticalEdge(
616 |           OrigPreheader, NewHeader,
617 |           CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
618 |       NewPH->setName(NewHeader->getName() + ".lr.ph");
619 |
620 |       // Preserve canonical loop form, which means that 'Exit' should have only
621 |       // one predecessor. Note that Exit could be an exit block for multiple
622 |       // nested loops, causing both of the edges to now be critical and need to
623 |       // be split.
624 |       SmallVector<BasicBlock *, 4> ExitPreds(pred_begin(Exit), pred_end(Exit));
625 |       bool SplitLatchEdge = false;
626 |       for (BasicBlock *ExitPred : ExitPreds) {
627 |         // We only need to split loop exit edges.
628 |         Loop *PredLoop = LI->getLoopFor(ExitPred);
629 |         if (!PredLoop || PredLoop->contains(Exit) ||
630 |             ExitPred->getTerminator()->isIndirectTerminator())
631 |           continue;
632 |         SplitLatchEdge |= L->getLoopLatch() == ExitPred;
633 |         BasicBlock *ExitSplit = SplitCriticalEdge(
634 |             ExitPred, Exit,
635 |             CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
636 |         ExitSplit->moveBefore(Exit);
637 |       }
638 |       assert(SplitLatchEdge &&
639 |              "Despite splitting all preds, failed to split latch exit?");
640 |       (void)SplitLatchEdge;
641 |     } else {
642 |       // We can fold the conditional branch in the preheader, this makes things
643 |       // simpler. The first step is to remove the extra edge to the Exit block.
644 |       Exit->removePredecessor(OrigPreheader, true /*preserve LCSSA*/);
645 |       BranchInst *NewBI = BranchInst::Create(NewHeader, PHBI);
646 |       NewBI->setDebugLoc(PHBI->getDebugLoc());
647 |       PHBI->eraseFromParent();
648 |
649 |       // With our CFG finalized, update DomTree if it is available.
650 |       if (DT) DT->deleteEdge(OrigPreheader, Exit);
651 |
652 |       // Update MSSA too, if available.
653 |       if (MSSAU)
654 |         MSSAU->removeEdge(OrigPreheader, Exit);
655 |     }
656 |
657 |     assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation");
658 |     assert(L->getLoopLatch() && "Invalid loop latch after loop rotation");
659 |
660 |     if (MSSAU && VerifyMemorySSA)
661 |       MSSAU->getMemorySSA()->verifyMemorySSA();
662 |
663 |     // Now that the CFG and DomTree are in a consistent state again, try to merge
664 |     // the OrigHeader block into OrigLatch. This will succeed if they are
665 |     // connected by an unconditional branch. This is just a cleanup so the
666 |     // emitted code isn't too gross in this common case.
667 |     DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
668 |     BasicBlock *PredBB = OrigHeader->getUniquePredecessor();
669 |     bool DidMerge = MergeBlockIntoPredecessor(OrigHeader, &DTU, LI, MSSAU);
670 |     if (DidMerge)
671 |       RemoveRedundantDbgInstrs(PredBB);
672 |
673 |     if (MSSAU && VerifyMemorySSA)
674 |       MSSAU->getMemorySSA()->verifyMemorySSA();
675 |
676 |     LLVM_DEBUG(dbgs() << "LoopRotation: into "; L->dump());
677 |
678 |     ++NumRotated;
679 |
680 |     Rotated = true;
681 |     SimplifiedLatch = false;
682 |
683 |     // Check that new latch is a deoptimizing exit and then repeat rotation if possible.
684 |     // Deoptimizing latch exit is not a generally typical case, so we just loop over.
685 |     // TODO: if it becomes a performance bottleneck extend rotation algorithm
686 |     // to handle multiple rotations in one go.
687 |   } while (MultiRotate && canRotateDeoptimizingLatchExit(L));
688 |
689 |
690 |   return true;
691 | }
692 |
693 | /// Determine whether the instructions in this range may be safely and cheaply
694 | /// speculated. This is not an important enough situation to develop complex
695 | /// heuristics. We handle a single arithmetic instruction along with any type
696 | /// conversions.
697 | static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
698 |                                   BasicBlock::iterator End, Loop *L) {
699 |   bool seenIncrement = false;
700 |   bool MultiExitLoop = false;
701 |
702 |   if (!L->getExitingBlock())
703 |     MultiExitLoop = true;
704 |
705 |   for (BasicBlock::iterator I = Begin; I != End; ++I) {
706 |
707 |     if (!isSafeToSpeculativelyExecute(&*I))
708 |       return false;
709 |
710 |     if (isa<DbgInfoIntrinsic>(I))
711 |       continue;
712 |
713 |     switch (I->getOpcode()) {
714 |     default:
715 |       return false;
716 |     case Instruction::GetElementPtr:
717 |       // GEPs are cheap if all indices are constant.
718 |       if (!cast<GEPOperator>(I)->hasAllConstantIndices())
719 |         return false;
720 |       // fall-thru to increment case
721 |       LLVM_FALLTHROUGH;
722 |     case Instruction::Add:
723 |     case Instruction::Sub:
724 |     case Instruction::And:
725 |     case Instruction::Or:
726 |     case Instruction::Xor:
727 |     case Instruction::Shl:
728 |     case Instruction::LShr:
729 |     case Instruction::AShr: {
730 |       Value *IVOpnd =
731 |           !isa<Constant>(I->getOperand(0))
732 |               ? I->getOperand(0)
733 |               : !isa<Constant>(I->getOperand(1)) ? I->getOperand(1) : nullptr;
734 |       if (!IVOpnd)
735 |         return false;
736 |
737 |       // If increment operand is used outside of the loop, this speculation
738 |       // could cause extra live range interference.
739 |       if (MultiExitLoop) {
740 |         for (User *UseI : IVOpnd->users()) {
741 |           auto *UserInst = cast<Instruction>(UseI);
742 |           if (!L->contains(UserInst))
743 |             return false;
744 |         }
745 |       }
746 |
747 |       if (seenIncrement)
748 |         return false;
749 |       seenIncrement = true;
750 |       break;
751 |     }
752 |     case Instruction::Trunc:
753 |     case Instruction::ZExt:
754 |     case Instruction::SExt:
755 |       // ignore type conversions
756 |       break;
757 |     }
758 |   }
759 |   return true;
760 | }
761 |
762 | /// Fold the loop tail into the loop exit by speculating the loop tail
763 | /// instructions. Typically, this is a single post-increment. In the case of a
764 | /// simple 2-block loop, hoisting the increment can be much better than
765 | /// duplicating the entire loop header. In the case of loops with early exits,
766 | /// rotation will not work anyway, but simplifyLoopLatch will put the loop in
767 | /// canonical form so downstream passes can handle it.
768 | ///
769 | /// I don't believe this invalidates SCEV.
770 | bool LoopRotate::simplifyLoopLatch(Loop *L) {
771 |   BasicBlock *Latch = L->getLoopLatch();
772 |   if (!Latch || Latch->hasAddressTaken())
773 |     return false;
774 |
775 |   BranchInst *Jmp = dyn_cast<BranchInst>(Latch->getTerminator());
776 |   if (!Jmp || !Jmp->isUnconditional())
777 |     return false;
778 |
779 |   BasicBlock *LastExit = Latch->getSinglePredecessor();
780 |   if (!LastExit || !L->isLoopExiting(LastExit))
781 |     return false;
782 |
783 |   BranchInst *BI = dyn_cast<BranchInst>(LastExit->getTerminator());
784 |   if (!BI)
785 |     return false;
786 |
787 |   if (!shouldSpeculateInstrs(Latch->begin(), Jmp->getIterator(), L))
788 |     return false;
789 |
790 |   LLVM_DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
791 |                     << LastExit->getName() << "\n");
792 |
793 |   DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
794 |   MergeBlockIntoPredecessor(Latch, &DTU, LI, MSSAU, nullptr,
795 |                             /*PredecessorWithTwoSuccessors=*/true);
796 |
797 |   if (MSSAU && VerifyMemorySSA)
798 |     MSSAU->getMemorySSA()->verifyMemorySSA();
799 |
800 |   return true;
801 | }
802 |
803 | /// Rotate \c L, and return true if any modification was made.
804 | bool LoopRotate::processLoop(Loop *L) {
805 |   // Save the loop metadata.
806 |   MDNode *LoopMD = L->getLoopID();
807 |
808 |   bool SimplifiedLatch = false;
809 |
810 |   // Simplify the loop latch before attempting to rotate the header
811 |   // upward. Rotation may not be needed if the loop tail can be folded into the
812 |   // loop exit.
813 |   if (!RotationOnly)
814 |     SimplifiedLatch = simplifyLoopLatch(L);
815 |
816 |   bool MadeChange = rotateLoop(L, SimplifiedLatch);
817 |   assert((!MadeChange || L->isLoopExiting(L->getLoopLatch())) &&
818 |          "Loop latch should be exiting after loop-rotate.");
819 |
820 |   // Restore the loop metadata.
821 |   // NB! We presume LoopRotation DOESN'T ADD its own metadata.
822 |   if ((MadeChange || SimplifiedLatch) && LoopMD)
823 |     L->setLoopID(LoopMD);
824 |
825 |   return MadeChange || SimplifiedLatch;
826 | }
827 |
828 |
829 | /// The utility to convert a loop into a loop with bottom test.
830 | bool llvm::LoopRotation(Loop *L, LoopInfo *LI, const TargetTransformInfo *TTI,
831 |                         AssumptionCache *AC, DominatorTree *DT,
832 |                         ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
833 |                         const SimplifyQuery &SQ, bool RotationOnly = true,
834 |                         unsigned Threshold = unsigned(-1),
835 |                         bool IsUtilMode = true, bool PrepareForLTO) {
836 |   LoopRotate LR(Threshold, LI, TTI, AC, DT, SE, MSSAU, SQ, RotationOnly,
837 |                 IsUtilMode, PrepareForLTO);
838 |   return LR.processLoop(L);
839 | }