File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/GenericDomTree.h
Warning: line 494, column 12: Called C++ object pointer is null
1 | //===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // The code below implements dead store elimination using MemorySSA. It uses | |||
10 | // the following general approach: given a MemoryDef, walk upwards to find | |||
11 | // clobbering MemoryDefs that may be killed by the starting def. Then check | |||
12 | // that there are no uses that may read the location of the original MemoryDef | |||
13 | // in between both MemoryDefs. A bit more concretely: | |||
14 | // | |||
15 | // For all MemoryDefs StartDef: | |||
16 | // 1. Get the next dominating clobbering MemoryDef (EarlierAccess) by walking | |||
17 | // upwards. | |||
18 | // 2. Check that there are no reads between EarlierAccess and the StartDef by | |||
19 | // checking all uses starting at EarlierAccess and walking until we see | |||
20 | // StartDef. | |||
21 | // 3. For each found CurrentDef, check that: | |||
22 | // 1. There are no barrier instructions between CurrentDef and StartDef (like | |||
23 | // throws or stores with ordering constraints). | |||
24 | // 2. StartDef is executed whenever CurrentDef is executed. | |||
25 | // 3. StartDef completely overwrites CurrentDef. | |||
26 | // 4. Erase CurrentDef from the function and MemorySSA. | |||
27 | // | |||
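//
// Editor's note: as a rough illustration (not part of the upstream comment),
// for a pair of plain stores the walk above boils down to a case like
//
//   p[0] = 1;   // CurrentDef: nothing reads p[0] before the next store
//   p[0] = 2;   // StartDef: executed whenever CurrentDef is, and it
//               // completely overwrites CurrentDef
//
// where the first store is the one that gets erased in step 4.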
28 | //===----------------------------------------------------------------------===// | |||
29 | ||||
30 | #include "llvm/Transforms/Scalar/DeadStoreElimination.h" | |||
31 | #include "llvm/ADT/APInt.h" | |||
32 | #include "llvm/ADT/DenseMap.h" | |||
33 | #include "llvm/ADT/MapVector.h" | |||
34 | #include "llvm/ADT/PostOrderIterator.h" | |||
35 | #include "llvm/ADT/SetVector.h" | |||
36 | #include "llvm/ADT/SmallPtrSet.h" | |||
37 | #include "llvm/ADT/SmallVector.h" | |||
38 | #include "llvm/ADT/Statistic.h" | |||
39 | #include "llvm/ADT/StringRef.h" | |||
40 | #include "llvm/Analysis/AliasAnalysis.h" | |||
41 | #include "llvm/Analysis/CaptureTracking.h" | |||
42 | #include "llvm/Analysis/GlobalsModRef.h" | |||
43 | #include "llvm/Analysis/LoopInfo.h" | |||
44 | #include "llvm/Analysis/MemoryBuiltins.h" | |||
45 | #include "llvm/Analysis/MemoryLocation.h" | |||
46 | #include "llvm/Analysis/MemorySSA.h" | |||
47 | #include "llvm/Analysis/MemorySSAUpdater.h" | |||
48 | #include "llvm/Analysis/MustExecute.h" | |||
49 | #include "llvm/Analysis/PostDominators.h" | |||
50 | #include "llvm/Analysis/TargetLibraryInfo.h" | |||
51 | #include "llvm/Analysis/ValueTracking.h" | |||
52 | #include "llvm/IR/Argument.h" | |||
53 | #include "llvm/IR/BasicBlock.h" | |||
54 | #include "llvm/IR/Constant.h" | |||
55 | #include "llvm/IR/Constants.h" | |||
56 | #include "llvm/IR/DataLayout.h" | |||
57 | #include "llvm/IR/Dominators.h" | |||
58 | #include "llvm/IR/Function.h" | |||
59 | #include "llvm/IR/InstIterator.h" | |||
60 | #include "llvm/IR/InstrTypes.h" | |||
61 | #include "llvm/IR/Instruction.h" | |||
62 | #include "llvm/IR/Instructions.h" | |||
63 | #include "llvm/IR/IntrinsicInst.h" | |||
64 | #include "llvm/IR/Intrinsics.h" | |||
65 | #include "llvm/IR/LLVMContext.h" | |||
66 | #include "llvm/IR/Module.h" | |||
67 | #include "llvm/IR/PassManager.h" | |||
68 | #include "llvm/IR/PatternMatch.h" | |||
69 | #include "llvm/IR/Value.h" | |||
70 | #include "llvm/InitializePasses.h" | |||
71 | #include "llvm/Pass.h" | |||
72 | #include "llvm/Support/Casting.h" | |||
73 | #include "llvm/Support/CommandLine.h" | |||
74 | #include "llvm/Support/Debug.h" | |||
75 | #include "llvm/Support/DebugCounter.h" | |||
76 | #include "llvm/Support/ErrorHandling.h" | |||
77 | #include "llvm/Support/MathExtras.h" | |||
78 | #include "llvm/Support/raw_ostream.h" | |||
79 | #include "llvm/Transforms/Scalar.h" | |||
80 | #include "llvm/Transforms/Utils/AssumeBundleBuilder.h" | |||
81 | #include "llvm/Transforms/Utils/Local.h" | |||
82 | #include <algorithm> | |||
83 | #include <cassert> | |||
84 | #include <cstddef> | |||
85 | #include <cstdint> | |||
86 | #include <iterator> | |||
87 | #include <map> | |||
88 | #include <utility> | |||
89 | ||||
90 | using namespace llvm; | |||
91 | using namespace PatternMatch; | |||
92 | ||||
93 | #define DEBUG_TYPE "dse" | |||
94 | ||||
95 | STATISTIC(NumRemainingStores, "Number of stores remaining after DSE"); | |||
96 | STATISTIC(NumRedundantStores, "Number of redundant stores deleted"); | |||
97 | STATISTIC(NumFastStores, "Number of stores deleted"); | |||
98 | STATISTIC(NumFastOther, "Number of other instrs removed"); | |||
99 | STATISTIC(NumCompletePartials, "Number of stores dead by later partials"); | |||
100 | STATISTIC(NumModifiedStores, "Number of stores modified"); | |||
101 | STATISTIC(NumCFGChecks, "Number of blocks checked in CFG path checks"); | |||
102 | STATISTIC(NumCFGTries, "Number of attempted CFG path checks"); | |||
103 | STATISTIC(NumCFGSuccess, "Number of successful CFG path checks"); | |||
104 | STATISTIC(NumGetDomMemoryDefPassed, | |||
105 | "Number of times a valid candidate is returned from getDomMemoryDef"); | |||
106 | STATISTIC(NumDomMemDefChecks, | |||
107 | "Number of iterations checking for reads in getDomMemoryDef"); | |||
108 | ||||
109 | DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa", | |||
110 | "Controls which MemoryDefs are eliminated."); | |||
111 | ||||
112 | static cl::opt<bool> | |||
113 | EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking", | |||
114 | cl::init(true), cl::Hidden, | |||
115 | cl::desc("Enable partial-overwrite tracking in DSE")); | |||
116 | ||||
117 | static cl::opt<bool> | |||
118 | EnablePartialStoreMerging("enable-dse-partial-store-merging", | |||
119 | cl::init(true), cl::Hidden, | |||
120 | cl::desc("Enable partial store merging in DSE")); | |||
121 | ||||
122 | static cl::opt<unsigned> | |||
123 | MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden, | |||
124 | cl::desc("The number of memory instructions to scan for " | |||
125 | "dead store elimination (default = 100)")); | |||
126 | static cl::opt<unsigned> MemorySSAUpwardsStepLimit( | |||
127 | "dse-memoryssa-walklimit", cl::init(90), cl::Hidden, | |||
128 | cl::desc("The maximum number of steps while walking upwards to find " | |||
129 | "MemoryDefs that may be killed (default = 90)")); | |||
130 | ||||
131 | static cl::opt<unsigned> MemorySSAPartialStoreLimit( | |||
132 | "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden, | |||
133 | cl::desc("The maximum number candidates that only partially overwrite the " | |||
134 | "killing MemoryDef to consider" | |||
135 | " (default = 5)")); | |||
136 | ||||
137 | static cl::opt<unsigned> MemorySSADefsPerBlockLimit( | |||
138 | "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden, | |||
139 | cl::desc("The number of MemoryDefs we consider as candidates to eliminated " | |||
140 | "other stores per basic block (default = 5000)")); | |||
141 | ||||
142 | static cl::opt<unsigned> MemorySSASameBBStepCost( | |||
143 | "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden, | |||
144 | cl::desc( | |||
145 | "The cost of a step in the same basic block as the killing MemoryDef" | |||
146 | "(default = 1)")); | |||
147 | ||||
148 | static cl::opt<unsigned> | |||
149 | MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5), | |||
150 | cl::Hidden, | |||
151 | cl::desc("The cost of a step in a different basic " | |||
152 | "block than the killing MemoryDef" | |||
153 | "(default = 5)")); | |||
154 | ||||
155 | static cl::opt<unsigned> MemorySSAPathCheckLimit( | |||
156 | "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden, | |||
157 | cl::desc("The maximum number of blocks to check when trying to prove that " | |||
158 | "all paths to an exit go through a killing block (default = 50)")); | |||
159 | ||||
160 | //===----------------------------------------------------------------------===// | |||
161 | // Helper functions | |||
162 | //===----------------------------------------------------------------------===// | |||
163 | using OverlapIntervalsTy = std::map<int64_t, int64_t>; | |||
164 | using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>; | |||
165 | ||||
166 | /// Does this instruction write some memory? This only returns true for things | |||
167 | /// that we can analyze with other helpers below. | |||
168 | static bool hasAnalyzableMemoryWrite(Instruction *I, | |||
169 | const TargetLibraryInfo &TLI) { | |||
170 | if (isa<StoreInst>(I)) | |||
171 | return true; | |||
172 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { | |||
173 | switch (II->getIntrinsicID()) { | |||
174 | default: | |||
175 | return false; | |||
176 | case Intrinsic::memset: | |||
177 | case Intrinsic::memmove: | |||
178 | case Intrinsic::memcpy: | |||
179 | case Intrinsic::memcpy_inline: | |||
180 | case Intrinsic::memcpy_element_unordered_atomic: | |||
181 | case Intrinsic::memmove_element_unordered_atomic: | |||
182 | case Intrinsic::memset_element_unordered_atomic: | |||
183 | case Intrinsic::init_trampoline: | |||
184 | case Intrinsic::lifetime_end: | |||
185 | case Intrinsic::masked_store: | |||
186 | return true; | |||
187 | } | |||
188 | } | |||
189 | if (auto *CB = dyn_cast<CallBase>(I)) { | |||
190 | LibFunc LF; | |||
191 | if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) { | |||
192 | switch (LF) { | |||
193 | case LibFunc_strcpy: | |||
194 | case LibFunc_strncpy: | |||
195 | case LibFunc_strcat: | |||
196 | case LibFunc_strncat: | |||
197 | return true; | |||
198 | default: | |||
199 | return false; | |||
200 | } | |||
201 | } | |||
202 | } | |||
203 | return false; | |||
204 | } | |||
205 | ||||
206 | /// Return a Location stored to by the specified instruction. If isRemovable | |||
207 | /// returns true, this function and getLocForRead completely describe the memory | |||
208 | /// operations for this instruction. | |||
209 | static MemoryLocation getLocForWrite(Instruction *Inst, | |||
210 | const TargetLibraryInfo &TLI) { | |||
211 | if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) | |||
212 | return MemoryLocation::get(SI); | |||
213 | ||||
214 | // memcpy/memmove/memset. | |||
215 | if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst)) | |||
216 | return MemoryLocation::getForDest(MI); | |||
217 | ||||
218 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { | |||
219 | switch (II->getIntrinsicID()) { | |||
220 | default: | |||
221 | return MemoryLocation(); // Unhandled intrinsic. | |||
222 | case Intrinsic::init_trampoline: | |||
223 | return MemoryLocation::getAfter(II->getArgOperand(0)); | |||
224 | case Intrinsic::masked_store: | |||
225 | return MemoryLocation::getForArgument(II, 1, TLI); | |||
226 | case Intrinsic::lifetime_end: { | |||
227 | uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(); | |||
228 | return MemoryLocation(II->getArgOperand(1), Len); | |||
229 | } | |||
230 | } | |||
231 | } | |||
232 | if (auto *CB = dyn_cast<CallBase>(Inst)) | |||
233 | // All the supported TLI functions so far happen to have dest as their | |||
234 | // first argument. | |||
235 | return MemoryLocation::getAfter(CB->getArgOperand(0)); | |||
236 | return MemoryLocation(); | |||
237 | } | |||
238 | ||||
239 | /// If the value of this instruction and the memory it writes to are unused, | |||
240 | /// may we delete this instruction? | |||
241 | static bool isRemovable(Instruction *I) { | |||
242 | // Don't remove volatile/atomic stores. | |||
243 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) | |||
244 | return SI->isUnordered(); | |||
245 | ||||
246 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { | |||
247 | switch (II->getIntrinsicID()) { | |||
248 | default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate"); | |||
249 | case Intrinsic::lifetime_end: | |||
250 | // Never remove dead lifetime_end's, e.g. because it is followed by a | |||
251 | // free. | |||
252 | return false; | |||
253 | case Intrinsic::init_trampoline: | |||
254 | // Always safe to remove init_trampoline. | |||
255 | return true; | |||
256 | case Intrinsic::memset: | |||
257 | case Intrinsic::memmove: | |||
258 | case Intrinsic::memcpy: | |||
259 | case Intrinsic::memcpy_inline: | |||
260 | // Don't remove volatile memory intrinsics. | |||
261 | return !cast<MemIntrinsic>(II)->isVolatile(); | |||
262 | case Intrinsic::memcpy_element_unordered_atomic: | |||
263 | case Intrinsic::memmove_element_unordered_atomic: | |||
264 | case Intrinsic::memset_element_unordered_atomic: | |||
265 | case Intrinsic::masked_store: | |||
266 | return true; | |||
267 | } | |||
268 | } | |||
269 | ||||
270 | // Note: we only get here for calls with analyzable writes, i.e. libcalls. | |||
271 | if (auto *CB = dyn_cast<CallBase>(I)) | |||
272 | return CB->use_empty(); | |||
273 | ||||
274 | return false; | |||
275 | } | |||
276 | ||||
277 | /// Returns true if the end of this instruction can be safely shortened in | |||
278 | /// length. | |||
279 | static bool isShortenableAtTheEnd(Instruction *I) { | |||
280 | // Don't shorten stores for now | |||
281 | if (isa<StoreInst>(I)) | |||
282 | return false; | |||
283 | ||||
284 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { | |||
285 | switch (II->getIntrinsicID()) { | |||
286 | default: return false; | |||
287 | case Intrinsic::memset: | |||
288 | case Intrinsic::memcpy: | |||
289 | case Intrinsic::memcpy_element_unordered_atomic: | |||
290 | case Intrinsic::memset_element_unordered_atomic: | |||
291 | // Do shorten memory intrinsics. | |||
292 | // FIXME: Add memmove if it's also safe to transform. | |||
293 | return true; | |||
294 | } | |||
295 | } | |||
296 | ||||
297 | // Don't shorten libcalls calls for now. | |||
298 | ||||
299 | return false; | |||
300 | } | |||
301 | ||||
302 | /// Returns true if the beginning of this instruction can be safely shortened | |||
303 | /// in length. | |||
304 | static bool isShortenableAtTheBeginning(Instruction *I) { | |||
305 | // FIXME: Handle only memset for now. Supporting memcpy/memmove should be | |||
306 | // easily done by offsetting the source address. | |||
307 | return isa<AnyMemSetInst>(I); | |||
308 | } | |||
309 | ||||
310 | static uint64_t getPointerSize(const Value *V, const DataLayout &DL, | |||
311 | const TargetLibraryInfo &TLI, | |||
312 | const Function *F) { | |||
313 | uint64_t Size; | |||
314 | ObjectSizeOpts Opts; | |||
315 | Opts.NullIsUnknownSize = NullPointerIsDefined(F); | |||
316 | ||||
317 | if (getObjectSize(V, Size, DL, &TLI, Opts)) | |||
318 | return Size; | |||
319 | return MemoryLocation::UnknownSize; | |||
320 | } | |||
321 | ||||
322 | namespace { | |||
323 | ||||
324 | enum OverwriteResult { | |||
325 | OW_Begin, | |||
326 | OW_Complete, | |||
327 | OW_End, | |||
328 | OW_PartialEarlierWithFullLater, | |||
329 | OW_MaybePartial, | |||
330 | OW_Unknown | |||
331 | }; | |||
332 | ||||
333 | } // end anonymous namespace | |||
334 | ||||
335 | /// Check if two instructions are masked stores that completely | |||
336 | /// overwrite one another. More specifically, \p Later has to | |||
337 | /// overwrite \p Earlier. | |||
338 | static OverwriteResult isMaskedStoreOverwrite(const Instruction *Later, | |||
339 | const Instruction *Earlier, | |||
340 | BatchAAResults &AA) { | |||
341 | const auto *IIL = dyn_cast<IntrinsicInst>(Later); | |||
342 | const auto *IIE = dyn_cast<IntrinsicInst>(Earlier); | |||
343 | if (IIL == nullptr || IIE == nullptr) | |||
344 | return OW_Unknown; | |||
345 | if (IIL->getIntrinsicID() != Intrinsic::masked_store || | |||
346 | IIE->getIntrinsicID() != Intrinsic::masked_store) | |||
347 | return OW_Unknown; | |||
348 | // Pointers. | |||
349 | Value *LP = IIL->getArgOperand(1)->stripPointerCasts(); | |||
350 | Value *EP = IIE->getArgOperand(1)->stripPointerCasts(); | |||
351 | if (LP != EP && !AA.isMustAlias(LP, EP)) | |||
352 | return OW_Unknown; | |||
353 | // Masks. | |||
354 | // TODO: check that Later's mask is a superset of Earlier's mask. | |||
355 | if (IIL->getArgOperand(3) != IIE->getArgOperand(3)) | |||
356 | return OW_Unknown; | |||
357 | return OW_Complete; | |||
358 | } | |||
359 | ||||
360 | /// Return 'OW_Complete' if a store to the 'Later' location completely | |||
361 | /// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the | |||
362 | /// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the | |||
363 | /// beginning of the 'Earlier' location is overwritten by 'Later'. | |||
364 | /// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was | |||
365 | /// overwritten by a later (smaller) store which doesn't write outside the big | |||
366 | /// store's memory locations. Returns 'OW_Unknown' if nothing can be determined. | |||
367 | /// NOTE: This function must only be called if both \p Later and \p Earlier | |||
368 | /// write to the same underlying object with valid \p EarlierOff and \p | |||
369 | /// LaterOff. | |||
370 | static OverwriteResult isPartialOverwrite(const MemoryLocation &Later, | |||
371 | const MemoryLocation &Earlier, | |||
372 | int64_t EarlierOff, int64_t LaterOff, | |||
373 | Instruction *DepWrite, | |||
374 | InstOverlapIntervalsTy &IOL) { | |||
375 | const uint64_t LaterSize = Later.Size.getValue(); | |||
376 | const uint64_t EarlierSize = Earlier.Size.getValue(); | |||
377 | // The stores may now overlap, although the overlap is not complete. There | |||
378 | // might also be other incomplete overlaps, and together, they might cover the | |||
379 | // complete earlier write. | |||
380 | // Note: The correctness of this logic depends on the fact that this function | |||
381 | // is never called with DepWrite when there are any intervening reads. | |||
382 | if (EnablePartialOverwriteTracking && | |||
383 | LaterOff < int64_t(EarlierOff + EarlierSize) && | |||
384 | int64_t(LaterOff + LaterSize) >= EarlierOff) { | |||
385 | ||||
386 | // Insert our part of the overlap into the map. | |||
387 | auto &IM = IOL[DepWrite]; | |||
388 | LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOffdo { } while (false) | |||
389 | << ", " << int64_t(EarlierOff + EarlierSize)do { } while (false) | |||
390 | << ") Later [" << LaterOff << ", "do { } while (false) | |||
391 | << int64_t(LaterOff + LaterSize) << ")\n")do { } while (false); | |||
392 | ||||
393 | // Make sure that we only insert non-overlapping intervals and combine | |||
394 | // adjacent intervals. The intervals are stored in the map with the ending | |||
395 | // offset as the key (in the half-open sense) and the starting offset as | |||
396 | // the value. | |||
397 | int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize; | |||
398 | ||||
399 | // Find any intervals ending at, or after, LaterIntStart which start | |||
400 | // before LaterIntEnd. | |||
401 | auto ILI = IM.lower_bound(LaterIntStart); | |||
402 | if (ILI != IM.end() && ILI->second <= LaterIntEnd) { | |||
403 | // This existing interval is overlapped with the current store somewhere | |||
404 | // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing | |||
405 | // intervals and adjusting our start and end. | |||
406 | LaterIntStart = std::min(LaterIntStart, ILI->second); | |||
407 | LaterIntEnd = std::max(LaterIntEnd, ILI->first); | |||
408 | ILI = IM.erase(ILI); | |||
409 | ||||
410 | // Continue erasing and adjusting our end in case other previous | |||
411 | // intervals are also overlapped with the current store. | |||
412 | // | |||
413 | // |--- earlier 1 ---| |--- earlier 2 ---| | |||
414 | // |------- later---------| | |||
415 | // | |||
416 | while (ILI != IM.end() && ILI->second <= LaterIntEnd) { | |||
417 | assert(ILI->second > LaterIntStart && "Unexpected interval"); | |||
418 | LaterIntEnd = std::max(LaterIntEnd, ILI->first); | |||
419 | ILI = IM.erase(ILI); | |||
420 | } | |||
421 | } | |||
422 | ||||
423 | IM[LaterIntEnd] = LaterIntStart; | |||
424 | ||||
425 | ILI = IM.begin(); | |||
426 | if (ILI->second <= EarlierOff && | |||
427 | ILI->first >= int64_t(EarlierOff + EarlierSize)) { | |||
428 | LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["do { } while (false) | |||
429 | << EarlierOff << ", "do { } while (false) | |||
430 | << int64_t(EarlierOff + EarlierSize)do { } while (false) | |||
431 | << ") Composite Later [" << ILI->second << ", "do { } while (false) | |||
432 | << ILI->first << ")\n")do { } while (false); | |||
433 | ++NumCompletePartials; | |||
434 | return OW_Complete; | |||
435 | } | |||
436 | } | |||
437 | ||||
438 | // Check for an earlier store which writes to all the memory locations that | |||
439 | // the later store writes to. | |||
440 | if (EnablePartialStoreMerging && LaterOff >= EarlierOff && | |||
441 | int64_t(EarlierOff + EarlierSize) > LaterOff && | |||
442 | uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) { | |||
443 | LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["do { } while (false) | |||
444 | << EarlierOff << ", "do { } while (false) | |||
445 | << int64_t(EarlierOff + EarlierSize)do { } while (false) | |||
446 | << ") by a later store [" << LaterOff << ", "do { } while (false) | |||
447 | << int64_t(LaterOff + LaterSize) << ")\n")do { } while (false); | |||
448 | // TODO: Maybe come up with a better name? | |||
449 | return OW_PartialEarlierWithFullLater; | |||
450 | } | |||
451 | ||||
452 | // Another interesting case is if the later store overwrites the end of the | |||
453 | // earlier store. | |||
454 | // | |||
455 | // |--earlier--| | |||
456 | // |-- later --| | |||
457 | // | |||
458 | // In this case we may want to trim the size of earlier to avoid generating | |||
459 | // writes to addresses which will definitely be overwritten later | |||
460 | if (!EnablePartialOverwriteTracking && | |||
461 | (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) && | |||
462 | int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize))) | |||
463 | return OW_End; | |||
464 | ||||
465 | // Finally, we also need to check if the later store overwrites the beginning | |||
466 | // of the earlier store. | |||
467 | // | |||
468 | // |--earlier--| | |||
469 | // |-- later --| | |||
470 | // | |||
471 | // In this case we may want to move the destination address and trim the size | |||
472 | // of earlier to avoid generating writes to addresses which will definitely | |||
473 | // be overwritten later. | |||
474 | if (!EnablePartialOverwriteTracking && | |||
475 | (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) { | |||
476 | assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) && | |||
477 | "Expect to be handled as OW_Complete"); | |||
478 | return OW_Begin; | |||
479 | } | |||
480 | // Otherwise, they don't completely overlap. | |||
481 | return OW_Unknown; | |||
482 | } | |||
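// Editor's sketch (illustrative only, not part of the upstream source): the
// interval map used above is keyed by the half-open end offset with the start
// offset as the value. For an earlier 8-byte store at offset 0 that is later
// hit by two 4-byte stores, the merge proceeds roughly as:
//
//   std::map<int64_t, int64_t> IM; // end offset -> start offset
//   IM[4] = 0;                     // later store #1 covers [0, 4)
//   // later store #2 covers [4, 8): lower_bound(4) finds {4, 0}, which is
//   // adjacent, so the two intervals merge and IM becomes {8: 0}.
//   // IM.begin() now spans [0, 8), covering the whole earlier store, so
//   // isPartialOverwrite returns OW_Complete.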
483 | ||||
484 | /// Returns true if the memory which is accessed by the second instruction is not | |||
485 | /// modified between the first and the second instruction. | |||
486 | /// Precondition: Second instruction must be dominated by the first | |||
487 | /// instruction. | |||
488 | static bool | |||
489 | memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, | |||
490 | BatchAAResults &AA, const DataLayout &DL, | |||
491 | DominatorTree *DT) { | |||
492 | // Do a backwards scan through the CFG from SecondI to FirstI. Look for | |||
493 | // instructions which can modify the memory location accessed by SecondI. | |||
494 | // | |||
495 | // While doing the walk keep track of the address to check. It might be | |||
496 | // different in different basic blocks due to PHI translation. | |||
497 | using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>; | |||
498 | SmallVector<BlockAddressPair, 16> WorkList; | |||
499 | // Keep track of the address we visited each block with. Bail out if we | |||
500 | // visit a block with different addresses. | |||
501 | DenseMap<BasicBlock *, Value *> Visited; | |||
502 | ||||
503 | BasicBlock::iterator FirstBBI(FirstI); | |||
504 | ++FirstBBI; | |||
505 | BasicBlock::iterator SecondBBI(SecondI); | |||
506 | BasicBlock *FirstBB = FirstI->getParent(); | |||
507 | BasicBlock *SecondBB = SecondI->getParent(); | |||
508 | MemoryLocation MemLoc = MemoryLocation::get(SecondI); | |||
509 | auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr); | |||
510 | ||||
511 | // Start checking the SecondBB. | |||
512 | WorkList.push_back( | |||
513 | std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr))); | |||
514 | bool isFirstBlock = true; | |||
515 | ||||
516 | // Check all blocks going backward until we reach the FirstBB. | |||
517 | while (!WorkList.empty()) { | |||
518 | BlockAddressPair Current = WorkList.pop_back_val(); | |||
519 | BasicBlock *B = Current.first; | |||
520 | PHITransAddr &Addr = Current.second; | |||
521 | Value *Ptr = Addr.getAddr(); | |||
522 | ||||
523 | // Ignore instructions before FirstI if this is the FirstBB. | |||
524 | BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin()); | |||
525 | ||||
526 | BasicBlock::iterator EI; | |||
527 | if (isFirstBlock) { | |||
528 | // Ignore instructions after SecondI if this is the first visit of SecondBB. | |||
529 | assert(B == SecondBB && "first block is not the store block"); | |||
530 | EI = SecondBBI; | |||
531 | isFirstBlock = false; | |||
532 | } else { | |||
533 | // It's not SecondBB or (in case of a loop) the second visit of SecondBB. | |||
534 | // In this case we also have to look at instructions after SecondI. | |||
535 | EI = B->end(); | |||
536 | } | |||
537 | for (; BI != EI; ++BI) { | |||
538 | Instruction *I = &*BI; | |||
539 | if (I->mayWriteToMemory() && I != SecondI) | |||
540 | if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr)))) | |||
541 | return false; | |||
542 | } | |||
543 | if (B != FirstBB) { | |||
544 | assert(B != &FirstBB->getParent()->getEntryBlock() && | |||
545 | "Should not hit the entry block because SI must be dominated by LI"); | |||
546 | for (BasicBlock *Pred : predecessors(B)) { | |||
547 | PHITransAddr PredAddr = Addr; | |||
548 | if (PredAddr.NeedsPHITranslationFromBlock(B)) { | |||
549 | if (!PredAddr.IsPotentiallyPHITranslatable()) | |||
550 | return false; | |||
551 | if (PredAddr.PHITranslateValue(B, Pred, DT, false)) | |||
552 | return false; | |||
553 | } | |||
554 | Value *TranslatedPtr = PredAddr.getAddr(); | |||
555 | auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr)); | |||
556 | if (!Inserted.second) { | |||
557 | // We already visited this block before. If it was with a different | |||
558 | // address - bail out! | |||
559 | if (TranslatedPtr != Inserted.first->second) | |||
560 | return false; | |||
561 | // ... otherwise just skip it. | |||
562 | continue; | |||
563 | } | |||
564 | WorkList.push_back(std::make_pair(Pred, PredAddr)); | |||
565 | } | |||
566 | } | |||
567 | } | |||
568 | return true; | |||
569 | } | |||
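// Editor's sketch (illustrative only, not part of the upstream source): in
// C-like terms, for
//
//   *p = 0;      // FirstI
//   g();         // if g() may write *p, the backwards walk returns false
//   *p = 1;      // SecondI (dominated by FirstI)
//
// the walk starts at SecondI and scans each block backwards towards FirstI,
// PHI-translating the address across predecessors and giving up if the same
// block is reached with two different translated addresses.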
570 | ||||
571 | static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierStart, | |||
572 | uint64_t &EarlierSize, int64_t LaterStart, | |||
573 | uint64_t LaterSize, bool IsOverwriteEnd) { | |||
574 | auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite); | |||
575 | Align PrefAlign = EarlierIntrinsic->getDestAlign().valueOrOne(); | |||
576 | ||||
577 | // We assume that memset/memcpy operates in chunks of the "largest" native | |||
578 | // type size, aligned on the same value. That means the optimal start and size | |||
579 | // of memset/memcpy should be a multiple of the preferred alignment of that | |||
580 | // type, so there is no sense in trying to reduce the store size any further, | |||
581 | // since any "extra" stores come for free anyway. | |||
582 | // On the other hand, the maximum alignment we can achieve is limited by the | |||
583 | // alignment of the initial store. | |||
584 | ||||
585 | // TODO: Limit maximum alignment by preferred (or abi?) alignment of the | |||
586 | // "largest" native type. | |||
587 | // Note: What is the proper way to get that value? | |||
588 | // Should TargetTransformInfo::getRegisterBitWidth be used or anything else? | |||
589 | // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign); | |||
590 | ||||
591 | int64_t ToRemoveStart = 0; | |||
592 | uint64_t ToRemoveSize = 0; | |||
593 | // Compute start and size of the region to remove. Make sure 'PrefAlign' is | |||
594 | // maintained on the remaining store. | |||
595 | if (IsOverwriteEnd) { | |||
596 | // Calculate the required adjustment for 'LaterStart' in order to keep the | |||
597 | // remaining store size aligned on 'PrefAlign'. | |||
598 | uint64_t Off = | |||
599 | offsetToAlignment(uint64_t(LaterStart - EarlierStart), PrefAlign); | |||
600 | ToRemoveStart = LaterStart + Off; | |||
601 | if (EarlierSize <= uint64_t(ToRemoveStart - EarlierStart)) | |||
602 | return false; | |||
603 | ToRemoveSize = EarlierSize - uint64_t(ToRemoveStart - EarlierStart); | |||
604 | } else { | |||
605 | ToRemoveStart = EarlierStart; | |||
606 | assert(LaterSize >= uint64_t(EarlierStart - LaterStart) && | |||
607 | "Not overlapping accesses?"); | |||
608 | ToRemoveSize = LaterSize - uint64_t(EarlierStart - LaterStart); | |||
609 | // Calculate the required adjustment for 'ToRemoveSize' in order to keep the | |||
610 | // start of the remaining store aligned on 'PrefAlign'. | |||
611 | uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign); | |||
612 | if (Off != 0) { | |||
613 | if (ToRemoveSize <= (PrefAlign.value() - Off)) | |||
614 | return false; | |||
615 | ToRemoveSize -= PrefAlign.value() - Off; | |||
616 | } | |||
617 | assert(isAligned(PrefAlign, ToRemoveSize) && | |||
618 | "Should preserve selected alignment"); | |||
619 | } | |||
620 | ||||
621 | assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove"); | |||
622 | assert(EarlierSize > ToRemoveSize && "Can't remove more than original size"); | |||
623 | ||||
624 | uint64_t NewSize = EarlierSize - ToRemoveSize; | |||
625 | if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) { | |||
626 | // When shortening an atomic memory intrinsic, the newly shortened | |||
627 | // length must remain an integer multiple of the element size. | |||
628 | const uint32_t ElementSize = AMI->getElementSizeInBytes(); | |||
629 | if (0 != NewSize % ElementSize) | |||
630 | return false; | |||
631 | } | |||
632 | ||||
633 | LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW "do { } while (false) | |||
634 | << (IsOverwriteEnd ? "END" : "BEGIN") << ": "do { } while (false) | |||
635 | << *EarlierWrite << "\n KILLER [" << ToRemoveStart << ", "do { } while (false) | |||
636 | << int64_t(ToRemoveStart + ToRemoveSize) << ")\n")do { } while (false); | |||
637 | ||||
638 | Value *EarlierWriteLength = EarlierIntrinsic->getLength(); | |||
639 | Value *TrimmedLength = | |||
640 | ConstantInt::get(EarlierWriteLength->getType(), NewSize); | |||
641 | EarlierIntrinsic->setLength(TrimmedLength); | |||
642 | EarlierIntrinsic->setDestAlignment(PrefAlign); | |||
643 | ||||
644 | if (!IsOverwriteEnd) { | |||
645 | Value *OrigDest = EarlierIntrinsic->getRawDest(); | |||
646 | Type *Int8PtrTy = | |||
647 | Type::getInt8PtrTy(EarlierIntrinsic->getContext(), | |||
648 | OrigDest->getType()->getPointerAddressSpace()); | |||
649 | Value *Dest = OrigDest; | |||
650 | if (OrigDest->getType() != Int8PtrTy) | |||
651 | Dest = CastInst::CreatePointerCast(OrigDest, Int8PtrTy, "", EarlierWrite); | |||
652 | Value *Indices[1] = { | |||
653 | ConstantInt::get(EarlierWriteLength->getType(), ToRemoveSize)}; | |||
654 | Instruction *NewDestGEP = GetElementPtrInst::CreateInBounds( | |||
655 | Type::getInt8Ty(EarlierIntrinsic->getContext()), | |||
656 | Dest, Indices, "", EarlierWrite); | |||
657 | NewDestGEP->setDebugLoc(EarlierIntrinsic->getDebugLoc()); | |||
658 | if (NewDestGEP->getType() != OrigDest->getType()) | |||
659 | NewDestGEP = CastInst::CreatePointerCast(NewDestGEP, OrigDest->getType(), | |||
660 | "", EarlierWrite); | |||
661 | EarlierIntrinsic->setDest(NewDestGEP); | |||
662 | } | |||
663 | ||||
664 | // Finally update start and size of earlier access. | |||
665 | if (!IsOverwriteEnd) | |||
666 | EarlierStart += ToRemoveSize; | |||
667 | EarlierSize = NewSize; | |||
668 | ||||
669 | return true; | |||
670 | } | |||
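// Editor's sketch (illustrative only, not part of the upstream source):
// assuming EarlierStart == 0, an 8-byte-aligned, 32-byte memset whose bytes
// [20, 32) are overwritten later (IsOverwriteEnd == true, LaterStart == 20)
// is trimmed as follows:
//
//   Off           = offsetToAlignment(20, Align(8)); // == 4
//   ToRemoveStart = 20 + 4;                          // == 24
//   ToRemoveSize  = 32 - 24;                         // == 8
//
// so the memset keeps 24 bytes rather than 20, preserving a length that is a
// multiple of the destination alignment.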
671 | ||||
672 | static bool tryToShortenEnd(Instruction *EarlierWrite, | |||
673 | OverlapIntervalsTy &IntervalMap, | |||
674 | int64_t &EarlierStart, uint64_t &EarlierSize) { | |||
675 | if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite)) | |||
676 | return false; | |||
677 | ||||
678 | OverlapIntervalsTy::iterator OII = --IntervalMap.end(); | |||
679 | int64_t LaterStart = OII->second; | |||
680 | uint64_t LaterSize = OII->first - LaterStart; | |||
681 | ||||
682 | assert(OII->first - LaterStart >= 0 && "Size expected to be positive"); | |||
683 | ||||
684 | if (LaterStart > EarlierStart && | |||
685 | // Note: "LaterStart - EarlierStart" is known to be positive due to | |||
686 | // preceding check. | |||
687 | (uint64_t)(LaterStart - EarlierStart) < EarlierSize && | |||
688 | // Note: "EarlierSize - (uint64_t)(LaterStart - EarlierStart)" is known to | |||
689 | // be non negative due to preceding checks. | |||
690 | LaterSize >= EarlierSize - (uint64_t)(LaterStart - EarlierStart)) { | |||
691 | if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart, | |||
692 | LaterSize, true)) { | |||
693 | IntervalMap.erase(OII); | |||
694 | return true; | |||
695 | } | |||
696 | } | |||
697 | return false; | |||
698 | } | |||
699 | ||||
700 | static bool tryToShortenBegin(Instruction *EarlierWrite, | |||
701 | OverlapIntervalsTy &IntervalMap, | |||
702 | int64_t &EarlierStart, uint64_t &EarlierSize) { | |||
703 | if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite)) | |||
704 | return false; | |||
705 | ||||
706 | OverlapIntervalsTy::iterator OII = IntervalMap.begin(); | |||
707 | int64_t LaterStart = OII->second; | |||
708 | uint64_t LaterSize = OII->first - LaterStart; | |||
709 | ||||
710 | assert(OII->first - LaterStart >= 0 && "Size expected to be positive"); | |||
711 | ||||
712 | if (LaterStart <= EarlierStart && | |||
713 | // Note: "EarlierStart - LaterStart" is known to be non negative due to | |||
714 | // preceding check. | |||
715 | LaterSize > (uint64_t)(EarlierStart - LaterStart)) { | |||
716 | // Note: "LaterSize - (uint64_t)(EarlierStart - LaterStart)" is known to be | |||
717 | // positive due to preceding checks. | |||
718 | assert(LaterSize - (uint64_t)(EarlierStart - LaterStart) < EarlierSize && | |||
719 | "Should have been handled as OW_Complete"); | |||
720 | if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart, | |||
721 | LaterSize, false)) { | |||
722 | IntervalMap.erase(OII); | |||
723 | return true; | |||
724 | } | |||
725 | } | |||
726 | return false; | |||
727 | } | |||
728 | ||||
729 | static bool removePartiallyOverlappedStores(const DataLayout &DL, | |||
730 | InstOverlapIntervalsTy &IOL, | |||
731 | const TargetLibraryInfo &TLI) { | |||
732 | bool Changed = false; | |||
733 | for (auto OI : IOL) { | |||
734 | Instruction *EarlierWrite = OI.first; | |||
735 | MemoryLocation Loc = getLocForWrite(EarlierWrite, TLI); | |||
736 | assert(isRemovable(EarlierWrite) && "Expect only removable instruction"); | |||
737 | ||||
738 | const Value *Ptr = Loc.Ptr->stripPointerCasts(); | |||
739 | int64_t EarlierStart = 0; | |||
740 | uint64_t EarlierSize = Loc.Size.getValue(); | |||
741 | GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL); | |||
742 | OverlapIntervalsTy &IntervalMap = OI.second; | |||
743 | Changed |= | |||
744 | tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize); | |||
745 | if (IntervalMap.empty()) | |||
746 | continue; | |||
747 | Changed |= | |||
748 | tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize); | |||
749 | } | |||
750 | return Changed; | |||
751 | } | |||
752 | ||||
753 | static Constant *tryToMergePartialOverlappingStores( | |||
754 | StoreInst *Earlier, StoreInst *Later, int64_t InstWriteOffset, | |||
755 | int64_t DepWriteOffset, const DataLayout &DL, BatchAAResults &AA, | |||
756 | DominatorTree *DT) { | |||
757 | ||||
758 | if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) && | |||
759 | DL.typeSizeEqualsStoreSize(Earlier->getValueOperand()->getType()) && | |||
760 | Later && isa<ConstantInt>(Later->getValueOperand()) && | |||
761 | DL.typeSizeEqualsStoreSize(Later->getValueOperand()->getType()) && | |||
762 | memoryIsNotModifiedBetween(Earlier, Later, AA, DL, DT)) { | |||
763 | // If the store we find is: | |||
764 | // a) partially overwritten by the store to 'Loc' | |||
765 | // b) the later store is fully contained in the earlier one and | |||
766 | // c) they both have a constant value | |||
767 | // d) none of the two stores need padding | |||
768 | // Merge the two stores, replacing the earlier store's value with a | |||
769 | // merge of both values. | |||
770 | // TODO: Deal with other constant types (vectors, etc), and probably | |||
771 | // some mem intrinsics (if needed) | |||
772 | ||||
773 | APInt EarlierValue = | |||
774 | cast<ConstantInt>(Earlier->getValueOperand())->getValue(); | |||
775 | APInt LaterValue = cast<ConstantInt>(Later->getValueOperand())->getValue(); | |||
776 | unsigned LaterBits = LaterValue.getBitWidth(); | |||
777 | assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth()); | |||
778 | LaterValue = LaterValue.zext(EarlierValue.getBitWidth()); | |||
779 | ||||
780 | // Offset of the smaller store inside the larger store | |||
781 | unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8; | |||
782 | unsigned LShiftAmount = DL.isBigEndian() ? EarlierValue.getBitWidth() - | |||
783 | BitOffsetDiff - LaterBits | |||
784 | : BitOffsetDiff; | |||
785 | APInt Mask = APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount, | |||
786 | LShiftAmount + LaterBits); | |||
787 | // Clear the bits we'll be replacing, then OR with the smaller | |||
788 | // store, shifted appropriately. | |||
789 | APInt Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount); | |||
790 | LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Earlier: " << *Earlierdo { } while (false) | |||
791 | << "\n Later: " << *Laterdo { } while (false) | |||
792 | << "\n Merged Value: " << Merged << '\n')do { } while (false); | |||
793 | return ConstantInt::get(Earlier->getValueOperand()->getType(), Merged); | |||
794 | } | |||
795 | return nullptr; | |||
796 | } | |||
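// Editor's sketch (illustrative only, not part of the upstream source): on a
// little-endian target, merging an earlier 4-byte store of 0x11223344 with a
// later 1-byte store of 0xAB at byte offset 1 works out to:
//
//   BitOffsetDiff = (1 - 0) * 8;                    // LShiftAmount == 8
//   Mask          = APInt::getBitsSet(32, 8, 16);   // 0x0000FF00
//   Merged        = (0x11223344 & ~0x0000FF00) | (0xAB << 8); // 0x1122AB44
//
// and the earlier store's value operand is replaced with 0x1122AB44.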
797 | ||||
798 | namespace { | |||
799 | // Returns true if \p I is an intrinsic that does not read or write memory. | |||
800 | bool isNoopIntrinsic(Instruction *I) { | |||
801 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { | |||
802 | switch (II->getIntrinsicID()) { | |||
803 | case Intrinsic::lifetime_start: | |||
804 | case Intrinsic::lifetime_end: | |||
805 | case Intrinsic::invariant_end: | |||
806 | case Intrinsic::launder_invariant_group: | |||
807 | case Intrinsic::assume: | |||
808 | return true; | |||
809 | case Intrinsic::dbg_addr: | |||
810 | case Intrinsic::dbg_declare: | |||
811 | case Intrinsic::dbg_label: | |||
812 | case Intrinsic::dbg_value: | |||
813 | llvm_unreachable("Intrinsic should not be modeled in MemorySSA"); | |||
814 | default: | |||
815 | return false; | |||
816 | } | |||
817 | } | |||
818 | return false; | |||
819 | } | |||
820 | ||||
821 | // Check if we can ignore \p D for DSE. | |||
822 | bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) { | |||
823 | Instruction *DI = D->getMemoryInst(); | |||
824 | // Calls that only access inaccessible memory cannot read or write any memory | |||
825 | // locations we consider for elimination. | |||
826 | if (auto *CB = dyn_cast<CallBase>(DI)) | |||
827 | if (CB->onlyAccessesInaccessibleMemory()) | |||
828 | return true; | |||
829 | ||||
830 | // We can eliminate stores to locations not visible to the caller across | |||
831 | // throwing instructions. | |||
832 | if (DI->mayThrow() && !DefVisibleToCaller) | |||
833 | return true; | |||
834 | ||||
835 | // We can remove the dead stores, irrespective of the fence and its ordering | |||
836 | // (release/acquire/seq_cst). Fences only constrain the ordering of | |||
837 | // already visible stores; they do not make a store visible to other | |||
838 | // threads. So, skipping over a fence does not change a store from being | |||
839 | // dead. | |||
840 | if (isa<FenceInst>(DI)) | |||
841 | return true; | |||
842 | ||||
843 | // Skip intrinsics that do not really read or modify memory. | |||
844 | if (isNoopIntrinsic(D->getMemoryInst())) | |||
845 | return true; | |||
846 | ||||
847 | return false; | |||
848 | } | |||
849 | ||||
850 | struct DSEState { | |||
851 | Function &F; | |||
852 | AliasAnalysis &AA; | |||
853 | ||||
854 | /// The single BatchAA instance that is used to cache AA queries. It will | |||
855 | /// not be invalidated over the whole run. This is safe, because: | |||
856 | /// 1. Only memory writes are removed, so the alias cache for memory | |||
857 | /// locations remains valid. | |||
858 | /// 2. No new instructions are added (only instructions removed), so cached | |||
859 | /// information for a deleted value cannot be accessed by a re-used new | |||
860 | /// value pointer. | |||
861 | BatchAAResults BatchAA; | |||
862 | ||||
863 | MemorySSA &MSSA; | |||
864 | DominatorTree &DT; | |||
865 | PostDominatorTree &PDT; | |||
866 | const TargetLibraryInfo &TLI; | |||
867 | const DataLayout &DL; | |||
868 | const LoopInfo &LI; | |||
869 | ||||
870 | // Whether the function contains any irreducible control flow, useful for | |||
871 | // accurately detecting loops. | |||
872 | bool ContainsIrreducibleLoops; | |||
873 | ||||
874 | // All MemoryDefs that potentially could kill other MemDefs. | |||
875 | SmallVector<MemoryDef *, 64> MemDefs; | |||
876 | // Any that should be skipped as they are already deleted | |||
877 | SmallPtrSet<MemoryAccess *, 4> SkipStores; | |||
878 | // Keep track of all of the objects that are invisible to the caller before | |||
879 | // the function returns. | |||
880 | // SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet; | |||
881 | DenseMap<const Value *, bool> InvisibleToCallerBeforeRet; | |||
882 | // Keep track of all of the objects that are invisible to the caller after | |||
883 | // the function returns. | |||
884 | DenseMap<const Value *, bool> InvisibleToCallerAfterRet; | |||
885 | // Keep track of blocks with throwing instructions not modeled in MemorySSA. | |||
886 | SmallPtrSet<BasicBlock *, 16> ThrowingBlocks; | |||
887 | // Post-order numbers for each basic block. Used to figure out if memory | |||
888 | // accesses are executed before another access. | |||
889 | DenseMap<BasicBlock *, unsigned> PostOrderNumbers; | |||
890 | ||||
891 | /// Keep track of instructions (partly) overlapping with killing MemoryDefs per | |||
892 | /// basic block. | |||
893 | DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs; | |||
894 | ||||
895 | DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT, | |||
896 | PostDominatorTree &PDT, const TargetLibraryInfo &TLI, | |||
897 | const LoopInfo &LI) | |||
898 | : F(F), AA(AA), BatchAA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI), | |||
899 | DL(F.getParent()->getDataLayout()), LI(LI) {} | |||
900 | ||||
901 | static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, | |||
902 | DominatorTree &DT, PostDominatorTree &PDT, | |||
903 | const TargetLibraryInfo &TLI, const LoopInfo &LI) { | |||
904 | DSEState State(F, AA, MSSA, DT, PDT, TLI, LI); | |||
905 | // Collect blocks with throwing instructions not modeled in MemorySSA and | |||
906 | // alloc-like objects. | |||
907 | unsigned PO = 0; | |||
908 | for (BasicBlock *BB : post_order(&F)) { | |||
909 | State.PostOrderNumbers[BB] = PO++; | |||
910 | for (Instruction &I : *BB) { | |||
911 | MemoryAccess *MA = MSSA.getMemoryAccess(&I); | |||
912 | if (I.mayThrow() && !MA) | |||
913 | State.ThrowingBlocks.insert(I.getParent()); | |||
914 | ||||
915 | auto *MD = dyn_cast_or_null<MemoryDef>(MA); | |||
916 | if (MD && State.MemDefs.size() < MemorySSADefsPerBlockLimit && | |||
917 | (State.getLocForWriteEx(&I) || State.isMemTerminatorInst(&I))) | |||
918 | State.MemDefs.push_back(MD); | |||
919 | } | |||
920 | } | |||
921 | ||||
922 | // Treat byval or inalloca arguments the same as Allocas: stores to them are | |||
923 | // dead at the end of the function. | |||
924 | for (Argument &AI : F.args()) | |||
925 | if (AI.hasPassPointeeByValueCopyAttr()) { | |||
926 | // For byval, the caller doesn't know the address of the allocation. | |||
927 | if (AI.hasByValAttr()) | |||
928 | State.InvisibleToCallerBeforeRet.insert({&AI, true}); | |||
929 | State.InvisibleToCallerAfterRet.insert({&AI, true}); | |||
930 | } | |||
931 | ||||
932 | // Collect whether there is any irreducible control flow in the function. | |||
933 | State.ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI); | |||
934 | ||||
935 | return State; | |||
936 | } | |||
937 | ||||
938 | /// Return 'OW_Complete' if a store to the 'Later' location (by \p LaterI | |||
939 | /// instruction) completely overwrites a store to the 'Earlier' location | |||
940 | /// (by \p EarlierI instruction). | |||
941 | /// Return OW_MaybePartial if \p Later does not completely overwrite | |||
942 | /// \p Earlier, but they both write to the same underlying object. In that | |||
943 | /// case, use isPartialOverwrite to check if \p Later partially overwrites | |||
944 | /// \p Earlier. Returns 'OW_Unknown' if nothing can be determined. | |||
945 | OverwriteResult | |||
946 | isOverwrite(const Instruction *LaterI, const Instruction *EarlierI, | |||
947 | const MemoryLocation &Later, const MemoryLocation &Earlier, | |||
948 | int64_t &EarlierOff, int64_t &LaterOff) { | |||
949 | // AliasAnalysis does not always account for loops. Limit overwrite checks | |||
950 | // to dependencies for which we can guarantee they are independent of any | |||
951 | // loops they are in. | |||
952 | if (!isGuaranteedLoopIndependent(EarlierI, LaterI, Earlier)) | |||
953 | return OW_Unknown; | |||
954 | ||||
955 | // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll | |||
956 | // get imprecise values here, though (except for unknown sizes). | |||
957 | if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise()) { | |||
958 | // In case no constant size is known, try to compare the IR values for the | |||
959 | // number of bytes written and check if they match. | |||
960 | const auto *LaterMemI = dyn_cast<MemIntrinsic>(LaterI); | |||
961 | const auto *EarlierMemI = dyn_cast<MemIntrinsic>(EarlierI); | |||
962 | if (LaterMemI && EarlierMemI) { | |||
963 | const Value *LaterV = LaterMemI->getLength(); | |||
964 | const Value *EarlierV = EarlierMemI->getLength(); | |||
965 | if (LaterV == EarlierV && BatchAA.isMustAlias(Earlier, Later)) | |||
966 | return OW_Complete; | |||
967 | } | |||
968 | ||||
969 | // Masked stores have imprecise locations, but we can reason about them | |||
970 | // to some extent. | |||
971 | return isMaskedStoreOverwrite(LaterI, EarlierI, BatchAA); | |||
972 | } | |||
973 | ||||
974 | const uint64_t LaterSize = Later.Size.getValue(); | |||
975 | const uint64_t EarlierSize = Earlier.Size.getValue(); | |||
976 | ||||
977 | // Query the alias information | |||
978 | AliasResult AAR = BatchAA.alias(Later, Earlier); | |||
979 | ||||
980 | // If the start pointers are the same, we just have to compare sizes to see if | |||
981 | // the later store was larger than the earlier store. | |||
982 | if (AAR == AliasResult::MustAlias) { | |||
983 | // Make sure that the Later size is >= the Earlier size. | |||
984 | if (LaterSize >= EarlierSize) | |||
985 | return OW_Complete; | |||
986 | } | |||
987 | ||||
988 | // If we hit a partial alias we may have a full overwrite | |||
989 | if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) { | |||
990 | int32_t Off = AAR.getOffset(); | |||
991 | if (Off >= 0 && (uint64_t)Off + EarlierSize <= LaterSize) | |||
992 | return OW_Complete; | |||
993 | } | |||
994 | ||||
995 | // Check to see if the later store is to the entire object (either a global, | |||
996 | // an alloca, or a byval/inalloca argument). If so, then it clearly | |||
997 | // overwrites any other store to the same object. | |||
998 | const Value *P1 = Earlier.Ptr->stripPointerCasts(); | |||
999 | const Value *P2 = Later.Ptr->stripPointerCasts(); | |||
1000 | const Value *UO1 = getUnderlyingObject(P1), *UO2 = getUnderlyingObject(P2); | |||
1001 | ||||
1002 | // If we can't resolve the same pointers to the same object, then we can't | |||
1003 | // analyze them at all. | |||
1004 | if (UO1 != UO2) | |||
1005 | return OW_Unknown; | |||
1006 | ||||
1007 | // If the "Later" store is to a recognizable object, get its size. | |||
1008 | uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, &F); | |||
1009 | if (ObjectSize != MemoryLocation::UnknownSize) | |||
1010 | if (ObjectSize == LaterSize && ObjectSize >= EarlierSize) | |||
1011 | return OW_Complete; | |||
1012 | ||||
1013 | // Okay, we have stores to two completely different pointers. Try to | |||
1014 | // decompose the pointer into a "base + constant_offset" form. If the base | |||
1015 | // pointers are equal, then we can reason about the two stores. | |||
1016 | EarlierOff = 0; | |||
1017 | LaterOff = 0; | |||
1018 | const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL); | |||
1019 | const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL); | |||
1020 | ||||
1021 | // If the base pointers still differ, we have two completely different stores. | |||
1022 | if (BP1 != BP2) | |||
1023 | return OW_Unknown; | |||
1024 | ||||
1025 | // The later access completely overlaps the earlier store if and only if | |||
1026 | // both start and end of the earlier one is "inside" the later one: | |||
1027 | // |<->|--earlier--|<->| | |||
1028 | // |-------later-------| | |||
1029 | // Accesses may overlap if and only if start of one of them is "inside" | |||
1030 | // another one: | |||
1031 | // |<->|--earlier--|<----->| | |||
1032 | // |-------later-------| | |||
1033 | // OR | |||
1034 | // |----- earlier -----| | |||
1035 | // |<->|---later---|<----->| | |||
1036 | // | |||
1037 | // We have to be careful here as *Off is signed while *.Size is unsigned. | |||
1038 | ||||
1039 | // Check if the earlier access starts "not before" the later one. | |||
1040 | if (EarlierOff >= LaterOff) { | |||
1041 | // If the earlier access ends "not after" the later access then the earlier | |||
1042 | // one is completely overwritten by the later one. | |||
1043 | if (uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize) | |||
1044 | return OW_Complete; | |||
1045 | // If start of the earlier access is "before" end of the later access then | |||
1046 | // accesses overlap. | |||
1047 | else if ((uint64_t)(EarlierOff - LaterOff) < LaterSize) | |||
1048 | return OW_MaybePartial; | |||
1049 | } | |||
1050 | // If start of the later access is "before" end of the earlier access then | |||
1051 | // accesses overlap. | |||
1052 | else if ((uint64_t)(LaterOff - EarlierOff) < EarlierSize) { | |||
1053 | return OW_MaybePartial; | |||
1054 | } | |||
1055 | ||||
1056 | // Can reach here only if accesses are known not to overlap. There is no | |||
1057 | // dedicated code to indicate no overlap so signal "unknown". | |||
1058 | return OW_Unknown; | |||
1059 | } | |||
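// Editor's sketch (illustrative only, not part of the upstream source): with
// a common base pointer, an earlier 4-byte store covering [4, 8) and a later
// 8-byte store covering [0, 8) give
//
//   EarlierOff = 4, LaterOff = 0;
//   uint64_t(EarlierOff - LaterOff) + EarlierSize; // 8 <= LaterSize (8)
//
// so isOverwrite returns OW_Complete; with LaterSize == 6 the sum (8) exceeds
// LaterSize but EarlierOff - LaterOff (4) is still < 6, so the result is
// OW_MaybePartial instead.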
1060 | ||||
1061 | bool isInvisibleToCallerAfterRet(const Value *V) { | |||
1062 | if (isa<AllocaInst>(V)) | |||
1063 | return true; | |||
1064 | auto I = InvisibleToCallerAfterRet.insert({V, false}); | |||
1065 | if (I.second) { | |||
1066 | if (!isInvisibleToCallerBeforeRet(V)) { | |||
1067 | I.first->second = false; | |||
1068 | } else { | |||
1069 | auto *Inst = dyn_cast<Instruction>(V); | |||
1070 | if (Inst && isAllocLikeFn(Inst, &TLI)) | |||
1071 | I.first->second = !PointerMayBeCaptured(V, true, false); | |||
1072 | } | |||
1073 | } | |||
1074 | return I.first->second; | |||
1075 | } | |||
1076 | ||||
1077 | bool isInvisibleToCallerBeforeRet(const Value *V) { | |||
1078 | if (isa<AllocaInst>(V)) | |||
1079 | return true; | |||
1080 | auto I = InvisibleToCallerBeforeRet.insert({V, false}); | |||
1081 | if (I.second) { | |||
1082 | auto *Inst = dyn_cast<Instruction>(V); | |||
1083 | if (Inst && isAllocLikeFn(Inst, &TLI)) | |||
1084 | // NOTE: This could be made more precise by PointerMayBeCapturedBefore | |||
1085 | // with the killing MemoryDef. But we refrain from doing so for now to | |||
1086 | // limit compile-time and this does not cause any changes to the number | |||
1087 | // of stores removed on a large test set in practice. | |||
1088 | I.first->second = !PointerMayBeCaptured(V, false, true); | |||
1089 | } | |||
1090 | return I.first->second; | |||
1091 | } | |||
1092 | ||||
1093 | Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const { | |||
1094 | if (!I->mayWriteToMemory()) | |||
1095 | return None; | |||
1096 | ||||
1097 | if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I)) | |||
1098 | return {MemoryLocation::getForDest(MTI)}; | |||
1099 | ||||
1100 | if (auto *CB = dyn_cast<CallBase>(I)) { | |||
1101 | // If the function may write to memory we do not know about, bail out. | |||
1102 | if (!CB->onlyAccessesArgMemory() && | |||
1103 | !CB->onlyAccessesInaccessibleMemOrArgMem()) | |||
1104 | return None; | |||
1105 | ||||
1106 | LibFunc LF; | |||
1107 | if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) { | |||
1108 | switch (LF) { | |||
1109 | case LibFunc_strcpy: | |||
1110 | case LibFunc_strncpy: | |||
1111 | case LibFunc_strcat: | |||
1112 | case LibFunc_strncat: | |||
1113 | return {MemoryLocation::getAfter(CB->getArgOperand(0))}; | |||
1114 | default: | |||
1115 | break; | |||
1116 | } | |||
1117 | } | |||
1118 | switch (CB->getIntrinsicID()) { | |||
1119 | case Intrinsic::init_trampoline: | |||
1120 | return {MemoryLocation::getAfter(CB->getArgOperand(0))}; | |||
1121 | case Intrinsic::masked_store: | |||
1122 | return {MemoryLocation::getForArgument(CB, 1, TLI)}; | |||
1123 | default: | |||
1124 | break; | |||
1125 | } | |||
1126 | return None; | |||
1127 | } | |||
1128 | ||||
1129 | return MemoryLocation::getOrNone(I); | |||
1130 | } | |||
1131 | ||||
1132 | /// Returns true if \p UseInst completely overwrites \p DefLoc | |||
1133 | /// (stored by \p DefInst). | |||
1134 | bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst, | |||
1135 | Instruction *UseInst) { | |||
1136 | // UseInst has a MemoryDef associated in MemorySSA. It's possible for a | |||
1137 | // MemoryDef to not write to memory, e.g. a volatile load is modeled as a | |||
1138 | // MemoryDef. | |||
1139 | if (!UseInst->mayWriteToMemory()) | |||
1140 | return false; | |||
1141 | ||||
1142 | if (auto *CB = dyn_cast<CallBase>(UseInst)) | |||
1143 | if (CB->onlyAccessesInaccessibleMemory()) | |||
1144 | return false; | |||
1145 | ||||
1146 | int64_t InstWriteOffset, DepWriteOffset; | |||
1147 | if (auto CC = getLocForWriteEx(UseInst)) | |||
1148 | return isOverwrite(UseInst, DefInst, *CC, DefLoc, DepWriteOffset, | |||
1149 | InstWriteOffset) == OW_Complete; | |||
1150 | return false; | |||
1151 | } | |||
1152 | ||||
1153 | /// Returns true if \p Def is not read before returning from the function. | |||
1154 | bool isWriteAtEndOfFunction(MemoryDef *Def) { | |||
1155 | LLVM_DEBUG(dbgs() << " Check if def " << *Def << " (" | |||
1156 | << *Def->getMemoryInst() | |||
1157 | << ") is at the end of the function\n"); | |||
1158 | ||||
1159 | auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst()); | |||
1160 | if (!MaybeLoc) { | |||
1161 | LLVM_DEBUG(dbgs() << " ... could not get location for write.\n"); | |||
1162 | return false; | |||
1163 | } | |||
1164 | ||||
1165 | SmallVector<MemoryAccess *, 4> WorkList; | |||
1166 | SmallPtrSet<MemoryAccess *, 8> Visited; | |||
1167 | auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) { | |||
1168 | if (!Visited.insert(Acc).second) | |||
1169 | return; | |||
1170 | for (Use &U : Acc->uses()) | |||
1171 | WorkList.push_back(cast<MemoryAccess>(U.getUser())); | |||
1172 | }; | |||
1173 | PushMemUses(Def); | |||
1174 | for (unsigned I = 0; I < WorkList.size(); I++) { | |||
1175 | if (WorkList.size() >= MemorySSAScanLimit) { | |||
1176 | LLVM_DEBUG(dbgs() << " ... hit exploration limit.\n"); | |||
1177 | return false; | |||
1178 | } | |||
1179 | ||||
1180 | MemoryAccess *UseAccess = WorkList[I]; | |||
1181 | // Simply adding the users of MemoryPhi to the worklist is not enough, | |||
1182 | // because we might miss read clobbers in different iterations of a loop, | |||
1183 | // for example. | |||
1184 | // TODO: Add support for phi translation to handle the loop case. | |||
1185 | if (isa<MemoryPhi>(UseAccess)) | |||
1186 | return false; | |||
1187 | ||||
1188 | // TODO: Checking for aliasing is expensive. Consider reducing the number | |||
1189 | // of times this is called and/or caching the results. | |||
1190 | Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst(); | |||
1191 | if (isReadClobber(*MaybeLoc, UseInst)) { | |||
1192 | LLVM_DEBUG(dbgs() << " ... hit read clobber " << *UseInst << ".\n"); | |||
1193 | return false; | |||
1194 | } | |||
1195 | ||||
1196 | if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) | |||
1197 | PushMemUses(UseDef); | |||
1198 | } | |||
1199 | return true; | |||
1200 | } | |||
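// Example for the walk above (illustrative, not part of the original source):
// for "store i32 1, ptr %a" where %a is a local alloca, if every MemorySSA
// user reachable from the store's MemoryDef is another MemoryDef that is not
// a read clobber (and no MemoryPhi is reached), the stored value can never be
// observed before the function returns and the store is a removal candidate.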
1201 | ||||
1202 | /// If \p I is a memory terminator like llvm.lifetime.end or free, return a | |||
1203 | /// pair with the MemoryLocation terminated by \p I and a boolean flag | |||
1204 | /// indicating whether \p I is a free-like call. | |||
1205 | Optional<std::pair<MemoryLocation, bool>> | |||
1206 | getLocForTerminator(Instruction *I) const { | |||
1207 | uint64_t Len; | |||
1208 | Value *Ptr; | |||
1209 | if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len), | |||
1210 | m_Value(Ptr)))) | |||
1211 | return {std::make_pair(MemoryLocation(Ptr, Len), false)}; | |||
1212 | ||||
1213 | if (auto *CB = dyn_cast<CallBase>(I)) { | |||
1214 | if (isFreeCall(I, &TLI)) | |||
1215 | return {std::make_pair(MemoryLocation::getAfter(CB->getArgOperand(0)), | |||
1216 | true)}; | |||
1217 | } | |||
1218 | ||||
1219 | return None; | |||
1220 | } | |||
1221 | ||||
1222 | /// Returns true if \p I is a memory terminator instruction like | |||
1223 | /// llvm.lifetime.end or free. | |||
1224 | bool isMemTerminatorInst(Instruction *I) const { | |||
1225 | IntrinsicInst *II = dyn_cast<IntrinsicInst>(I); | |||
1226 | return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) || | |||
1227 | isFreeCall(I, &TLI); | |||
1228 | } | |||
1229 | ||||
1230 | /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from | |||
1231 | /// instruction \p AccessI. | |||
1232 | bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI, | |||
1233 | Instruction *MaybeTerm) { | |||
1234 | Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc = | |||
1235 | getLocForTerminator(MaybeTerm); | |||
1236 | ||||
1237 | if (!MaybeTermLoc) | |||
1238 | return false; | |||
1239 | ||||
1240 | // If the terminator is a free-like call, all accesses to the underlying | |||
1241 | // object can be considered terminated. | |||
1242 | if (getUnderlyingObject(Loc.Ptr) != | |||
1243 | getUnderlyingObject(MaybeTermLoc->first.Ptr)) | |||
1244 | return false; | |||
1245 | ||||
1246 | auto TermLoc = MaybeTermLoc->first; | |||
1247 | if (MaybeTermLoc->second) { | |||
1248 | const Value *LocUO = getUnderlyingObject(Loc.Ptr); | |||
1249 | return BatchAA.isMustAlias(TermLoc.Ptr, LocUO); | |||
1250 | } | |||
1251 | int64_t InstWriteOffset, DepWriteOffset; | |||
1252 | return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, DepWriteOffset, | |||
1253 | InstWriteOffset) == OW_Complete; | |||
1254 | } | |||
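// Example for the terminator check above (illustrative, not part of the
// original source): "call void @free(ptr %p)" terminates every access based
// on the same underlying object as %p, so the must-alias check against the
// underlying object is enough. For "llvm.lifetime.end(i64 8, ptr %buf)" the
// terminated region is only 8 bytes, so the earlier write additionally has to
// be completely covered (OW_Complete) to be considered terminated.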
1255 | ||||
1256 | // Returns true if \p Use may read from \p DefLoc. | |||
1257 | bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) { | |||
1258 | if (isNoopIntrinsic(UseInst)) | |||
1259 | return false; | |||
1260 | ||||
1261 | // Monotonic or weaker atomic stores can be re-ordered and do not need to be | |||
1262 | // treated as read clobber. | |||
1263 | if (auto SI = dyn_cast<StoreInst>(UseInst)) | |||
1264 | return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic); | |||
1265 | ||||
1266 | if (!UseInst->mayReadFromMemory()) | |||
1267 | return false; | |||
1268 | ||||
1269 | if (auto *CB = dyn_cast<CallBase>(UseInst)) | |||
1270 | if (CB->onlyAccessesInaccessibleMemory()) | |||
1271 | return false; | |||
1272 | ||||
1273 | // NOTE: For calls, the number of stores removed could be slightly improved | |||
1274 | // by using AA.callCapturesBefore(UseInst, DefLoc, &DT), but that showed to | |||
1275 | // be expensive compared to the benefits in practice. For now, avoid more | |||
1276 | // expensive analysis to limit compile-time. | |||
1277 | return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc)); | |||
1278 | } | |||
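// Example for the ordering check above (illustrative, not part of the
// original source): a monotonic (or non-atomic) store to the location is not
// treated as a read clobber because it can be reordered, whereas a release or
// seq_cst store returns true here and therefore stops elimination across it.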
1279 | ||||
1280 | /// Returns true if a dependency between \p Current and \p KillingDef is | |||
1281 | /// guaranteed to be loop invariant for the loops that they are in, either | |||
1282 | /// because they are known to be in the same block or at the same loop level, | |||
1283 | /// or because \p CurrentLoc is guaranteed to reference a single MemoryLocation | |||
1284 | /// during execution of the containing function. | |||
1285 | bool isGuaranteedLoopIndependent(const Instruction *Current, | |||
1286 | const Instruction *KillingDef, | |||
1287 | const MemoryLocation &CurrentLoc) { | |||
1288 | // If the dependency is within the same block or loop level (being careful | |||
1289 | // of irreducible loops), we know that AA will return a valid result for the | |||
1290 | // memory dependency. (Both at the function level, outside of any loop, | |||
1291 | // would also be valid but we currently disable that to limit compile time). | |||
1292 | if (Current->getParent() == KillingDef->getParent()) | |||
1293 | return true; | |||
1294 | const Loop *CurrentLI = LI.getLoopFor(Current->getParent()); | |||
1295 | if (!ContainsIrreducibleLoops && CurrentLI && | |||
1296 | CurrentLI == LI.getLoopFor(KillingDef->getParent())) | |||
1297 | return true; | |||
1298 | // Otherwise check the memory location is invariant to any loops. | |||
1299 | return isGuaranteedLoopInvariant(CurrentLoc.Ptr); | |||
1300 | } | |||
1301 | ||||
1302 | /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible | |||
1303 | /// loop. In particular, this guarantees that it only references a single | |||
1304 | /// MemoryLocation during execution of the containing function. | |||
1305 | bool isGuaranteedLoopInvariant(const Value *Ptr) { | |||
1306 | auto IsGuaranteedLoopInvariantBase = [this](const Value *Ptr) { | |||
1307 | Ptr = Ptr->stripPointerCasts(); | |||
1308 | if (auto *I = dyn_cast<Instruction>(Ptr)) { | |||
1309 | if (isa<AllocaInst>(Ptr)) | |||
1310 | return true; | |||
1311 | ||||
1312 | if (isAllocLikeFn(I, &TLI)) | |||
1313 | return true; | |||
1314 | ||||
1315 | return false; | |||
1316 | } | |||
1317 | return true; | |||
1318 | }; | |||
1319 | ||||
1320 | Ptr = Ptr->stripPointerCasts(); | |||
1321 | if (auto *I = dyn_cast<Instruction>(Ptr)) { | |||
1322 | if (I->getParent()->isEntryBlock()) | |||
1323 | return true; | |||
1324 | } | |||
1325 | if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) { | |||
1326 | return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) && | |||
1327 | GEP->hasAllConstantIndices(); | |||
1328 | } | |||
1329 | return IsGuaranteedLoopInvariantBase(Ptr); | |||
1330 | } | |||
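// Example for the invariance check above (illustrative, not part of the
// original source): "getelementptr inbounds [4 x i32], ptr %a, i64 0, i64 2"
// with %a an alloca is a constant-index GEP on a fixed base, so it names the
// same location in every loop iteration; a GEP whose index is the loop's
// induction variable does not, and such candidates are rejected here.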
1331 | ||||
1332 | // Find a MemoryDef writing to \p DefLoc and dominating \p StartAccess, with | |||
1333 | // no read access between them or on any other path to a function exit block | |||
1334 | // if \p DefLoc is not accessible after the function returns. If there is no | |||
1335 | // such MemoryDef, return None. The returned value may not (completely) | |||
1336 | // overwrite \p DefLoc. Currently we bail out when we encounter an aliasing | |||
1337 | // MemoryUse (read). | |||
1338 | Optional<MemoryAccess *> | |||
1339 | getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess, | |||
1340 | const MemoryLocation &DefLoc, const Value *DefUO, | |||
1341 | unsigned &ScanLimit, unsigned &WalkerStepLimit, | |||
1342 | bool IsMemTerm, unsigned &PartialLimit) { | |||
1343 | if (ScanLimit == 0 || WalkerStepLimit == 0) { | |||
| ||||
1344 | LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n"); | |||
1345 | return None; | |||
1346 | } | |||
1347 | ||||
1348 | MemoryAccess *Current = StartAccess; | |||
1349 | Instruction *KillingI = KillingDef->getMemoryInst(); | |||
1350 | LLVM_DEBUG(dbgs() << " trying to get dominating access\n"); | |||
1351 | ||||
1352 | // Find the next clobbering Mod access for DefLoc, starting at StartAccess. | |||
1353 | Optional<MemoryLocation> CurrentLoc; | |||
1354 | for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) { | |||
1355 | LLVM_DEBUG({ | |||
1356 | dbgs() << " visiting " << *Current; | |||
1357 | if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current)) | |||
1358 | dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst() | |||
1359 | << ")"; | |||
1360 | dbgs() << "\n"; | |||
1361 | }); | |||
1362 | ||||
1363 | // Reached TOP. | |||
1364 | if (MSSA.isLiveOnEntryDef(Current)) { | |||
1365 | LLVM_DEBUG(dbgs() << " ... found LiveOnEntryDef\n"); | |||
1366 | return None; | |||
1367 | } | |||
1368 | ||||
1369 | // Cost of a step. Accesses in the same block are more likely to be valid | |||
1370 | // candidates for elimination, hence consider them cheaper. | |||
1371 | unsigned StepCost = KillingDef->getBlock() == Current->getBlock() | |||
1372 | ? MemorySSASameBBStepCost | |||
1373 | : MemorySSAOtherBBStepCost; | |||
1374 | if (WalkerStepLimit <= StepCost) { | |||
1375 | LLVM_DEBUG(dbgs() << " ... hit walker step limit\n"); | |||
1376 | return None; | |||
1377 | } | |||
1378 | WalkerStepLimit -= StepCost; | |||
1379 | ||||
1380 | // Return for MemoryPhis. They cannot be eliminated directly and the | |||
1381 | // caller is responsible for traversing them. | |||
1382 | if (isa<MemoryPhi>(Current)) { | |||
1383 | LLVM_DEBUG(dbgs() << " ... found MemoryPhi\n"); | |||
1384 | return Current; | |||
1385 | } | |||
1386 | ||||
1387 | // Below, check if CurrentDef is a valid candidate to be eliminated by | |||
1388 | // KillingDef. If it is not, check the next candidate. | |||
1389 | MemoryDef *CurrentDef = cast<MemoryDef>(Current); | |||
1390 | Instruction *CurrentI = CurrentDef->getMemoryInst(); | |||
1391 | ||||
1392 | if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(DefUO))) | |||
1393 | continue; | |||
1394 | ||||
1395 | // Before we try to remove anything, check for any extra throwing | |||
1396 | // instructions that block us from DSEing | |||
1397 | if (mayThrowBetween(KillingI, CurrentI, DefUO)) { | |||
1398 | LLVM_DEBUG(dbgs() << " ... skip, may throw!\n"); | |||
1399 | return None; | |||
1400 | } | |||
1401 | ||||
1402 | // Check for anything that looks like it will be a barrier to further | |||
1403 | // removal | |||
1404 | if (isDSEBarrier(DefUO, CurrentI)) { | |||
1405 | LLVM_DEBUG(dbgs() << " ... skip, barrier\n"); | |||
1406 | return None; | |||
1407 | } | |||
1408 | ||||
1409 | // If Current is known to be on path that reads DefLoc or is a read | |||
1410 | // clobber, bail out, as the path is not profitable. We skip this check | |||
1411 | // for intrinsic calls, because the code knows how to handle memcpy | |||
1412 | // intrinsics. | |||
1413 | if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(DefLoc, CurrentI)) | |||
1414 | return None; | |||
1415 | ||||
1416 | // Quick check if there are direct uses that are read-clobbers. | |||
1417 | if (any_of(Current->uses(), [this, &DefLoc, StartAccess](Use &U) { | |||
1418 | if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser())) | |||
1419 | return !MSSA.dominates(StartAccess, UseOrDef) && | |||
1420 | isReadClobber(DefLoc, UseOrDef->getMemoryInst()); | |||
1421 | return false; | |||
1422 | })) { | |||
1423 | LLVM_DEBUG(dbgs() << " ... found a read clobber\n"); | |||
1424 | return None; | |||
1425 | } | |||
1426 | ||||
1427 | // If Current cannot be analyzed or is not removable, check the next | |||
1428 | // candidate. | |||
1429 | if (!hasAnalyzableMemoryWrite(CurrentI, TLI) || !isRemovable(CurrentI)) | |||
1430 | continue; | |||
1431 | ||||
1432 | // If Current does not have an analyzable write location, skip it | |||
1433 | CurrentLoc = getLocForWriteEx(CurrentI); | |||
1434 | if (!CurrentLoc) | |||
1435 | continue; | |||
1436 | ||||
1437 | // AliasAnalysis does not account for loops. Limit elimination to | |||
1438 | // candidates for which we can guarantee that they always store to the same | |||
1439 | // memory location and that they are not located in different loops. | |||
1440 | if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) { | |||
1441 | LLVM_DEBUG(dbgs() << " ... not guaranteed loop independent\n"); | |||
1442 | WalkerStepLimit -= 1; | |||
1443 | continue; | |||
1444 | } | |||
1445 | ||||
1446 | if (IsMemTerm) { | |||
1447 | // If the killing def is a memory terminator (e.g. lifetime.end), check | |||
1448 | // the next candidate if Current does not write to the same | |||
1449 | // underlying object as the terminator. | |||
1450 | if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) | |||
1451 | continue; | |||
1452 | } else { | |||
1453 | int64_t InstWriteOffset, DepWriteOffset; | |||
1454 | auto OR = isOverwrite(KillingI, CurrentI, DefLoc, *CurrentLoc, | |||
1455 | DepWriteOffset, InstWriteOffset); | |||
1456 | // If Current does not write to the same object as KillingDef, check | |||
1457 | // the next candidate. | |||
1458 | if (OR == OW_Unknown) | |||
1459 | continue; | |||
1460 | else if (OR == OW_MaybePartial) { | |||
1461 | // If KillingDef only partially overwrites Current, check the next | |||
1462 | // candidate if the partial step limit is exceeded. This aggressively | |||
1463 | // limits the number of candidates for partial store elimination, | |||
1464 | // which are less likely to be removable in the end. | |||
1465 | if (PartialLimit <= 1) { | |||
1466 | WalkerStepLimit -= 1; | |||
1467 | continue; | |||
1468 | } | |||
1469 | PartialLimit -= 1; | |||
1470 | } | |||
1471 | } | |||
1472 | break; | |||
1473 | }; | |||
1474 | ||||
1475 | // Accesses to objects accessible after the function returns can only be | |||
1476 | // eliminated if the access is killed along all paths to the exit. Collect | |||
1477 | // the blocks with killing (=completely overwriting MemoryDefs) and check if | |||
1478 | // they cover all paths from EarlierAccess to any function exit. | |||
1479 | SmallPtrSet<Instruction *, 16> KillingDefs; | |||
1480 | KillingDefs.insert(KillingDef->getMemoryInst()); | |||
1481 | MemoryAccess *EarlierAccess = Current; | |||
1482 | Instruction *EarlierMemInst = | |||
1483 | cast<MemoryDef>(EarlierAccess)->getMemoryInst(); | |||
1484 | LLVM_DEBUG(dbgs() << " Checking for reads of " << *EarlierAccess << " (" | |||
1485 | << *EarlierMemInst << ")\n"); | |||
1486 | ||||
1487 | SmallSetVector<MemoryAccess *, 32> WorkList; | |||
1488 | auto PushMemUses = [&WorkList](MemoryAccess *Acc) { | |||
1489 | for (Use &U : Acc->uses()) | |||
1490 | WorkList.insert(cast<MemoryAccess>(U.getUser())); | |||
1491 | }; | |||
1492 | PushMemUses(EarlierAccess); | |||
1493 | ||||
1494 | // Optimistically collect all accesses for reads. If we do not find any | |||
1495 | // read clobbers, add them to the cache. | |||
1496 | SmallPtrSet<MemoryAccess *, 16> KnownNoReads; | |||
1497 | if (!EarlierMemInst->mayReadFromMemory()) | |||
1498 | KnownNoReads.insert(EarlierAccess); | |||
1499 | // Check if EarlierDef may be read. | |||
1500 | for (unsigned I = 0; I < WorkList.size(); I++) { | |||
1501 | MemoryAccess *UseAccess = WorkList[I]; | |||
1502 | ||||
1503 | LLVM_DEBUG(dbgs() << " " << *UseAccess); | |||
1504 | // Bail out if the number of accesses to check exceeds the scan limit. | |||
1505 | if (ScanLimit < (WorkList.size() - I)) { | |||
1506 | LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n"); | |||
1507 | return None; | |||
1508 | } | |||
1509 | --ScanLimit; | |||
1510 | NumDomMemDefChecks++; | |||
1511 | KnownNoReads.insert(UseAccess); | |||
1512 | ||||
1513 | if (isa<MemoryPhi>(UseAccess)) { | |||
1514 | if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) { | |||
1515 | return DT.properlyDominates(KI->getParent(), | |||
1516 | UseAccess->getBlock()); | |||
1517 | })) { | |||
1518 | LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n"); | |||
1519 | continue; | |||
1520 | } | |||
1521 | LLVM_DEBUG(dbgs() << "\n ... adding PHI uses\n"); | |||
1522 | PushMemUses(UseAccess); | |||
1523 | continue; | |||
1524 | } | |||
1525 | ||||
1526 | Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst(); | |||
1527 | LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n"); | |||
1528 | ||||
1529 | if (any_of(KillingDefs, [this, UseInst](Instruction *KI) { | |||
1530 | return DT.dominates(KI, UseInst); | |||
1531 | })) { | |||
1532 | LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n"); | |||
1533 | continue; | |||
1534 | } | |||
1535 | ||||
1536 | // A memory terminator kills all preceding MemoryDefs and all succeeding | |||
1537 | // MemoryAccesses. We do not have to check its users. | |||
1538 | if (isMemTerminator(*CurrentLoc, EarlierMemInst, UseInst)) { | |||
1539 | LLVM_DEBUG( | |||
1540 | dbgs() | |||
1541 | << " ... skipping, memterminator invalidates following accesses\n"); | |||
1542 | continue; | |||
1543 | } | |||
1544 | ||||
1545 | if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) { | |||
1546 | LLVM_DEBUG(dbgs() << " ... adding uses of intrinsic\n"); | |||
1547 | PushMemUses(UseAccess); | |||
1548 | continue; | |||
1549 | } | |||
1550 | ||||
1551 | if (UseInst->mayThrow() && !isInvisibleToCallerBeforeRet(DefUO)) { | |||
1552 | LLVM_DEBUG(dbgs() << " ... found throwing instruction\n"); | |||
1553 | return None; | |||
1554 | } | |||
1555 | ||||
1556 | // Uses which may read the original MemoryDef mean we cannot eliminate the | |||
1557 | // original MD. Stop walk. | |||
1558 | if (isReadClobber(*CurrentLoc, UseInst)) { | |||
1559 | LLVM_DEBUG(dbgs() << " ... found read clobber\n"); | |||
1560 | return None; | |||
1561 | } | |||
1562 | ||||
1563 | // If this worklist walks back to the original memory access (and the | |||
1564 | // pointer is not guaranteed loop invariant) then we cannot assume that a | |||
1565 | // store kills itself. | |||
1566 | if (EarlierAccess == UseAccess && | |||
1567 | !isGuaranteedLoopInvariant(CurrentLoc->Ptr)) { | |||
1568 | LLVM_DEBUG(dbgs() << " ... found not loop invariant self access\n"); | |||
1569 | return None; | |||
1570 | } | |||
1571 | // Otherwise, for the KillingDef and EarlierAccess we only have to check | |||
1572 | // if it reads the memory location. | |||
1573 | // TODO: It would probably be better to check for self-reads before | |||
1574 | // calling the function. | |||
1575 | if (KillingDef == UseAccess || EarlierAccess == UseAccess) { | |||
1576 | LLVM_DEBUG(dbgs() << " ... skipping killing def/dom access\n"); | |||
1577 | continue; | |||
1578 | } | |||
1579 | ||||
1580 | // Check all uses for MemoryDefs, except for defs completely overwriting | |||
1581 | // the original location. Otherwise we have to check uses of *all* | |||
1582 | // MemoryDefs we discover, including non-aliasing ones; without this we might | |||
1583 | // miss cases like the following: | |||
1584 | // 1 = Def(LoE) ; <----- EarlierDef stores [0,1] | |||
1585 | // 2 = Def(1) ; (2, 1) = NoAlias, stores [2,3] | |||
1586 | // Use(2) ; MayAlias 2 *and* 1, loads [0, 3]. | |||
1587 | // (The Use points to the *first* Def it may alias) | |||
1588 | // 3 = Def(1) ; <---- Current (3, 2) = NoAlias, (3,1) = MayAlias, | |||
1589 | // stores [0,1] | |||
1590 | if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) { | |||
1591 | if (isCompleteOverwrite(*CurrentLoc, EarlierMemInst, UseInst)) { | |||
1592 | BasicBlock *MaybeKillingBlock = UseInst->getParent(); | |||
1593 | if (PostOrderNumbers.find(MaybeKillingBlock)->second < | |||
1594 | PostOrderNumbers.find(EarlierAccess->getBlock())->second) { | |||
1595 | if (!isInvisibleToCallerAfterRet(DefUO)) { | |||
1596 | LLVM_DEBUG(dbgs() | |||
1597 | << " ... found killing def " << *UseInst << "\n"); | |||
1598 | KillingDefs.insert(UseInst); | |||
1599 | } | |||
1600 | } else { | |||
1601 | LLVM_DEBUG(dbgs() | |||
1602 | << " ... found preceding def " << *UseInst << "\n"); | |||
1603 | return None; | |||
1604 | } | |||
1605 | } else | |||
1606 | PushMemUses(UseDef); | |||
1607 | } | |||
1608 | } | |||
1609 | ||||
1610 | // For accesses to locations visible after the function returns, make sure | |||
1611 | // that the location is killed (=overwritten) along all paths from | |||
1612 | // EarlierAccess to the exit. | |||
1613 | if (!isInvisibleToCallerAfterRet(DefUO)) { | |||
1614 | SmallPtrSet<BasicBlock *, 16> KillingBlocks; | |||
1615 | for (Instruction *KD : KillingDefs) | |||
1616 | KillingBlocks.insert(KD->getParent()); | |||
1617 | assert(!KillingBlocks.empty() && | |||
1618 | "Expected at least a single killing block"); | |||
1619 | ||||
1620 | // Find the common post-dominator of all killing blocks. | |||
1621 | BasicBlock *CommonPred = *KillingBlocks.begin(); | |||
1622 | for (auto I = std::next(KillingBlocks.begin()), E = KillingBlocks.end(); | |||
1623 | I != E; I++) { | |||
1624 | if (!CommonPred) | |||
1625 | break; | |||
1626 | CommonPred = PDT.findNearestCommonDominator(CommonPred, *I); | |||
1627 | } | |||
1628 | ||||
1629 | // If CommonPred is in the set of killing blocks, just check if it | |||
1630 | // post-dominates EarlierAccess. | |||
1631 | if (KillingBlocks.count(CommonPred)) { | |||
1632 | if (PDT.dominates(CommonPred, EarlierAccess->getBlock())) | |||
1633 | return {EarlierAccess}; | |||
1634 | return None; | |||
1635 | } | |||
1636 | ||||
1637 | // If the common post-dominator does not post-dominate EarlierAccess, | |||
1638 | // there is a path from EarlierAccess to an exit not going through a | |||
1639 | // killing block. | |||
1640 | if (PDT.dominates(CommonPred, EarlierAccess->getBlock())) { | |||
1641 | SetVector<BasicBlock *> WorkList; | |||
1642 | ||||
1643 | // If CommonPred is null, there are multiple exits from the function. | |||
1644 | // They all have to be added to the worklist. | |||
1645 | if (CommonPred) | |||
1646 | WorkList.insert(CommonPred); | |||
1647 | else | |||
1648 | for (BasicBlock *R : PDT.roots()) | |||
1649 | WorkList.insert(R); | |||
1650 | ||||
1651 | NumCFGTries++; | |||
1652 | // Check if all paths starting from an exit node go through one of the | |||
1653 | // killing blocks before reaching EarlierAccess. | |||
1654 | for (unsigned I = 0; I < WorkList.size(); I++) { | |||
1655 | NumCFGChecks++; | |||
1656 | BasicBlock *Current = WorkList[I]; | |||
1657 | if (KillingBlocks.count(Current)) | |||
1658 | continue; | |||
1659 | if (Current == EarlierAccess->getBlock()) | |||
1660 | return None; | |||
1661 | ||||
1662 | // EarlierAccess is reachable from the entry, so we don't have to | |||
1663 | // explore unreachable blocks further. | |||
1664 | if (!DT.isReachableFromEntry(Current)) | |||
1665 | continue; | |||
1666 | ||||
1667 | for (BasicBlock *Pred : predecessors(Current)) | |||
1668 | WorkList.insert(Pred); | |||
1669 | ||||
1670 | if (WorkList.size() >= MemorySSAPathCheckLimit) | |||
1671 | return None; | |||
1672 | } | |||
1673 | NumCFGSuccess++; | |||
1674 | return {EarlierAccess}; | |||
1675 | } | |||
1676 | return None; | |||
1677 | } | |||
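// Worked example for the path check above (illustrative, not part of the
// original source): if EarlierAccess stores to a global and the only
// completely-overwriting store sits in just one successor of a conditional
// branch, the killing blocks do not post-dominate EarlierAccess and there is
// an exit path on which the old value remains visible, so None is returned.
// Only when every path from EarlierAccess to a function exit passes through
// a killing block is the earlier store reported as removable.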
1678 | ||||
1679 | // No aliasing MemoryUses of EarlierAccess found, EarlierAccess is | |||
1680 | // potentially dead. | |||
1681 | return {EarlierAccess}; | |||
1682 | } | |||
1683 | ||||
1684 | // Delete dead memory defs | |||
1685 | void deleteDeadInstruction(Instruction *SI) { | |||
1686 | MemorySSAUpdater Updater(&MSSA); | |||
1687 | SmallVector<Instruction *, 32> NowDeadInsts; | |||
1688 | NowDeadInsts.push_back(SI); | |||
1689 | --NumFastOther; | |||
1690 | ||||
1691 | while (!NowDeadInsts.empty()) { | |||
1692 | Instruction *DeadInst = NowDeadInsts.pop_back_val(); | |||
1693 | ++NumFastOther; | |||
1694 | ||||
1695 | // Try to preserve debug information attached to the dead instruction. | |||
1696 | salvageDebugInfo(*DeadInst); | |||
1697 | salvageKnowledge(DeadInst); | |||
1698 | ||||
1699 | // Remove the Instruction from MSSA. | |||
1700 | if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) { | |||
1701 | if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) { | |||
1702 | SkipStores.insert(MD); | |||
1703 | } | |||
1704 | Updater.removeMemoryAccess(MA); | |||
1705 | } | |||
1706 | ||||
1707 | auto I = IOLs.find(DeadInst->getParent()); | |||
1708 | if (I != IOLs.end()) | |||
1709 | I->second.erase(DeadInst); | |||
1710 | // Remove its operands | |||
1711 | for (Use &O : DeadInst->operands()) | |||
1712 | if (Instruction *OpI = dyn_cast<Instruction>(O)) { | |||
1713 | O = nullptr; | |||
1714 | if (isInstructionTriviallyDead(OpI, &TLI)) | |||
1715 | NowDeadInsts.push_back(OpI); | |||
1716 | } | |||
1717 | ||||
1718 | DeadInst->eraseFromParent(); | |||
1719 | } | |||
1720 | } | |||
1721 | ||||
1722 | // Check for any extra throws between SI and NI that block DSE. This only | |||
1723 | // checks extra maythrows (those that aren't MemoryDef's). MemoryDef that may | |||
1724 | // throw are handled during the walk from one def to the next. | |||
1725 | bool mayThrowBetween(Instruction *SI, Instruction *NI, | |||
1726 | const Value *SILocUnd) { | |||
1727 | // First see if we can ignore it by using the fact that SI is an | |||
1728 | // alloca/alloca like object that is not visible to the caller during | |||
1729 | // execution of the function. | |||
1730 | if (SILocUnd && isInvisibleToCallerBeforeRet(SILocUnd)) | |||
1731 | return false; | |||
1732 | ||||
1733 | if (SI->getParent() == NI->getParent()) | |||
1734 | return ThrowingBlocks.count(SI->getParent()); | |||
1735 | return !ThrowingBlocks.empty(); | |||
1736 | } | |||
1737 | ||||
1738 | // Check if \p NI acts as a DSE barrier for \p SI. The following instructions | |||
1739 | // act as barriers: | |||
1740 | // * A memory instruction that may throw and \p SI accesses a non-stack | |||
1741 | // object. | |||
1742 | // * Atomic stores stronger than monotonic. | |||
1743 | bool isDSEBarrier(const Value *SILocUnd, Instruction *NI) { | |||
1744 | // If NI may throw it acts as a barrier, unless the accessed location is an | |||
1745 | // alloca/alloca-like object that does not escape. | |||
1746 | if (NI->mayThrow() && !isInvisibleToCallerBeforeRet(SILocUnd)) | |||
1747 | return true; | |||
1748 | ||||
1749 | // If NI is an atomic load/store stronger than monotonic, do not try to | |||
1750 | // eliminate/reorder it. | |||
1751 | if (NI->isAtomic()) { | |||
1752 | if (auto *LI = dyn_cast<LoadInst>(NI)) | |||
1753 | return isStrongerThanMonotonic(LI->getOrdering()); | |||
1754 | if (auto *SI = dyn_cast<StoreInst>(NI)) | |||
1755 | return isStrongerThanMonotonic(SI->getOrdering()); | |||
1756 | if (auto *ARMW = dyn_cast<AtomicRMWInst>(NI)) | |||
1757 | return isStrongerThanMonotonic(ARMW->getOrdering()); | |||
1758 | if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(NI)) | |||
1759 | return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) || | |||
1760 | isStrongerThanMonotonic(CmpXchg->getFailureOrdering()); | |||
1761 | llvm_unreachable("other instructions should be skipped in MemorySSA"); | |||
1762 | } | |||
1763 | return false; | |||
1764 | } | |||
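// Example for the barrier rules above (illustrative, not part of the original
// source): a seq_cst atomic store between the two stores returns true here
// and stops the walk, while a monotonic atomic store does not act as a
// barrier; a may-throw instruction is only a barrier when the killed location
// is visible to the caller (e.g. a global rather than a non-escaping alloca).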
1765 | ||||
1766 | /// Eliminate writes to objects that are not visible in the caller and are not | |||
1767 | /// accessed before returning from the function. | |||
1768 | bool eliminateDeadWritesAtEndOfFunction() { | |||
1769 | bool MadeChange = false; | |||
1770 | LLVM_DEBUG( | |||
1771 | dbgs() | |||
1772 | << "Trying to eliminate MemoryDefs at the end of the function\n"); | |||
1773 | for (int I = MemDefs.size() - 1; I >= 0; I--) { | |||
1774 | MemoryDef *Def = MemDefs[I]; | |||
1775 | if (SkipStores.contains(Def) || !isRemovable(Def->getMemoryInst())) | |||
1776 | continue; | |||
1777 | ||||
1778 | Instruction *DefI = Def->getMemoryInst(); | |||
1779 | SmallVector<const Value *, 4> Pointers; | |||
1780 | auto DefLoc = getLocForWriteEx(DefI); | |||
1781 | if (!DefLoc) | |||
1782 | continue; | |||
1783 | ||||
1784 | // NOTE: Currently eliminating writes at the end of a function is limited | |||
1785 | // to MemoryDefs with a single underlying object, to save compile-time. In | |||
1786 | // practice it appears the case with multiple underlying objects is very | |||
1787 | // uncommon. If it turns out to be important, we can use | |||
1788 | // getUnderlyingObjects here instead. | |||
1789 | const Value *UO = getUnderlyingObject(DefLoc->Ptr); | |||
1790 | if (!UO || !isInvisibleToCallerAfterRet(UO)) | |||
1791 | continue; | |||
1792 | ||||
1793 | if (isWriteAtEndOfFunction(Def)) { | |||
1794 | // See through pointer-to-pointer bitcasts | |||
1795 | LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end " | |||
1796 | "of the function\n"); | |||
1797 | deleteDeadInstruction(DefI); | |||
1798 | ++NumFastStores; | |||
1799 | MadeChange = true; | |||
1800 | } | |||
1801 | } | |||
1802 | return MadeChange; | |||
1803 | } | |||
1804 | ||||
1805 | /// \returns true if \p Def is a no-op store, either because it | |||
1806 | /// directly stores back a loaded value or stores zero to a calloced object. | |||
1807 | bool storeIsNoop(MemoryDef *Def, const MemoryLocation &DefLoc, | |||
1808 | const Value *DefUO) { | |||
1809 | StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst()); | |||
1810 | MemSetInst *MemSet = dyn_cast<MemSetInst>(Def->getMemoryInst()); | |||
1811 | Constant *StoredConstant = nullptr; | |||
1812 | if (Store) | |||
1813 | StoredConstant = dyn_cast<Constant>(Store->getOperand(0)); | |||
1814 | if (MemSet) | |||
1815 | StoredConstant = dyn_cast<Constant>(MemSet->getValue()); | |||
1816 | ||||
1817 | if (StoredConstant && StoredConstant->isNullValue()) { | |||
1818 | auto *DefUOInst = dyn_cast<Instruction>(DefUO); | |||
1819 | if (DefUOInst && isCallocLikeFn(DefUOInst, &TLI)) { | |||
1820 | auto *UnderlyingDef = cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst)); | |||
1821 | // If UnderlyingDef is the clobbering access of Def, no instructions | |||
1822 | // between them can modify the memory location. | |||
1823 | auto *ClobberDef = | |||
1824 | MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def); | |||
1825 | return UnderlyingDef == ClobberDef; | |||
1826 | } | |||
1827 | } | |||
1828 | ||||
1829 | if (!Store) | |||
1830 | return false; | |||
1831 | ||||
1832 | if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) { | |||
1833 | if (LoadI->getPointerOperand() == Store->getOperand(1)) { | |||
1834 | // Get the defining access for the load. | |||
1835 | auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess(); | |||
1836 | // Fast path: the defining accesses are the same. | |||
1837 | if (LoadAccess == Def->getDefiningAccess()) | |||
1838 | return true; | |||
1839 | ||||
1840 | // Look through phi accesses. Recursively scan all phi accesses by | |||
1841 | // adding them to a worklist. Bail when we run into a memory def that | |||
1842 | // does not match LoadAccess. | |||
1843 | SetVector<MemoryAccess *> ToCheck; | |||
1844 | MemoryAccess *Current = | |||
1845 | MSSA.getWalker()->getClobberingMemoryAccess(Def); | |||
1846 | // We don't want to bail when we run into the store memory def. But, | |||
1847 | // the phi access may point to it. So, pretend like we've already | |||
1848 | // checked it. | |||
1849 | ToCheck.insert(Def); | |||
1850 | ToCheck.insert(Current); | |||
1851 | // Start at current (1) to simulate already having checked Def. | |||
1852 | for (unsigned I = 1; I < ToCheck.size(); ++I) { | |||
1853 | Current = ToCheck[I]; | |||
1854 | if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) { | |||
1855 | // Check all the operands. | |||
1856 | for (auto &Use : PhiAccess->incoming_values()) | |||
1857 | ToCheck.insert(cast<MemoryAccess>(&Use)); | |||
1858 | continue; | |||
1859 | } | |||
1860 | ||||
1861 | // If we found a memory def, bail. This happens when we have an | |||
1862 | // unrelated write in between an otherwise noop store. | |||
1863 | assert(isa<MemoryDef>(Current) && | |||
1864 | "Only MemoryDefs should reach here."); | |||
1865 | // TODO: Skip no alias MemoryDefs that have no aliasing reads. | |||
1866 | // We are searching for the definition of the store's destination. | |||
1867 | // So, if that is the same definition as the load, then this is a | |||
1868 | // noop. Otherwise, fail. | |||
1869 | if (LoadAccess != Current) | |||
1870 | return false; | |||
1871 | } | |||
1872 | return true; | |||
1873 | } | |||
1874 | } | |||
1875 | ||||
1876 | return false; | |||
1877 | } | |||
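// Examples for the no-op checks above (illustrative, not part of the original
// source): "store i32 0, ptr %p" is a no-op when %p is a fresh calloc-like
// allocation and nothing can write to it in between, and "%v = load i32, ptr %p"
// followed by "store i32 %v, ptr %p" is a no-op when the store's clobbering
// access (looking through MemoryPhis) is the same definition the load reads.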
1878 | }; | |||
1879 | ||||
1880 | static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, | |||
1881 | DominatorTree &DT, PostDominatorTree &PDT, | |||
1882 | const TargetLibraryInfo &TLI, | |||
1883 | const LoopInfo &LI) { | |||
1884 | bool MadeChange = false; | |||
1885 | ||||
1886 | DSEState State = DSEState::get(F, AA, MSSA, DT, PDT, TLI, LI); | |||
1887 | // For each store: | |||
1888 | for (unsigned I = 0; I < State.MemDefs.size(); I++) { | |||
1889 | MemoryDef *KillingDef = State.MemDefs[I]; | |||
1890 | if (State.SkipStores.count(KillingDef)) | |||
1891 | continue; | |||
1892 | Instruction *SI = KillingDef->getMemoryInst(); | |||
1893 | ||||
1894 | Optional<MemoryLocation> MaybeSILoc; | |||
1895 | if (State.isMemTerminatorInst(SI)) | |||
1896 | MaybeSILoc = State.getLocForTerminator(SI).map( | |||
1897 | [](const std::pair<MemoryLocation, bool> &P) { return P.first; }); | |||
1898 | else | |||
1899 | MaybeSILoc = State.getLocForWriteEx(SI); | |||
1900 | ||||
1901 | if (!MaybeSILoc) { | |||
1902 | LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for " | |||
1903 | << *SI << "\n"); | |||
1904 | continue; | |||
1905 | } | |||
1906 | MemoryLocation SILoc = *MaybeSILoc; | |||
1907 | assert(SILoc.Ptr && "SILoc should not be null"); | |||
1908 | const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr); | |||
1909 | ||||
1910 | MemoryAccess *Current = KillingDef; | |||
1911 | LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by " | |||
1912 | << *Current << " (" << *SI << ")\n"); | |||
1913 | ||||
1914 | unsigned ScanLimit = MemorySSAScanLimit; | |||
1915 | unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit; | |||
1916 | unsigned PartialLimit = MemorySSAPartialStoreLimit; | |||
1917 | // Worklist of MemoryAccesses that may be killed by KillingDef. | |||
1918 | SetVector<MemoryAccess *> ToCheck; | |||
1919 | ||||
1920 | if (SILocUnd) | |||
1921 | ToCheck.insert(KillingDef->getDefiningAccess()); | |||
1922 | ||||
1923 | bool Shortend = false; | |||
1924 | bool IsMemTerm = State.isMemTerminatorInst(SI); | |||
1925 | // Check if MemoryAccesses in the worklist are killed by KillingDef. | |||
1926 | for (unsigned I = 0; I < ToCheck.size(); I++) { | |||
1927 | Current = ToCheck[I]; | |||
1928 | if (State.SkipStores.count(Current)) | |||
1929 | continue; | |||
1930 | ||||
1931 | Optional<MemoryAccess *> Next = State.getDomMemoryDef( | |||
1932 | KillingDef, Current, SILoc, SILocUnd, ScanLimit, WalkerStepLimit, | |||
1933 | IsMemTerm, PartialLimit); | |||
1934 | ||||
1935 | if (!Next) { | |||
1936 | LLVM_DEBUG(dbgs() << " finished walk\n"); | |||
1937 | continue; | |||
1938 | } | |||
1939 | ||||
1940 | MemoryAccess *EarlierAccess = *Next; | |||
1941 | LLVM_DEBUG(dbgs() << " Checking if we can kill " << *EarlierAccess); | |||
1942 | if (isa<MemoryPhi>(EarlierAccess)) { | |||
1943 | LLVM_DEBUG(dbgs() << "\n ... adding incoming values to worklist\n"); | |||
1944 | for (Value *V : cast<MemoryPhi>(EarlierAccess)->incoming_values()) { | |||
1945 | MemoryAccess *IncomingAccess = cast<MemoryAccess>(V); | |||
1946 | BasicBlock *IncomingBlock = IncomingAccess->getBlock(); | |||
1947 | BasicBlock *PhiBlock = EarlierAccess->getBlock(); | |||
1948 | ||||
1949 | // We only consider incoming MemoryAccesses that come before the | |||
1950 | // MemoryPhi. Otherwise we could discover candidates that do not | |||
1951 | // strictly dominate our starting def. | |||
1952 | if (State.PostOrderNumbers[IncomingBlock] > | |||
1953 | State.PostOrderNumbers[PhiBlock]) | |||
1954 | ToCheck.insert(IncomingAccess); | |||
1955 | } | |||
1956 | continue; | |||
1957 | } | |||
1958 | auto *NextDef = cast<MemoryDef>(EarlierAccess); | |||
1959 | Instruction *NI = NextDef->getMemoryInst(); | |||
1960 | LLVM_DEBUG(dbgs() << " (" << *NI << ")\n"); | |||
1961 | ToCheck.insert(NextDef->getDefiningAccess()); | |||
1962 | NumGetDomMemoryDefPassed++; | |||
1963 | ||||
1964 | if (!DebugCounter::shouldExecute(MemorySSACounter)) | |||
1965 | continue; | |||
1966 | ||||
1967 | MemoryLocation NILoc = *State.getLocForWriteEx(NI); | |||
1968 | ||||
1969 | if (IsMemTerm) { | |||
1970 | const Value *NIUnd = getUnderlyingObject(NILoc.Ptr); | |||
1971 | if (SILocUnd != NIUnd) | |||
1972 | continue; | |||
1973 | LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI | |||
1974 | << "\n KILLER: " << *SI << '\n'); | |||
1975 | State.deleteDeadInstruction(NI); | |||
1976 | ++NumFastStores; | |||
1977 | MadeChange = true; | |||
1978 | } else { | |||
1979 | // Check if NI overwrites SI. | |||
1980 | int64_t InstWriteOffset, DepWriteOffset; | |||
1981 | OverwriteResult OR = State.isOverwrite(SI, NI, SILoc, NILoc, | |||
1982 | DepWriteOffset, InstWriteOffset); | |||
1983 | if (OR == OW_MaybePartial) { | |||
1984 | auto Iter = State.IOLs.insert( | |||
1985 | std::make_pair<BasicBlock *, InstOverlapIntervalsTy>( | |||
1986 | NI->getParent(), InstOverlapIntervalsTy())); | |||
1987 | auto &IOL = Iter.first->second; | |||
1988 | OR = isPartialOverwrite(SILoc, NILoc, DepWriteOffset, InstWriteOffset, | |||
1989 | NI, IOL); | |||
1990 | } | |||
1991 | ||||
1992 | if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) { | |||
1993 | auto *Earlier = dyn_cast<StoreInst>(NI); | |||
1994 | auto *Later = dyn_cast<StoreInst>(SI); | |||
1995 | // We are re-using tryToMergePartialOverlappingStores, which requires | |||
1996 | // Earlier to dominate Later. | |||
1997 | // TODO: implement tryToMergePartialOverlappingStores using MemorySSA. | |||
1998 | if (Earlier && Later && DT.dominates(Earlier, Later)) { | |||
1999 | if (Constant *Merged = tryToMergePartialOverlappingStores( | |||
2000 | Earlier, Later, InstWriteOffset, DepWriteOffset, State.DL, | |||
2001 | State.BatchAA, &DT)) { | |||
2002 | ||||
2003 | // Update stored value of earlier store to merged constant. | |||
2004 | Earlier->setOperand(0, Merged); | |||
2005 | ++NumModifiedStores; | |||
2006 | MadeChange = true; | |||
2007 | ||||
2008 | Shortend = true; | |||
2009 | // Remove later store and remove any outstanding overlap intervals | |||
2010 | // for the updated store. | |||
2011 | State.deleteDeadInstruction(Later); | |||
2012 | auto I = State.IOLs.find(Earlier->getParent()); | |||
2013 | if (I != State.IOLs.end()) | |||
2014 | I->second.erase(Earlier); | |||
2015 | break; | |||
2016 | } | |||
2017 | } | |||
2018 | } | |||
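// Illustrative sketch of the merge case above (hypothetical values, not part
// of the original source): if the earlier store writes a 32-bit constant to
// %p and the later store overwrites a single byte of it with another
// constant, tryToMergePartialOverlappingStores can fold the byte into the
// earlier constant; the earlier store is then updated in place and the later
// store is deleted as dead.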
2019 | ||||
2020 | if (OR == OW_Complete) { | |||
2021 | LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI | |||
2022 | << "\n KILLER: " << *SI << '\n'); | |||
2023 | State.deleteDeadInstruction(NI); | |||
2024 | ++NumFastStores; | |||
2025 | MadeChange = true; | |||
2026 | } | |||
2027 | } | |||
2028 | } | |||
2029 | ||||
2030 | // Check if the store is a no-op. | |||
2031 | if (!Shortend && isRemovable(SI) && | |||
2032 | State.storeIsNoop(KillingDef, SILoc, SILocUnd)) { | |||
2033 | LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *SI << '\n'); | |||
2034 | State.deleteDeadInstruction(SI); | |||
2035 | NumRedundantStores++; | |||
2036 | MadeChange = true; | |||
2037 | continue; | |||
2038 | } | |||
2039 | } | |||
2040 | ||||
2041 | if (EnablePartialOverwriteTracking) | |||
2042 | for (auto &KV : State.IOLs) | |||
2043 | MadeChange |= removePartiallyOverlappedStores(State.DL, KV.second, TLI); | |||
2044 | ||||
2045 | MadeChange |= State.eliminateDeadWritesAtEndOfFunction(); | |||
2046 | return MadeChange; | |||
2047 | } | |||
2048 | } // end anonymous namespace | |||
2049 | ||||
2050 | //===----------------------------------------------------------------------===// | |||
2051 | // DSE Pass | |||
2052 | //===----------------------------------------------------------------------===// | |||
2053 | PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) { | |||
2054 | AliasAnalysis &AA = AM.getResult<AAManager>(F); | |||
2055 | const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F); | |||
2056 | DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F); | |||
2057 | MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); | |||
2058 | PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F); | |||
2059 | LoopInfo &LI = AM.getResult<LoopAnalysis>(F); | |||
2060 | ||||
2061 | bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI); | |||
2062 | ||||
2063 | #ifdef LLVM_ENABLE_STATS | |||
2064 | if (AreStatisticsEnabled()) | |||
2065 | for (auto &I : instructions(F)) | |||
2066 | NumRemainingStores += isa<StoreInst>(&I); | |||
2067 | #endif | |||
2068 | ||||
2069 | if (!Changed) | |||
2070 | return PreservedAnalyses::all(); | |||
2071 | ||||
2072 | PreservedAnalyses PA; | |||
2073 | PA.preserveSet<CFGAnalyses>(); | |||
2074 | PA.preserve<MemorySSAAnalysis>(); | |||
2075 | PA.preserve<LoopAnalysis>(); | |||
2076 | return PA; | |||
2077 | } | |||
2078 | ||||
2079 | namespace { | |||
2080 | ||||
2081 | /// A legacy pass for the legacy pass manager that wraps \c DSEPass. | |||
2082 | class DSELegacyPass : public FunctionPass { | |||
2083 | public: | |||
2084 | static char ID; // Pass identification, replacement for typeid | |||
2085 | ||||
2086 | DSELegacyPass() : FunctionPass(ID) { | |||
2087 | initializeDSELegacyPassPass(*PassRegistry::getPassRegistry()); | |||
2088 | } | |||
2089 | ||||
2090 | bool runOnFunction(Function &F) override { | |||
2091 | if (skipFunction(F)) | |||
2092 | return false; | |||
2093 | ||||
2094 | AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); | |||
2095 | DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | |||
2096 | const TargetLibraryInfo &TLI = | |||
2097 | getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); | |||
2098 | MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA(); | |||
2099 | PostDominatorTree &PDT = | |||
2100 | getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree(); | |||
2101 | LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); | |||
2102 | ||||
2103 | bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI); | |||
2104 | ||||
2105 | #ifdef LLVM_ENABLE_STATS | |||
2106 | if (AreStatisticsEnabled()) | |||
2107 | for (auto &I : instructions(F)) | |||
2108 | NumRemainingStores += isa<StoreInst>(&I); | |||
2109 | #endif | |||
2110 | ||||
2111 | return Changed; | |||
2112 | } | |||
2113 | ||||
2114 | void getAnalysisUsage(AnalysisUsage &AU) const override { | |||
2115 | AU.setPreservesCFG(); | |||
2116 | AU.addRequired<AAResultsWrapperPass>(); | |||
2117 | AU.addRequired<TargetLibraryInfoWrapperPass>(); | |||
2118 | AU.addPreserved<GlobalsAAWrapperPass>(); | |||
2119 | AU.addRequired<DominatorTreeWrapperPass>(); | |||
2120 | AU.addPreserved<DominatorTreeWrapperPass>(); | |||
2121 | AU.addRequired<PostDominatorTreeWrapperPass>(); | |||
2122 | AU.addRequired<MemorySSAWrapperPass>(); | |||
2123 | AU.addPreserved<PostDominatorTreeWrapperPass>(); | |||
2124 | AU.addPreserved<MemorySSAWrapperPass>(); | |||
2125 | AU.addRequired<LoopInfoWrapperPass>(); | |||
2126 | AU.addPreserved<LoopInfoWrapperPass>(); | |||
2127 | } | |||
2128 | }; | |||
2129 | ||||
2130 | } // end anonymous namespace | |||
2131 | ||||
2132 | char DSELegacyPass::ID = 0; | |||
2133 | ||||
2134 | INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false, | |||
2135 | false) | |||
2136 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) | |||
2137 | INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass) | |||
2138 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) | |||
2139 | INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) | |||
2140 | INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) | |||
2141 | INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass) | |||
2142 | INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) | |||
2143 | INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) | |||
2144 | INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false, | |||
2145 | false) | |||
2146 | ||||
2147 | FunctionPass *llvm::createDeadStoreEliminationPass() { | |||
2148 | return new DSELegacyPass(); | |||
2149 | } |
1 | //===- GenericDomTree.h - Generic dominator trees for graphs ----*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | /// \file | |||
9 | /// | |||
10 | /// This file defines a set of templates that efficiently compute a dominator | |||
11 | /// tree over a generic graph. This is used typically in LLVM for fast | |||
12 | /// dominance queries on the CFG, but is fully generic w.r.t. the underlying | |||
13 | /// graph types. | |||
14 | /// | |||
15 | /// Unlike ADT/* graph algorithms, generic dominator tree has more requirements | |||
16 | /// on the graph's NodeRef. The NodeRef should be a pointer and, | |||
17 | /// NodeRef->getParent() must return the parent node that is also a pointer. | |||
18 | /// | |||
19 | /// FIXME: Maybe GenericDomTree needs a TreeTraits, instead of GraphTraits. | |||
20 | /// | |||
21 | //===----------------------------------------------------------------------===// | |||
22 | ||||
23 | #ifndef LLVM_SUPPORT_GENERICDOMTREE_H | |||
24 | #define LLVM_SUPPORT_GENERICDOMTREE_H | |||
25 | ||||
26 | #include "llvm/ADT/DenseMap.h" | |||
27 | #include "llvm/ADT/GraphTraits.h" | |||
28 | #include "llvm/ADT/STLExtras.h" | |||
29 | #include "llvm/ADT/SmallPtrSet.h" | |||
30 | #include "llvm/ADT/SmallVector.h" | |||
31 | #include "llvm/Support/CFGDiff.h" | |||
32 | #include "llvm/Support/CFGUpdate.h" | |||
33 | #include "llvm/Support/raw_ostream.h" | |||
34 | #include <algorithm> | |||
35 | #include <cassert> | |||
36 | #include <cstddef> | |||
37 | #include <iterator> | |||
38 | #include <memory> | |||
39 | #include <type_traits> | |||
40 | #include <utility> | |||
41 | ||||
42 | namespace llvm { | |||
43 | ||||
44 | template <typename NodeT, bool IsPostDom> | |||
45 | class DominatorTreeBase; | |||
46 | ||||
47 | namespace DomTreeBuilder { | |||
48 | template <typename DomTreeT> | |||
49 | struct SemiNCAInfo; | |||
50 | } // namespace DomTreeBuilder | |||
51 | ||||
52 | /// Base class for the actual dominator tree node. | |||
53 | template <class NodeT> class DomTreeNodeBase { | |||
54 | friend class PostDominatorTree; | |||
55 | friend class DominatorTreeBase<NodeT, false>; | |||
56 | friend class DominatorTreeBase<NodeT, true>; | |||
57 | friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, false>>; | |||
58 | friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, true>>; | |||
59 | ||||
60 | NodeT *TheBB; | |||
61 | DomTreeNodeBase *IDom; | |||
62 | unsigned Level; | |||
63 | SmallVector<DomTreeNodeBase *, 4> Children; | |||
64 | mutable unsigned DFSNumIn = ~0; | |||
65 | mutable unsigned DFSNumOut = ~0; | |||
66 | ||||
67 | public: | |||
68 | DomTreeNodeBase(NodeT *BB, DomTreeNodeBase *iDom) | |||
69 | : TheBB(BB), IDom(iDom), Level(IDom ? IDom->Level + 1 : 0) {} | |||
70 | ||||
71 | using iterator = typename SmallVector<DomTreeNodeBase *, 4>::iterator; | |||
72 | using const_iterator = | |||
73 | typename SmallVector<DomTreeNodeBase *, 4>::const_iterator; | |||
74 | ||||
75 | iterator begin() { return Children.begin(); } | |||
76 | iterator end() { return Children.end(); } | |||
77 | const_iterator begin() const { return Children.begin(); } | |||
78 | const_iterator end() const { return Children.end(); } | |||
79 | ||||
80 | DomTreeNodeBase *const &back() const { return Children.back(); } | |||
81 | DomTreeNodeBase *&back() { return Children.back(); } | |||
82 | ||||
83 | iterator_range<iterator> children() { return make_range(begin(), end()); } | |||
84 | iterator_range<const_iterator> children() const { | |||
85 | return make_range(begin(), end()); | |||
86 | } | |||
87 | ||||
88 | NodeT *getBlock() const { return TheBB; } | |||
89 | DomTreeNodeBase *getIDom() const { return IDom; } | |||
90 | unsigned getLevel() const { return Level; } | |||
91 | ||||
92 | std::unique_ptr<DomTreeNodeBase> addChild( | |||
93 | std::unique_ptr<DomTreeNodeBase> C) { | |||
94 | Children.push_back(C.get()); | |||
95 | return C; | |||
96 | } | |||
97 | ||||
98 | bool isLeaf() const { return Children.empty(); } | |||
99 | size_t getNumChildren() const { return Children.size(); } | |||
100 | ||||
101 | void clearAllChildren() { Children.clear(); } | |||
102 | ||||
103 | bool compare(const DomTreeNodeBase *Other) const { | |||
104 | if (getNumChildren() != Other->getNumChildren()) | |||
105 | return true; | |||
106 | ||||
107 | if (Level != Other->Level) return true; | |||
108 | ||||
109 | SmallPtrSet<const NodeT *, 4> OtherChildren; | |||
110 | for (const DomTreeNodeBase *I : *Other) { | |||
111 | const NodeT *Nd = I->getBlock(); | |||
112 | OtherChildren.insert(Nd); | |||
113 | } | |||
114 | ||||
115 | for (const DomTreeNodeBase *I : *this) { | |||
116 | const NodeT *N = I->getBlock(); | |||
117 | if (OtherChildren.count(N) == 0) | |||
118 | return true; | |||
119 | } | |||
120 | return false; | |||
121 | } | |||
122 | ||||
123 | void setIDom(DomTreeNodeBase *NewIDom) { | |||
124 | assert(IDom && "No immediate dominator?"); | |||
125 | if (IDom == NewIDom) return; | |||
126 | ||||
127 | auto I = find(IDom->Children, this); | |||
128 | assert(I != IDom->Children.end() && | |||
129 | "Not in immediate dominator children set!"); | |||
130 | // I am no longer your child... | |||
131 | IDom->Children.erase(I); | |||
132 | ||||
133 | // Switch to new dominator | |||
134 | IDom = NewIDom; | |||
135 | IDom->Children.push_back(this); | |||
136 | ||||
137 | UpdateLevel(); | |||
138 | } | |||
139 | ||||
140 | /// getDFSNumIn/getDFSNumOut - These return the DFS visitation order for nodes | |||
141 | /// in the dominator tree. They are only guaranteed valid if | |||
142 | /// updateDFSNumbers() has been called. | |||
143 | unsigned getDFSNumIn() const { return DFSNumIn; } | |||
144 | unsigned getDFSNumOut() const { return DFSNumOut; } | |||
145 | ||||
146 | private: | |||
147 | // Return true if this node is dominated by other. Use this only if DFS info | |||
148 | // is valid. | |||
149 | bool DominatedBy(const DomTreeNodeBase *other) const { | |||
150 | return this->DFSNumIn >= other->DFSNumIn && | |||
151 | this->DFSNumOut <= other->DFSNumOut; | |||
152 | } | |||
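// Illustrative example (not part of the original source): after
// updateDFSNumbers(), if node A has {DFSNumIn, DFSNumOut} = {1, 8} and node B
// has {3, 4}, B's interval is nested inside A's, so B->DominatedBy(A) is
// true; disjoint intervals such as {1, 2} and {3, 4} mean neither node
// dominates the other.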
153 | ||||
154 | void UpdateLevel() { | |||
155 | assert(IDom); | |||
156 | if (Level == IDom->Level + 1) return; | |||
157 | ||||
158 | SmallVector<DomTreeNodeBase *, 64> WorkStack = {this}; | |||
159 | ||||
160 | while (!WorkStack.empty()) { | |||
161 | DomTreeNodeBase *Current = WorkStack.pop_back_val(); | |||
162 | Current->Level = Current->IDom->Level + 1; | |||
163 | ||||
164 | for (DomTreeNodeBase *C : *Current) { | |||
165 | assert(C->IDom); | |||
166 | if (C->Level != C->IDom->Level + 1) WorkStack.push_back(C); | |||
167 | } | |||
168 | } | |||
169 | } | |||
170 | }; | |||
171 | ||||
172 | template <class NodeT> | |||
173 | raw_ostream &operator<<(raw_ostream &O, const DomTreeNodeBase<NodeT> *Node) { | |||
174 | if (Node->getBlock()) | |||
175 | Node->getBlock()->printAsOperand(O, false); | |||
176 | else | |||
177 | O << " <<exit node>>"; | |||
178 | ||||
179 | O << " {" << Node->getDFSNumIn() << "," << Node->getDFSNumOut() << "} [" | |||
180 | << Node->getLevel() << "]\n"; | |||
181 | ||||
182 | return O; | |||
183 | } | |||
184 | ||||
185 | template <class NodeT> | |||
186 | void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &O, | |||
187 | unsigned Lev) { | |||
188 | O.indent(2 * Lev) << "[" << Lev << "] " << N; | |||
189 | for (typename DomTreeNodeBase<NodeT>::const_iterator I = N->begin(), | |||
190 | E = N->end(); | |||
191 | I != E; ++I) | |||
192 | PrintDomTree<NodeT>(*I, O, Lev + 1); | |||
193 | } | |||
194 | ||||
195 | namespace DomTreeBuilder { | |||
196 | // The routines below are provided in a separate header but referenced here. | |||
197 | template <typename DomTreeT> | |||
198 | void Calculate(DomTreeT &DT); | |||
199 | ||||
200 | template <typename DomTreeT> | |||
201 | void CalculateWithUpdates(DomTreeT &DT, | |||
202 | ArrayRef<typename DomTreeT::UpdateType> Updates); | |||
203 | ||||
204 | template <typename DomTreeT> | |||
205 | void InsertEdge(DomTreeT &DT, typename DomTreeT::NodePtr From, | |||
206 | typename DomTreeT::NodePtr To); | |||
207 | ||||
208 | template <typename DomTreeT> | |||
209 | void DeleteEdge(DomTreeT &DT, typename DomTreeT::NodePtr From, | |||
210 | typename DomTreeT::NodePtr To); | |||
211 | ||||
212 | template <typename DomTreeT> | |||
213 | void ApplyUpdates(DomTreeT &DT, | |||
214 | GraphDiff<typename DomTreeT::NodePtr, | |||
215 | DomTreeT::IsPostDominator> &PreViewCFG, | |||
216 | GraphDiff<typename DomTreeT::NodePtr, | |||
217 | DomTreeT::IsPostDominator> *PostViewCFG); | |||
218 | ||||
219 | template <typename DomTreeT> | |||
220 | bool Verify(const DomTreeT &DT, typename DomTreeT::VerificationLevel VL); | |||
221 | } // namespace DomTreeBuilder | |||
222 | ||||
223 | /// Core dominator tree base class. | |||
224 | /// | |||
225 | /// This class is a generic template over graph nodes. It is instantiated for | |||
226 | /// various graphs in the LLVM IR or in the code generator. | |||
227 | template <typename NodeT, bool IsPostDom> | |||
228 | class DominatorTreeBase { | |||
229 | public: | |||
230 | static_assert(std::is_pointer<typename GraphTraits<NodeT *>::NodeRef>::value, | |||
231 | "Currently DominatorTreeBase supports only pointer nodes"); | |||
232 | using NodeType = NodeT; | |||
233 | using NodePtr = NodeT *; | |||
234 | using ParentPtr = decltype(std::declval<NodeT *>()->getParent()); | |||
235 | static_assert(std::is_pointer<ParentPtr>::value, | |||
236 | "Currently NodeT's parent must be a pointer type"); | |||
237 | using ParentType = std::remove_pointer_t<ParentPtr>; | |||
238 | static constexpr bool IsPostDominator = IsPostDom; | |||
239 | ||||
240 | using UpdateType = cfg::Update<NodePtr>; | |||
241 | using UpdateKind = cfg::UpdateKind; | |||
242 | static constexpr UpdateKind Insert = UpdateKind::Insert; | |||
243 | static constexpr UpdateKind Delete = UpdateKind::Delete; | |||
244 | ||||
245 | enum class VerificationLevel { Fast, Basic, Full }; | |||
246 | ||||
247 | protected: | |||
248 | // Dominators always have a single root, postdominators can have more. | |||
249 | SmallVector<NodeT *, IsPostDom ? 4 : 1> Roots; | |||
250 | ||||
251 | using DomTreeNodeMapType = | |||
252 | DenseMap<NodeT *, std::unique_ptr<DomTreeNodeBase<NodeT>>>; | |||
253 | DomTreeNodeMapType DomTreeNodes; | |||
254 | DomTreeNodeBase<NodeT> *RootNode = nullptr; | |||
255 | ParentPtr Parent = nullptr; | |||
256 | ||||
257 | mutable bool DFSInfoValid = false; | |||
258 | mutable unsigned int SlowQueries = 0; | |||
259 | ||||
260 | friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase>; | |||
261 | ||||
262 | public: | |||
263 | DominatorTreeBase() {} | |||
264 | ||||
265 | DominatorTreeBase(DominatorTreeBase &&Arg) | |||
266 | : Roots(std::move(Arg.Roots)), | |||
267 | DomTreeNodes(std::move(Arg.DomTreeNodes)), | |||
268 | RootNode(Arg.RootNode), | |||
269 | Parent(Arg.Parent), | |||
270 | DFSInfoValid(Arg.DFSInfoValid), | |||
271 | SlowQueries(Arg.SlowQueries) { | |||
272 | Arg.wipe(); | |||
273 | } | |||
274 | ||||
275 | DominatorTreeBase &operator=(DominatorTreeBase &&RHS) { | |||
276 | Roots = std::move(RHS.Roots); | |||
277 | DomTreeNodes = std::move(RHS.DomTreeNodes); | |||
278 | RootNode = RHS.RootNode; | |||
279 | Parent = RHS.Parent; | |||
280 | DFSInfoValid = RHS.DFSInfoValid; | |||
281 | SlowQueries = RHS.SlowQueries; | |||
282 | RHS.wipe(); | |||
283 | return *this; | |||
284 | } | |||
285 | ||||
286 | DominatorTreeBase(const DominatorTreeBase &) = delete; | |||
287 | DominatorTreeBase &operator=(const DominatorTreeBase &) = delete; | |||
288 | ||||
289 | /// Iteration over roots. | |||
290 | /// | |||
291 | /// This may include multiple blocks if we are computing post dominators. | |||
292 | /// For forward dominators, this will always be a single block (the entry | |||
293 | /// block). | |||
294 | using root_iterator = typename SmallVectorImpl<NodeT *>::iterator; | |||
295 | using const_root_iterator = typename SmallVectorImpl<NodeT *>::const_iterator; | |||
296 | ||||
297 | root_iterator root_begin() { return Roots.begin(); } | |||
298 | const_root_iterator root_begin() const { return Roots.begin(); } | |||
299 | root_iterator root_end() { return Roots.end(); } | |||
300 | const_root_iterator root_end() const { return Roots.end(); } | |||
301 | ||||
302 | size_t root_size() const { return Roots.size(); } | |||
303 | ||||
304 | iterator_range<root_iterator> roots() { | |||
305 | return make_range(root_begin(), root_end()); | |||
306 | } | |||
307 | iterator_range<const_root_iterator> roots() const { | |||
308 | return make_range(root_begin(), root_end()); | |||
309 | } | |||
310 | ||||
311 | /// isPostDominator - Returns true if this analysis is based on postdominators. | |||
312 | /// | |||
313 | bool isPostDominator() const { return IsPostDominator; } | |||
314 | ||||
315 | /// compare - Return false if the other dominator tree base matches this | |||
316 | /// dominator tree base. Otherwise return true. | |||
317 | bool compare(const DominatorTreeBase &Other) const { | |||
318 | if (Parent != Other.Parent) return true; | |||
319 | ||||
320 | if (Roots.size() != Other.Roots.size()) | |||
321 | return true; | |||
322 | ||||
323 | if (!std::is_permutation(Roots.begin(), Roots.end(), Other.Roots.begin())) | |||
324 | return true; | |||
325 | ||||
326 | const DomTreeNodeMapType &OtherDomTreeNodes = Other.DomTreeNodes; | |||
327 | if (DomTreeNodes.size() != OtherDomTreeNodes.size()) | |||
328 | return true; | |||
329 | ||||
330 | for (const auto &DomTreeNode : DomTreeNodes) { | |||
331 | NodeT *BB = DomTreeNode.first; | |||
332 | typename DomTreeNodeMapType::const_iterator OI = | |||
333 | OtherDomTreeNodes.find(BB); | |||
334 | if (OI == OtherDomTreeNodes.end()) | |||
335 | return true; | |||
336 | ||||
337 | DomTreeNodeBase<NodeT> &MyNd = *DomTreeNode.second; | |||
338 | DomTreeNodeBase<NodeT> &OtherNd = *OI->second; | |||
339 | ||||
340 | if (MyNd.compare(&OtherNd)) | |||
341 | return true; | |||
342 | } | |||
343 | ||||
344 | return false; | |||
345 | } | |||
346 | ||||
347 | /// getNode - return the (Post)DominatorTree node for the specified basic | |||
348 | /// block. This is the same as using operator[] on this class. The result | |||
349 | /// may (but is not required to) be null for a block that is statically | |||
350 | /// unreachable in the forward (respectively, backward) CFG. | |||
351 | DomTreeNodeBase<NodeT> *getNode(const NodeT *BB) const { | |||
352 | auto I = DomTreeNodes.find(BB); | |||
353 | if (I != DomTreeNodes.end()) | |||
354 | return I->second.get(); | |||
355 | return nullptr; | |||
356 | } | |||
357 | ||||
358 | /// See getNode. | |||
359 | DomTreeNodeBase<NodeT> *operator[](const NodeT *BB) const { | |||
360 | return getNode(BB); | |||
361 | } | |||
362 | ||||
363 | /// getRootNode - This returns the entry node for the CFG of the function. If | |||
364 | /// this tree represents the post-dominance relations for a function, however, | |||
365 | /// this root may be a node with the block == NULL. This is the case when | |||
366 | /// there are multiple exit nodes from a particular function. Consumers of | |||
367 | /// post-dominance information must be capable of dealing with this | |||
368 | /// possibility. | |||
369 | /// | |||
370 | DomTreeNodeBase<NodeT> *getRootNode() { return RootNode; } | |||
371 | const DomTreeNodeBase<NodeT> *getRootNode() const { return RootNode; } | |||
372 | ||||
373 | /// Get all nodes dominated by R, including R itself. | |||
374 | void getDescendants(NodeT *R, SmallVectorImpl<NodeT *> &Result) const { | |||
375 | Result.clear(); | |||
376 | const DomTreeNodeBase<NodeT> *RN = getNode(R); | |||
377 | if (!RN) | |||
378 | return; // If R is unreachable, it will not be present in the DOM tree. | |||
379 | SmallVector<const DomTreeNodeBase<NodeT> *, 8> WL; | |||
380 | WL.push_back(RN); | |||
381 | ||||
382 | while (!WL.empty()) { | |||
383 | const DomTreeNodeBase<NodeT> *N = WL.pop_back_val(); | |||
384 | Result.push_back(N->getBlock()); | |||
385 | WL.append(N->begin(), N->end()); | |||
386 | } | |||
387 | } | |||
388 | ||||
389 | /// properlyDominates - Returns true iff A dominates B and A != B. | |||
390 | /// Note that this is not a constant time operation! | |||
391 | /// | |||
392 | bool properlyDominates(const DomTreeNodeBase<NodeT> *A, | |||
393 | const DomTreeNodeBase<NodeT> *B) const { | |||
394 | if (!A || !B) | |||
395 | return false; | |||
396 | if (A == B) | |||
397 | return false; | |||
398 | return dominates(A, B); | |||
399 | } | |||
400 | ||||
401 | bool properlyDominates(const NodeT *A, const NodeT *B) const; | |||
402 | ||||
403 | /// isReachableFromEntry - Return true if A is dominated by the entry | |||
404 | /// block of the function containing it. | |||
405 | bool isReachableFromEntry(const NodeT *A) const { | |||
406 | assert(!this->isPostDominator() && | |||
407 | "This is not implemented for post dominators"); | |||
408 | return isReachableFromEntry(getNode(const_cast<NodeT *>(A))); | |||
409 | } | |||
410 | ||||
411 | bool isReachableFromEntry(const DomTreeNodeBase<NodeT> *A) const { return A; } | |||
412 | ||||
413 | /// dominates - Returns true iff A dominates B. Note that this is not a | |||
414 | /// constant time operation! | |||
415 | /// | |||
416 | bool dominates(const DomTreeNodeBase<NodeT> *A, | |||
417 | const DomTreeNodeBase<NodeT> *B) const { | |||
418 | // A node trivially dominates itself. | |||
419 | if (B == A) | |||
420 | return true; | |||
421 | ||||
422 | // An unreachable node is dominated by anything. | |||
423 | if (!isReachableFromEntry(B)) | |||
424 | return true; | |||
425 | ||||
426 | // And dominates nothing. | |||
427 | if (!isReachableFromEntry(A)) | |||
428 | return false; | |||
429 | ||||
430 | if (B->getIDom() == A) return true; | |||
431 | ||||
432 | if (A->getIDom() == B) return false; | |||
433 | ||||
434 | // A can only dominate B if it is higher in the tree. | |||
435 | if (A->getLevel() >= B->getLevel()) return false; | |||
436 | ||||
437 | // Compare the result of the tree walk and the dfs numbers, if expensive | |||
438 | // checks are enabled. | |||
439 | #ifdef EXPENSIVE_CHECKS | |||
440 | assert((!DFSInfoValid || | |||
441 | (dominatedBySlowTreeWalk(A, B) == B->DominatedBy(A))) && | |||
442 | "Tree walk disagrees with dfs numbers!"); | |||
443 | #endif | |||
444 | ||||
445 | if (DFSInfoValid) | |||
446 | return B->DominatedBy(A); | |||
447 | ||||
448 | // If we end up with too many slow queries, just update the | |||
449 | // DFS numbers on the theory that we are going to keep querying. | |||
450 | SlowQueries++; | |||
451 | if (SlowQueries > 32) { | |||
452 | updateDFSNumbers(); | |||
453 | return B->DominatedBy(A); | |||
454 | } | |||
455 | ||||
456 | return dominatedBySlowTreeWalk(A, B); | |||
457 | } | |||
458 | ||||
459 | bool dominates(const NodeT *A, const NodeT *B) const; | |||
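  // Illustrative usage sketch (hypothetical names: a Function F with blocks
  // Entry and BB, and DominatorTree as the usual concrete instantiation
  // declared elsewhere in LLVM):
  //   DominatorTree DT(F);
  //   if (DT.dominates(&Entry, &BB)) {
  //     // every path from the entry to BB passes through Entry; the query
  //     // may walk the tree, so it is not constant time until DFS numbers
  //     // have been computed
  //   }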
460 | ||||
461 | NodeT *getRoot() const { | |||
462 | assert(this->Roots.size() == 1 && "Should always have entry node!"); | |||
463 | return this->Roots[0]; | |||
464 | } | |||
465 | ||||
466 | /// Find the nearest common dominator basic block of basic blocks A and B. | |||
467 | /// Both A and B must have tree nodes. | |||
468 | NodeT *findNearestCommonDominator(NodeT *A, NodeT *B) const { | |||
469 | assert(A && B && "Pointers are not valid"); | |||
470 | assert(A->getParent() == B->getParent() && | |||
471 | "Two blocks are not in same function"); | |||
472 | ||||
473 | // If either A or B is the entry block, then it is the nearest common | |||
474 | // dominator (for forward dominators). | |||
475 | if (!isPostDominator()) { | |||
476 | NodeT &Entry = A->getParent()->front(); | |||
477 | if (A == &Entry || B == &Entry) | |||
478 | return &Entry; | |||
479 | } | |||
480 | ||||
481 | DomTreeNodeBase<NodeT> *NodeA = getNode(A); | |||
482 | DomTreeNodeBase<NodeT> *NodeB = getNode(B); | |||
483 | assert(NodeA && "A must be in the tree"); | |||
484 | assert(NodeB && "B must be in the tree"); | |||
485 | ||||
486 | // Use level information to go up the tree until the levels match. Then | |||
487 | // continue going up until we arrive at the same node. | |||
488 | while (NodeA != NodeB) { | |||
489 | if (NodeA->getLevel() < NodeB->getLevel()) std::swap(NodeA, NodeB); | |||
490 | ||||
491 | NodeA = NodeA->IDom; | |||
492 | } | |||
493 | ||||
494 | return NodeA->getBlock(); | |||
| ||||
495 | } | |||
496 | ||||
497 | const NodeT *findNearestCommonDominator(const NodeT *A, | |||
498 | const NodeT *B) const { | |||
499 | // Cast away the const qualifiers here. This is ok since | |||
500 | // const is re-introduced on the return type. | |||
501 | return findNearestCommonDominator(const_cast<NodeT *>(A), | |||
502 | const_cast<NodeT *>(B)); | |||
503 | } | |||
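  // Illustrative usage sketch (hypothetical: NodeT is BasicBlock and A, B are
  // reachable blocks of the same function, so getNode() is non-null for both):
  //   BasicBlock *NCD = DT.findNearestCommonDominator(A, B);
  // Passing a block without a tree node violates the asserted precondition
  // above; in a build where the asserts compile away, the level walk then
  // dereferences a null NodeA or NodeB.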
504 | ||||
505 | bool isVirtualRoot(const DomTreeNodeBase<NodeT> *A) const { | |||
506 | return isPostDominator() && !A->getBlock(); | |||
507 | } | |||
508 | ||||
509 | //===--------------------------------------------------------------------===// | |||
510 | // API to update (Post)DominatorTree information based on modifications to | |||
511 | // the CFG... | |||
512 | ||||
513 | /// Inform the dominator tree about a sequence of CFG edge insertions and | |||
514 | /// deletions and perform a batch update on the tree. | |||
515 | /// | |||
516 | /// This function should be used when there were multiple CFG updates after | |||
517 | /// the last dominator tree update. It takes care of performing the updates | |||
518 | /// in sync with the CFG and optimizes away the redundant operations that | |||
519 | /// cancel each other. | |||
520 | /// The function expects the sequence of updates to be balanced, e.g.: | |||
521 | /// - {{Insert, A, B}, {Delete, A, B}, {Insert, A, B}} is fine, because | |||
522 | /// logically it results in a single insertion. | |||
523 | /// - {{Insert, A, B}, {Insert, A, B}} is invalid, because it doesn't make | |||
524 | /// sense to insert the same edge twice. | |||
525 | /// | |||
526 | /// What's more, the function assumes that it's safe to ask every node in the | |||
527 | /// CFG about its children and inverse children. This implies that deletions | |||
528 | /// of CFG edges must not delete the CFG nodes before calling this function. | |||
529 | /// | |||
530 | /// The applyUpdates function can reorder the updates and remove redundant | |||
531 | /// ones internally. The batch updater is also able to detect sequences of | |||
532 | /// zero and exactly one update -- it's optimized to do less work in these | |||
533 | /// cases. | |||
534 | /// | |||
535 | /// Note that for postdominators it automatically takes care of applying | |||
536 | /// updates on reverse edges internally (so there's no need to swap the | |||
537 | /// From and To pointers when constructing DominatorTree::UpdateType). | |||
538 | /// The type of updates is the same for DomTreeBase<T> and PostDomTreeBase<T> | |||
539 | /// with the same template parameter T. | |||
540 | /// | |||
541 | /// \param Updates An unordered sequence of updates to perform. The current | |||
542 | /// CFG and the reverse of these updates provide the pre-view of the CFG. | |||
543 | /// | |||
544 | void applyUpdates(ArrayRef<UpdateType> Updates) { | |||
545 | GraphDiff<NodePtr, IsPostDominator> PreViewCFG( | |||
546 | Updates, /*ReverseApplyUpdates=*/true); | |||
547 | DomTreeBuilder::ApplyUpdates(*this, PreViewCFG, nullptr); | |||
548 | } | |||
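  // Illustrative usage sketch (hypothetical blocks A, B, C in a function whose
  // tree DT is maintained incrementally; UpdateType and the Insert/Delete
  // kinds are the members defined above):
  //   SmallVector<DominatorTree::UpdateType, 4> Updates;
  //   Updates.push_back({DominatorTree::Insert, A, B});
  //   Updates.push_back({DominatorTree::Delete, A, C});
  //   // ... apply the matching changes to the CFG itself ...
  //   DT.applyUpdates(Updates);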
549 | ||||
550 | /// \param Updates An unordered sequence of updates to perform. The current | |||
551 | /// CFG and the reverse of these updates provide the pre-view of the CFG. | |||
552 | /// \param PostViewUpdates An unordered sequence of updates to perform in order | |||
553 | /// to obtain a post-view of the CFG. The DT will be updated assuming the | |||
554 | /// obtained PostViewCFG is the desired end state. | |||
555 | void applyUpdates(ArrayRef<UpdateType> Updates, | |||
556 | ArrayRef<UpdateType> PostViewUpdates) { | |||
557 | if (Updates.empty()) { | |||
558 | GraphDiff<NodePtr, IsPostDom> PostViewCFG(PostViewUpdates); | |||
559 | DomTreeBuilder::ApplyUpdates(*this, PostViewCFG, &PostViewCFG); | |||
560 | } else { | |||
561 | // PreViewCFG needs to merge Updates and PostViewCFG. The updates in | |||
562 | // Updates need to be reversed, and match the direction in PostViewCFG. | |||
563 | // The PostViewCFG is created with updates reversed (equivalent to changes | |||
564 | // made to the CFG), so the PreViewCFG needs all the updates reverse | |||
565 | // applied. | |||
566 | SmallVector<UpdateType> AllUpdates(Updates.begin(), Updates.end()); | |||
567 | append_range(AllUpdates, PostViewUpdates); | |||
568 | GraphDiff<NodePtr, IsPostDom> PreViewCFG(AllUpdates, | |||
569 | /*ReverseApplyUpdates=*/true); | |||
570 | GraphDiff<NodePtr, IsPostDom> PostViewCFG(PostViewUpdates); | |||
571 | DomTreeBuilder::ApplyUpdates(*this, PreViewCFG, &PostViewCFG); | |||
572 | } | |||
573 | } | |||
574 | ||||
575 | /// Inform the dominator tree about a CFG edge insertion and update the tree. | |||
576 | /// | |||
577 | /// This function has to be called just before or just after making the update | |||
578 | /// on the actual CFG. There cannot be any other updates that the dominator | |||
579 | /// tree doesn't know about. | |||
580 | /// | |||
581 | /// Note that for postdominators it automatically takes care of inserting | |||
582 | /// a reverse edge internally (so there's no need to swap the parameters). | |||
583 | /// | |||
584 | void insertEdge(NodeT *From, NodeT *To) { | |||
585 | assert(From); | |||
586 | assert(To); | |||
587 | assert(From->getParent() == Parent); | |||
588 | assert(To->getParent() == Parent); | |||
589 | DomTreeBuilder::InsertEdge(*this, From, To); | |||
590 | } | |||
591 | ||||
592 | /// Inform the dominator tree about a CFG edge deletion and update the tree. | |||
593 | /// | |||
594 | /// This function has to be called just after making the update on the actual | |||
595 | /// CFG. An internal function checks, in DEBUG mode, that the edge no longer | |||
596 | /// exists in the CFG. There cannot be any other updates that the | |||
597 | /// dominator tree doesn't know about. | |||
598 | /// | |||
599 | /// Note that for postdominators it automatically takes care of deleting | |||
600 | /// a reverse edge internally (so there's no need to swap the parameters). | |||
601 | /// | |||
602 | void deleteEdge(NodeT *From, NodeT *To) { | |||
603 | assert(From); | |||
604 | assert(To); | |||
605 | assert(From->getParent() == Parent); | |||
606 | assert(To->getParent() == Parent); | |||
607 | DomTreeBuilder::DeleteEdge(*this, From, To); | |||
608 | } | |||
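  // Illustrative usage sketch (hypothetical blocks From and To, with the CFG
  // already rewired so that the edge From->To no longer exists):
  //   DT.deleteEdge(From, To);
  // A newly added branch is reported analogously with DT.insertEdge(From, To);
  // when several edges change at once, prefer applyUpdates() above.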
609 | ||||
610 | /// Add a new node to the dominator tree information. | |||
611 | /// | |||
612 | /// This creates a new node as a child of DomBB dominator node, linking it | |||
613 | /// into the children list of the immediate dominator. | |||
614 | /// | |||
615 | /// \param BB New node in CFG. | |||
616 | /// \param DomBB CFG node that is dominator for BB. | |||
617 | /// \returns New dominator tree node that represents new CFG node. | |||
618 | /// | |||
619 | DomTreeNodeBase<NodeT> *addNewBlock(NodeT *BB, NodeT *DomBB) { | |||
620 | assert(getNode(BB) == nullptr && "Block already in dominator tree!"); | |||
621 | DomTreeNodeBase<NodeT> *IDomNode = getNode(DomBB); | |||
622 | assert(IDomNode && "Not immediate dominator specified for block!"); | |||
623 | DFSInfoValid = false; | |||
624 | return createChild(BB, IDomNode); | |||
625 | } | |||
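  // Illustrative usage sketch (hypothetical: NewBB was just created in
  // function F with context Ctx, and Pred is its sole predecessor, hence its
  // immediate dominator):
  //   BasicBlock *NewBB = BasicBlock::Create(Ctx, "new.bb", &F);
  //   // ... branch from Pred to NewBB ...
  //   DT.addNewBlock(NewBB, Pred);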
626 | ||||
627 | /// Add a new node to the forward dominator tree and make it a new root. | |||
628 | /// | |||
629 | /// \param BB New node in CFG. | |||
630 | /// \returns New dominator tree node that represents new CFG node. | |||
631 | /// | |||
632 | DomTreeNodeBase<NodeT> *setNewRoot(NodeT *BB) { | |||
633 | assert(getNode(BB) == nullptr && "Block already in dominator tree!"); | |||
634 | assert(!this->isPostDominator() && | |||
635 | "Cannot change root of post-dominator tree"); | |||
636 | DFSInfoValid = false; | |||
637 | DomTreeNodeBase<NodeT> *NewNode = createNode(BB); | |||
638 | if (Roots.empty()) { | |||
639 | addRoot(BB); | |||
640 | } else { | |||
641 | assert(Roots.size() == 1); | |||
642 | NodeT *OldRoot = Roots.front(); | |||
643 | auto &OldNode = DomTreeNodes[OldRoot]; | |||
644 | OldNode = NewNode->addChild(std::move(DomTreeNodes[OldRoot])); | |||
645 | OldNode->IDom = NewNode; | |||
646 | OldNode->UpdateLevel(); | |||
647 | Roots[0] = BB; | |||
648 | } | |||
649 | return RootNode = NewNode; | |||
650 | } | |||
651 | ||||
652 | /// changeImmediateDominator - This method is used to update the dominator | |||
653 | /// tree information when a node's immediate dominator changes. | |||
654 | /// | |||
655 | void changeImmediateDominator(DomTreeNodeBase<NodeT> *N, | |||
656 | DomTreeNodeBase<NodeT> *NewIDom) { | |||
657 | assert(N && NewIDom && "Cannot change null node pointers!"); | |||
658 | DFSInfoValid = false; | |||
659 | N->setIDom(NewIDom); | |||
660 | } | |||
661 | ||||
662 | void changeImmediateDominator(NodeT *BB, NodeT *NewBB) { | |||
663 | changeImmediateDominator(getNode(BB), getNode(NewBB)); | |||
664 | } | |||
665 | ||||
666 | /// eraseNode - Removes a node from the dominator tree. Block must not | |||
667 | /// dominate any other blocks. Removes node from its immediate dominator's | |||
668 | /// children list. Deletes dominator node associated with basic block BB. | |||
669 | void eraseNode(NodeT *BB) { | |||
670 | DomTreeNodeBase<NodeT> *Node = getNode(BB); | |||
671 | assert(Node && "Removing node that isn't in dominator tree."); | |||
672 | assert(Node->isLeaf() && "Node is not a leaf node."); | |||
673 | ||||
674 | DFSInfoValid = false; | |||
675 | ||||
676 | // Remove node from immediate dominator's children list. | |||
677 | DomTreeNodeBase<NodeT> *IDom = Node->getIDom(); | |||
678 | if (IDom) { | |||
679 | const auto I = find(IDom->Children, Node); | |||
680 | assert(I != IDom->Children.end() && | |||
681 | "Not in immediate dominator children set!"); | |||
682 | // I am no longer your child... | |||
683 | IDom->Children.erase(I); | |||
684 | } | |||
685 | ||||
686 | DomTreeNodes.erase(BB); | |||
687 | ||||
688 | if (!IsPostDom) return; | |||
689 | ||||
690 | // Remember to update PostDominatorTree roots. | |||
691 | auto RIt = llvm::find(Roots, BB); | |||
692 | if (RIt != Roots.end()) { | |||
693 | std::swap(*RIt, Roots.back()); | |||
694 | Roots.pop_back(); | |||
695 | } | |||
696 | } | |||
697 | ||||
698 | /// splitBlock - BB is split and now it has one successor. Update dominator | |||
699 | /// tree to reflect this change. | |||
700 | void splitBlock(NodeT *NewBB) { | |||
701 | if (IsPostDominator) | |||
702 | Split<Inverse<NodeT *>>(NewBB); | |||
703 | else | |||
704 | Split<NodeT *>(NewBB); | |||
705 | } | |||
706 | ||||
707 | /// print - Convert to human readable form | |||
708 | /// | |||
709 | void print(raw_ostream &O) const { | |||
710 | O << "=============================--------------------------------\n"; | |||
711 | if (IsPostDominator) | |||
712 | O << "Inorder PostDominator Tree: "; | |||
713 | else | |||
714 | O << "Inorder Dominator Tree: "; | |||
715 | if (!DFSInfoValid) | |||
716 | O << "DFSNumbers invalid: " << SlowQueries << " slow queries."; | |||
717 | O << "\n"; | |||
718 | ||||
719 | // The postdom tree can have a null root if there are no returns. | |||
720 | if (getRootNode()) PrintDomTree<NodeT>(getRootNode(), O, 1); | |||
721 | O << "Roots: "; | |||
722 | for (const NodePtr Block : Roots) { | |||
723 | Block->printAsOperand(O, false); | |||
724 | O << " "; | |||
725 | } | |||
726 | O << "\n"; | |||
727 | } | |||
728 | ||||
729 | public: | |||
730 | /// updateDFSNumbers - Assign In and Out numbers to the nodes while walking | |||
731 | /// dominator tree in dfs order. | |||
732 | void updateDFSNumbers() const { | |||
733 | if (DFSInfoValid) { | |||
734 | SlowQueries = 0; | |||
735 | return; | |||
736 | } | |||
737 | ||||
738 | SmallVector<std::pair<const DomTreeNodeBase<NodeT> *, | |||
739 | typename DomTreeNodeBase<NodeT>::const_iterator>, | |||
740 | 32> WorkStack; | |||
741 | ||||
742 | const DomTreeNodeBase<NodeT> *ThisRoot = getRootNode(); | |||
743 | assert((!Parent || ThisRoot) && "Empty constructed DomTree"); | |||
744 | if (!ThisRoot) | |||
745 | return; | |||
746 | ||||
747 | // Both dominators and postdominators have a single root node. In the case | |||
748 | // of PostDominatorTree, this node is a virtual root. | |||
749 | WorkStack.push_back({ThisRoot, ThisRoot->begin()}); | |||
750 | ||||
751 | unsigned DFSNum = 0; | |||
752 | ThisRoot->DFSNumIn = DFSNum++; | |||
753 | ||||
754 | while (!WorkStack.empty()) { | |||
755 | const DomTreeNodeBase<NodeT> *Node = WorkStack.back().first; | |||
756 | const auto ChildIt = WorkStack.back().second; | |||
757 | ||||
758 | // If we visited all of the children of this node, "recurse" back up the | |||
759 | // stack setting the DFSNumOut. | |||
760 | if (ChildIt == Node->end()) { | |||
761 | Node->DFSNumOut = DFSNum++; | |||
762 | WorkStack.pop_back(); | |||
763 | } else { | |||
764 | // Otherwise, recursively visit this child. | |||
765 | const DomTreeNodeBase<NodeT> *Child = *ChildIt; | |||
766 | ++WorkStack.back().second; | |||
767 | ||||
768 | WorkStack.push_back({Child, Child->begin()}); | |||
769 | Child->DFSNumIn = DFSNum++; | |||
770 | } | |||
771 | } | |||
772 | ||||
773 | SlowQueries = 0; | |||
774 | DFSInfoValid = true; | |||
775 | } | |||
776 | ||||
777 | /// recalculate - compute a dominator tree for the given function | |||
778 | void recalculate(ParentType &Func) { | |||
779 | Parent = &Func; | |||
780 | DomTreeBuilder::Calculate(*this); | |||
781 | } | |||
782 | ||||
783 | void recalculate(ParentType &Func, ArrayRef<UpdateType> Updates) { | |||
784 | Parent = &Func; | |||
785 | DomTreeBuilder::CalculateWithUpdates(*this, Updates); | |||
786 | } | |||
787 | ||||
788 | /// verify - checks if the tree is correct. There are 3 levels of verification: | |||
789 | /// - Full -- verifies if the tree is correct by making sure all the | |||
790 | /// properties (including the parent and the sibling property) | |||
791 | /// hold. | |||
792 | /// Takes O(N^3) time. | |||
793 | /// | |||
794 | /// - Basic -- checks if the tree is correct, but compares it to a freshly | |||
795 | /// constructed tree instead of checking the sibling property. | |||
796 | /// Takes O(N^2) time. | |||
797 | /// | |||
798 | /// - Fast -- checks basic tree structure and compares it with a freshly | |||
799 | /// constructed tree. | |||
800 | /// Takes O(N^2) time worst case, but is faster in practice (same | |||
801 | /// as tree construction). | |||
802 | bool verify(VerificationLevel VL = VerificationLevel::Full) const { | |||
803 | return DomTreeBuilder::Verify(*this, VL); | |||
804 | } | |||
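  // Illustrative usage sketch: a transform that updates DT by hand can
  // cross-check its bookkeeping in assert-enabled builds, e.g.
  //   assert(DT.verify(DominatorTree::VerificationLevel::Fast));
  // Full verification is cubic and is normally reserved for debugging.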
805 | ||||
806 | void reset() { | |||
807 | DomTreeNodes.clear(); | |||
808 | Roots.clear(); | |||
809 | RootNode = nullptr; | |||
810 | Parent = nullptr; | |||
811 | DFSInfoValid = false; | |||
812 | SlowQueries = 0; | |||
813 | } | |||
814 | ||||
815 | protected: | |||
816 | void addRoot(NodeT *BB) { this->Roots.push_back(BB); } | |||
817 | ||||
818 | DomTreeNodeBase<NodeT> *createChild(NodeT *BB, DomTreeNodeBase<NodeT> *IDom) { | |||
819 | return (DomTreeNodes[BB] = IDom->addChild( | |||
820 | std::make_unique<DomTreeNodeBase<NodeT>>(BB, IDom))) | |||
821 | .get(); | |||
822 | } | |||
823 | ||||
824 | DomTreeNodeBase<NodeT> *createNode(NodeT *BB) { | |||
825 | return (DomTreeNodes[BB] = | |||
826 | std::make_unique<DomTreeNodeBase<NodeT>>(BB, nullptr)) | |||
827 | .get(); | |||
828 | } | |||
829 | ||||
830 | // NewBB is split and now it has one successor. Update dominator tree to | |||
831 | // reflect this change. | |||
832 | template <class N> | |||
833 | void Split(typename GraphTraits<N>::NodeRef NewBB) { | |||
834 | using GraphT = GraphTraits<N>; | |||
835 | using NodeRef = typename GraphT::NodeRef; | |||
836 | assert(std::distance(GraphT::child_begin(NewBB), | |||
837 | GraphT::child_end(NewBB)) == 1 && | |||
838 | "NewBB should have a single successor!"); | |||
839 | NodeRef NewBBSucc = *GraphT::child_begin(NewBB); | |||
840 | ||||
841 | SmallVector<NodeRef, 4> PredBlocks(children<Inverse<N>>(NewBB)); | |||
842 | ||||
843 | assert(!PredBlocks.empty() && "No predblocks?"); | |||
844 | ||||
845 | bool NewBBDominatesNewBBSucc = true; | |||
846 | for (auto Pred : children<Inverse<N>>(NewBBSucc)) { | |||
847 | if (Pred != NewBB && !dominates(NewBBSucc, Pred) && | |||
848 | isReachableFromEntry(Pred)) { | |||
849 | NewBBDominatesNewBBSucc = false; | |||
850 | break; | |||
851 | } | |||
852 | } | |||
853 | ||||
854 | // Find NewBB's immediate dominator and create new dominator tree node for | |||
855 | // NewBB. | |||
856 | NodeT *NewBBIDom = nullptr; | |||
857 | unsigned i = 0; | |||
858 | for (i = 0; i < PredBlocks.size(); ++i) | |||
859 | if (isReachableFromEntry(PredBlocks[i])) { | |||
860 | NewBBIDom = PredBlocks[i]; | |||
861 | break; | |||
862 | } | |||
863 | ||||
864 | // It's possible that none of the predecessors of NewBB are reachable; | |||
865 | // in that case, NewBB itself is unreachable, so nothing needs to be | |||
866 | // changed. | |||
867 | if (!NewBBIDom) return; | |||
868 | ||||
869 | for (i = i + 1; i < PredBlocks.size(); ++i) { | |||
870 | if (isReachableFromEntry(PredBlocks[i])) | |||
871 | NewBBIDom = findNearestCommonDominator(NewBBIDom, PredBlocks[i]); | |||
872 | } | |||
873 | ||||
874 | // Create the new dominator tree node... and set the idom of NewBB. | |||
875 | DomTreeNodeBase<NodeT> *NewBBNode = addNewBlock(NewBB, NewBBIDom); | |||
876 | ||||
877 | // If NewBB strictly dominates other blocks, then it is now the immediate | |||
878 | // dominator of NewBBSucc. Update the dominator tree as appropriate. | |||
879 | if (NewBBDominatesNewBBSucc) { | |||
880 | DomTreeNodeBase<NodeT> *NewBBSuccNode = getNode(NewBBSucc); | |||
881 | changeImmediateDominator(NewBBSuccNode, NewBBNode); | |||
882 | } | |||
883 | } | |||
884 | ||||
885 | private: | |||
886 | bool dominatedBySlowTreeWalk(const DomTreeNodeBase<NodeT> *A, | |||
887 | const DomTreeNodeBase<NodeT> *B) const { | |||
888 | assert(A != B); | |||
889 | assert(isReachableFromEntry(B)); | |||
890 | assert(isReachableFromEntry(A)); | |||
891 | ||||
892 | const unsigned ALevel = A->getLevel(); | |||
893 | const DomTreeNodeBase<NodeT> *IDom; | |||
894 | ||||
895 | // Don't walk nodes above A's subtree. When we reach A's level, we must | |||
896 | // either find A or be in some other subtree not dominated by A. | |||
897 | while ((IDom = B->getIDom()) != nullptr && IDom->getLevel() >= ALevel) | |||
898 | B = IDom; // Walk up the tree | |||
899 | ||||
900 | return B == A; | |||
901 | } | |||
902 | ||||
903 | /// Wipe this tree's state without releasing any resources. | |||
904 | /// | |||
905 | /// This is essentially a post-move helper only. It leaves the object in an | |||
906 | /// assignable and destroyable state, but otherwise invalid. | |||
907 | void wipe() { | |||
908 | DomTreeNodes.clear(); | |||
909 | RootNode = nullptr; | |||
910 | Parent = nullptr; | |||
911 | } | |||
912 | }; | |||
913 | ||||
914 | template <typename T> | |||
915 | using DomTreeBase = DominatorTreeBase<T, false>; | |||
916 | ||||
917 | template <typename T> | |||
918 | using PostDomTreeBase = DominatorTreeBase<T, true>; | |||
919 | ||||
920 | // These two functions are declared out of line as a workaround for building | |||
921 | // with old (< r147295) versions of clang because of pr11642. | |||
922 | template <typename NodeT, bool IsPostDom> | |||
923 | bool DominatorTreeBase<NodeT, IsPostDom>::dominates(const NodeT *A, | |||
924 | const NodeT *B) const { | |||
925 | if (A == B) | |||
926 | return true; | |||
927 | ||||
928 | // Cast away the const qualifiers here. This is ok since | |||
929 | // this function doesn't actually return the values returned | |||
930 | // from getNode. | |||
931 | return dominates(getNode(const_cast<NodeT *>(A)), | |||
932 | getNode(const_cast<NodeT *>(B))); | |||
933 | } | |||
934 | template <typename NodeT, bool IsPostDom> | |||
935 | bool DominatorTreeBase<NodeT, IsPostDom>::properlyDominates( | |||
936 | const NodeT *A, const NodeT *B) const { | |||
937 | if (A == B) | |||
938 | return false; | |||
939 | ||||
940 | // Cast away the const qualifiers here. This is ok since | |||
941 | // this function doesn't actually return the values returned | |||
942 | // from getNode. | |||
943 | return dominates(getNode(const_cast<NodeT *>(A)), | |||
944 | getNode(const_cast<NodeT *>(B))); | |||
945 | } | |||
946 | ||||
947 | } // end namespace llvm | |||
948 | ||||
949 | #endif // LLVM_SUPPORT_GENERICDOMTREE_H |
1 | //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the DenseMap class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #ifndef LLVM_ADT_DENSEMAP_H |
14 | #define LLVM_ADT_DENSEMAP_H |
15 | |
16 | #include "llvm/ADT/DenseMapInfo.h" |
17 | #include "llvm/ADT/EpochTracker.h" |
18 | #include "llvm/Support/AlignOf.h" |
19 | #include "llvm/Support/Compiler.h" |
20 | #include "llvm/Support/MathExtras.h" |
21 | #include "llvm/Support/MemAlloc.h" |
22 | #include "llvm/Support/ReverseIteration.h" |
23 | #include "llvm/Support/type_traits.h" |
24 | #include <algorithm> |
25 | #include <cassert> |
26 | #include <cstddef> |
27 | #include <cstring> |
28 | #include <initializer_list> |
29 | #include <iterator> |
30 | #include <new> |
31 | #include <type_traits> |
32 | #include <utility> |
33 | |
34 | namespace llvm { |
35 | |
36 | namespace detail { |
37 | |
38 | // We extend a pair to allow users to override the bucket type with their own |
39 | // implementation without requiring two members. |
40 | template <typename KeyT, typename ValueT> |
41 | struct DenseMapPair : public std::pair<KeyT, ValueT> { |
42 | using std::pair<KeyT, ValueT>::pair; |
43 | |
44 | KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; } |
45 | const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; } |
46 | ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; } |
47 | const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; } |
48 | }; |
49 | |
50 | } // end namespace detail |
51 | |
52 | template <typename KeyT, typename ValueT, |
53 | typename KeyInfoT = DenseMapInfo<KeyT>, |
54 | typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>, |
55 | bool IsConst = false> |
56 | class DenseMapIterator; |
57 | |
58 | template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT, |
59 | typename BucketT> |
60 | class DenseMapBase : public DebugEpochBase { |
61 | template <typename T> |
62 | using const_arg_type_t = typename const_pointer_or_const_ref<T>::type; |
63 | |
64 | public: |
65 | using size_type = unsigned; |
66 | using key_type = KeyT; |
67 | using mapped_type = ValueT; |
68 | using value_type = BucketT; |
69 | |
70 | using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>; |
71 | using const_iterator = |
72 | DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>; |
73 | |
74 | inline iterator begin() { |
75 | // When the map is empty, avoid the overhead of advancing/retreating past |
76 | // empty buckets. |
77 | if (empty()) |
78 | return end(); |
79 | if (shouldReverseIterate<KeyT>()) |
80 | return makeIterator(getBucketsEnd() - 1, getBuckets(), *this); |
81 | return makeIterator(getBuckets(), getBucketsEnd(), *this); |
82 | } |
83 | inline iterator end() { |
84 | return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true); |
85 | } |
86 | inline const_iterator begin() const { |
87 | if (empty()) |
88 | return end(); |
89 | if (shouldReverseIterate<KeyT>()) |
90 | return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this); |
91 | return makeConstIterator(getBuckets(), getBucketsEnd(), *this); |
92 | } |
93 | inline const_iterator end() const { |
94 | return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true); |
95 | } |
96 | |
97 | LLVM_NODISCARD bool empty() const {
98 | return getNumEntries() == 0; |
99 | } |
100 | unsigned size() const { return getNumEntries(); } |
101 | |
102 | /// Grow the densemap so that it can contain at least \p NumEntries items |
103 | /// before resizing again. |
104 | void reserve(size_type NumEntries) { |
105 | auto NumBuckets = getMinBucketToReserveForEntries(NumEntries); |
106 | incrementEpoch(); |
107 | if (NumBuckets > getNumBuckets()) |
108 | grow(NumBuckets); |
109 | } |
110 | |
111 | void clear() { |
112 | incrementEpoch(); |
113 | if (getNumEntries() == 0 && getNumTombstones() == 0) return; |
114 | |
115 | // If the capacity of the array is huge, and the # elements used is small, |
116 | // shrink the array. |
117 | if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) { |
118 | shrink_and_clear(); |
119 | return; |
120 | } |
121 | |
122 | const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); |
123 | if (std::is_trivially_destructible<ValueT>::value) { |
124 | // Use a simpler loop when values don't need destruction. |
125 | for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) |
126 | P->getFirst() = EmptyKey; |
127 | } else { |
128 | unsigned NumEntries = getNumEntries(); |
129 | for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { |
130 | if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) { |
131 | if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) { |
132 | P->getSecond().~ValueT(); |
133 | --NumEntries; |
134 | } |
135 | P->getFirst() = EmptyKey; |
136 | } |
137 | } |
138 | assert(NumEntries == 0 && "Node count imbalance!");
139 | } |
140 | setNumEntries(0); |
141 | setNumTombstones(0); |
142 | } |
143 | |
144 | /// Return 1 if the specified key is in the map, 0 otherwise. |
145 | size_type count(const_arg_type_t<KeyT> Val) const { |
146 | const BucketT *TheBucket; |
147 | return LookupBucketFor(Val, TheBucket) ? 1 : 0; |
148 | } |
149 | |
150 | iterator find(const_arg_type_t<KeyT> Val) { |
151 | BucketT *TheBucket; |
152 | if (LookupBucketFor(Val, TheBucket)) |
153 | return makeIterator(TheBucket, |
154 | shouldReverseIterate<KeyT>() ? getBuckets() |
155 | : getBucketsEnd(), |
156 | *this, true); |
157 | return end(); |
158 | } |
159 | const_iterator find(const_arg_type_t<KeyT> Val) const { |
160 | const BucketT *TheBucket; |
161 | if (LookupBucketFor(Val, TheBucket)) |
162 | return makeConstIterator(TheBucket, |
163 | shouldReverseIterate<KeyT>() ? getBuckets() |
164 | : getBucketsEnd(), |
165 | *this, true); |
166 | return end(); |
167 | } |
168 | |
169 | /// Alternate version of find() which allows a different, and possibly |
170 | /// less expensive, key type. |
171 | /// The DenseMapInfo is responsible for supplying methods |
172 | /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key |
173 | /// type used. |
174 | template<class LookupKeyT> |
175 | iterator find_as(const LookupKeyT &Val) { |
176 | BucketT *TheBucket; |
177 | if (LookupBucketFor(Val, TheBucket)) |
178 | return makeIterator(TheBucket, |
179 | shouldReverseIterate<KeyT>() ? getBuckets() |
180 | : getBucketsEnd(), |
181 | *this, true); |
182 | return end(); |
183 | } |
184 | template<class LookupKeyT> |
185 | const_iterator find_as(const LookupKeyT &Val) const { |
186 | const BucketT *TheBucket; |
187 | if (LookupBucketFor(Val, TheBucket)) |
188 | return makeConstIterator(TheBucket, |
189 | shouldReverseIterate<KeyT>() ? getBuckets() |
190 | : getBucketsEnd(), |
191 | *this, true); |
192 | return end(); |
193 | } |
194 | |
195 | /// lookup - Return the entry for the specified key, or a default |
196 | /// constructed value if no such entry exists. |
197 | ValueT lookup(const_arg_type_t<KeyT> Val) const { |
198 | const BucketT *TheBucket; |
199 | if (LookupBucketFor(Val, TheBucket)) |
200 | return TheBucket->getSecond(); |
201 | return ValueT(); |
202 | } |
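  // Illustrative usage sketch (hypothetical map from Instruction* to a
  // numbering; consume() stands in for arbitrary user code):
  //   DenseMap<Instruction *, unsigned> Order;
  //   auto It = Order.find(I);
  //   if (It != Order.end())
  //     consume(It->second);          // key present: iterator points at pair
  //   unsigned N = Order.lookup(I);   // absent keys yield value-initialized 0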
203 | |
204 | // Inserts key,value pair into the map if the key isn't already in the map. |
205 | // If the key is already in the map, it returns false and doesn't update the |
206 | // value. |
207 | std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) { |
208 | return try_emplace(KV.first, KV.second); |
209 | } |
210 | |
211 | // Inserts key,value pair into the map if the key isn't already in the map. |
212 | // If the key is already in the map, it returns false and doesn't update the |
213 | // value. |
214 | std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) { |
215 | return try_emplace(std::move(KV.first), std::move(KV.second)); |
216 | } |
217 | |
218 | // Inserts key,value pair into the map if the key isn't already in the map. |
219 | // The value is constructed in-place if the key is not in the map, otherwise |
220 | // it is not moved. |
221 | template <typename... Ts> |
222 | std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) { |
223 | BucketT *TheBucket; |
224 | if (LookupBucketFor(Key, TheBucket)) |
225 | return std::make_pair(makeIterator(TheBucket, |
226 | shouldReverseIterate<KeyT>() |
227 | ? getBuckets() |
228 | : getBucketsEnd(), |
229 | *this, true), |
230 | false); // Already in map. |
231 | |
232 | // Otherwise, insert the new element. |
233 | TheBucket = |
234 | InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...); |
235 | return std::make_pair(makeIterator(TheBucket, |
236 | shouldReverseIterate<KeyT>() |
237 | ? getBuckets() |
238 | : getBucketsEnd(), |
239 | *this, true), |
240 | true); |
241 | } |
242 | |
243 | // Inserts key,value pair into the map if the key isn't already in the map. |
244 | // The value is constructed in-place if the key is not in the map, otherwise |
245 | // it is not moved. |
246 | template <typename... Ts> |
247 | std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) { |
248 | BucketT *TheBucket; |
249 | if (LookupBucketFor(Key, TheBucket)) |
250 | return std::make_pair(makeIterator(TheBucket, |
251 | shouldReverseIterate<KeyT>() |
252 | ? getBuckets() |
253 | : getBucketsEnd(), |
254 | *this, true), |
255 | false); // Already in map. |
256 | |
257 | // Otherwise, insert the new element. |
258 | TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...); |
259 | return std::make_pair(makeIterator(TheBucket, |
260 | shouldReverseIterate<KeyT>() |
261 | ? getBuckets() |
262 | : getBucketsEnd(), |
263 | *this, true), |
264 | true); |
265 | } |
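  // Illustrative usage sketch (hypothetical occurrence counter keyed by
  // Value*): the returned pair reports whether the key was newly inserted.
  //   DenseMap<const Value *, unsigned> Count;
  //   auto R = Count.try_emplace(V, 1); // constructs the value only if absent
  //   if (!R.second)
  //     ++R.first->second;             // already present: bump the old value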
266 | |
267 | /// Alternate version of insert() which allows a different, and possibly |
268 | /// less expensive, key type. |
269 | /// The DenseMapInfo is responsible for supplying methods |
270 | /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key |
271 | /// type used. |
272 | template <typename LookupKeyT> |
273 | std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV, |
274 | const LookupKeyT &Val) { |
275 | BucketT *TheBucket; |
276 | if (LookupBucketFor(Val, TheBucket)) |
277 | return std::make_pair(makeIterator(TheBucket, |
278 | shouldReverseIterate<KeyT>() |
279 | ? getBuckets() |
280 | : getBucketsEnd(), |
281 | *this, true), |
282 | false); // Already in map. |
283 | |
284 | // Otherwise, insert the new element. |
285 | TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first), |
286 | std::move(KV.second), Val); |
287 | return std::make_pair(makeIterator(TheBucket, |
288 | shouldReverseIterate<KeyT>() |
289 | ? getBuckets() |
290 | : getBucketsEnd(), |
291 | *this, true), |
292 | true); |
293 | } |
294 | |
295 | /// insert - Range insertion of pairs. |
296 | template<typename InputIt> |
297 | void insert(InputIt I, InputIt E) { |
298 | for (; I != E; ++I) |
299 | insert(*I); |
300 | } |
301 | |
302 | bool erase(const KeyT &Val) { |
303 | BucketT *TheBucket; |
304 | if (!LookupBucketFor(Val, TheBucket)) |
305 | return false; // not in map. |
306 | |
307 | TheBucket->getSecond().~ValueT(); |
308 | TheBucket->getFirst() = getTombstoneKey(); |
309 | decrementNumEntries(); |
310 | incrementNumTombstones(); |
311 | return true; |
312 | } |
313 | void erase(iterator I) { |
314 | BucketT *TheBucket = &*I; |
315 | TheBucket->getSecond().~ValueT(); |
316 | TheBucket->getFirst() = getTombstoneKey(); |
317 | decrementNumEntries(); |
318 | incrementNumTombstones(); |
319 | } |
320 | |
321 | value_type& FindAndConstruct(const KeyT &Key) { |
322 | BucketT *TheBucket; |
323 | if (LookupBucketFor(Key, TheBucket)) |
324 | return *TheBucket; |
325 | |
326 | return *InsertIntoBucket(TheBucket, Key); |
327 | } |
328 | |
329 | ValueT &operator[](const KeyT &Key) { |
330 | return FindAndConstruct(Key).second; |
331 | } |
332 | |
333 | value_type& FindAndConstruct(KeyT &&Key) { |
334 | BucketT *TheBucket; |
335 | if (LookupBucketFor(Key, TheBucket)) |
336 | return *TheBucket; |
337 | |
338 | return *InsertIntoBucket(TheBucket, std::move(Key)); |
339 | } |
340 | |
341 | ValueT &operator[](KeyT &&Key) { |
342 | return FindAndConstruct(std::move(Key)).second; |
343 | } |
344 | |
345 | /// isPointerIntoBucketsArray - Return true if the specified pointer points |
346 | /// somewhere into the DenseMap's array of buckets (i.e. either to a key or |
347 | /// value in the DenseMap). |
348 | bool isPointerIntoBucketsArray(const void *Ptr) const { |
349 | return Ptr >= getBuckets() && Ptr < getBucketsEnd(); |
350 | } |
351 | |
352 | /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets |
353 | /// array. In conjunction with the previous method, this can be used to |
354 | /// determine whether an insertion caused the DenseMap to reallocate. |
355 | const void *getPointerIntoBucketsArray() const { return getBuckets(); } |
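  // Illustrative usage sketch following the comment above (hypothetical map M
  // and key K): detect whether an insertion reallocated the bucket array.
  //   const void *Before = M.getPointerIntoBucketsArray();
  //   M.try_emplace(K);
  //   bool Reallocated = !M.isPointerIntoBucketsArray(Before);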
356 | |
357 | protected: |
358 | DenseMapBase() = default; |
359 | |
360 | void destroyAll() { |
361 | if (getNumBuckets() == 0) // Nothing to do. |
362 | return; |
363 | |
364 | const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); |
365 | for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { |
366 | if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) && |
367 | !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) |
368 | P->getSecond().~ValueT(); |
369 | P->getFirst().~KeyT(); |
370 | } |
371 | } |
372 | |
373 | void initEmpty() { |
374 | setNumEntries(0); |
375 | setNumTombstones(0); |
376 | |
377 | assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
378 | "# initial buckets must be a power of two!");
379 | const KeyT EmptyKey = getEmptyKey(); |
380 | for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B) |
381 | ::new (&B->getFirst()) KeyT(EmptyKey); |
382 | } |
383 | |
384 | /// Returns the number of buckets to allocate to ensure that the DenseMap can |
385 | /// accommodate \p NumEntries without need to grow(). |
386 | unsigned getMinBucketToReserveForEntries(unsigned NumEntries) { |
387 | // Ensure that "NumEntries * 4 < NumBuckets * 3" |
388 | if (NumEntries == 0) |
389 | return 0; |
390 | // +1 is required because of the strict equality. |
392 | // For example if NumEntries is 48, we need to return 128.
392 | return NextPowerOf2(NumEntries * 4 / 3 + 1); |
393 | } |
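  // Worked example (annotation): for NumEntries == 48 this computes
  // NextPowerOf2(48 * 4 / 3 + 1) == NextPowerOf2(65) == 128, and indeed
  // 48 * 4 == 192 < 128 * 3 == 384, so a table reserved this way stays below
  // the 3/4 load factor that triggers growth in InsertIntoBucketImpl().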
394 | |
395 | void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) { |
396 | initEmpty(); |
397 | |
398 | // Insert all the old elements. |
399 | const KeyT EmptyKey = getEmptyKey(); |
400 | const KeyT TombstoneKey = getTombstoneKey(); |
401 | for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) { |
402 | if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) && |
403 | !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) { |
404 | // Insert the key/value into the new table. |
405 | BucketT *DestBucket; |
406 | bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket); |
407 | (void)FoundVal; // silence warning. |
408 | assert(!FoundVal && "Key already in new map?");
409 | DestBucket->getFirst() = std::move(B->getFirst()); |
410 | ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond())); |
411 | incrementNumEntries(); |
412 | |
413 | // Free the value. |
414 | B->getSecond().~ValueT(); |
415 | } |
416 | B->getFirst().~KeyT(); |
417 | } |
418 | } |
419 | |
420 | template <typename OtherBaseT> |
421 | void copyFrom( |
422 | const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) { |
423 | assert(&other != this);
424 | assert(getNumBuckets() == other.getNumBuckets());
425 | |
426 | setNumEntries(other.getNumEntries()); |
427 | setNumTombstones(other.getNumTombstones()); |
428 | |
429 | if (std::is_trivially_copyable<KeyT>::value && |
430 | std::is_trivially_copyable<ValueT>::value) |
431 | memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(), |
432 | getNumBuckets() * sizeof(BucketT)); |
433 | else |
434 | for (size_t i = 0; i < getNumBuckets(); ++i) { |
435 | ::new (&getBuckets()[i].getFirst()) |
436 | KeyT(other.getBuckets()[i].getFirst()); |
437 | if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) && |
438 | !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey())) |
439 | ::new (&getBuckets()[i].getSecond()) |
440 | ValueT(other.getBuckets()[i].getSecond()); |
441 | } |
442 | } |
443 | |
444 | static unsigned getHashValue(const KeyT &Val) { |
445 | return KeyInfoT::getHashValue(Val); |
446 | } |
447 | |
448 | template<typename LookupKeyT> |
449 | static unsigned getHashValue(const LookupKeyT &Val) { |
450 | return KeyInfoT::getHashValue(Val); |
451 | } |
452 | |
453 | static const KeyT getEmptyKey() { |
454 | static_assert(std::is_base_of<DenseMapBase, DerivedT>::value, |
455 | "Must pass the derived type to this template!"); |
456 | return KeyInfoT::getEmptyKey(); |
457 | } |
458 | |
459 | static const KeyT getTombstoneKey() { |
460 | return KeyInfoT::getTombstoneKey(); |
461 | } |
462 | |
463 | private: |
464 | iterator makeIterator(BucketT *P, BucketT *E, |
465 | DebugEpochBase &Epoch, |
466 | bool NoAdvance=false) { |
467 | if (shouldReverseIterate<KeyT>()) { |
468 | BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1; |
469 | return iterator(B, E, Epoch, NoAdvance); |
470 | } |
471 | return iterator(P, E, Epoch, NoAdvance); |
472 | } |
473 | |
474 | const_iterator makeConstIterator(const BucketT *P, const BucketT *E, |
475 | const DebugEpochBase &Epoch, |
476 | const bool NoAdvance=false) const { |
477 | if (shouldReverseIterate<KeyT>()) { |
478 | const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1; |
479 | return const_iterator(B, E, Epoch, NoAdvance); |
480 | } |
481 | return const_iterator(P, E, Epoch, NoAdvance); |
482 | } |
483 | |
484 | unsigned getNumEntries() const { |
485 | return static_cast<const DerivedT *>(this)->getNumEntries(); |
486 | } |
487 | |
488 | void setNumEntries(unsigned Num) { |
489 | static_cast<DerivedT *>(this)->setNumEntries(Num); |
490 | } |
491 | |
492 | void incrementNumEntries() { |
493 | setNumEntries(getNumEntries() + 1); |
494 | } |
495 | |
496 | void decrementNumEntries() { |
497 | setNumEntries(getNumEntries() - 1); |
498 | } |
499 | |
500 | unsigned getNumTombstones() const { |
501 | return static_cast<const DerivedT *>(this)->getNumTombstones(); |
502 | } |
503 | |
504 | void setNumTombstones(unsigned Num) { |
505 | static_cast<DerivedT *>(this)->setNumTombstones(Num); |
506 | } |
507 | |
508 | void incrementNumTombstones() { |
509 | setNumTombstones(getNumTombstones() + 1); |
510 | } |
511 | |
512 | void decrementNumTombstones() { |
513 | setNumTombstones(getNumTombstones() - 1); |
514 | } |
515 | |
516 | const BucketT *getBuckets() const { |
517 | return static_cast<const DerivedT *>(this)->getBuckets(); |
518 | } |
519 | |
520 | BucketT *getBuckets() { |
521 | return static_cast<DerivedT *>(this)->getBuckets(); |
522 | } |
523 | |
524 | unsigned getNumBuckets() const { |
525 | return static_cast<const DerivedT *>(this)->getNumBuckets(); |
526 | } |
527 | |
528 | BucketT *getBucketsEnd() { |
529 | return getBuckets() + getNumBuckets(); |
530 | } |
531 | |
532 | const BucketT *getBucketsEnd() const { |
533 | return getBuckets() + getNumBuckets(); |
534 | } |
535 | |
536 | void grow(unsigned AtLeast) { |
537 | static_cast<DerivedT *>(this)->grow(AtLeast); |
538 | } |
539 | |
540 | void shrink_and_clear() { |
541 | static_cast<DerivedT *>(this)->shrink_and_clear(); |
542 | } |
543 | |
544 | template <typename KeyArg, typename... ValueArgs> |
545 | BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key, |
546 | ValueArgs &&... Values) { |
547 | TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket); |
548 | |
549 | TheBucket->getFirst() = std::forward<KeyArg>(Key); |
550 | ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...); |
551 | return TheBucket; |
552 | } |
553 | |
554 | template <typename LookupKeyT> |
555 | BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key, |
556 | ValueT &&Value, LookupKeyT &Lookup) { |
557 | TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket); |
558 | |
559 | TheBucket->getFirst() = std::move(Key); |
560 | ::new (&TheBucket->getSecond()) ValueT(std::move(Value)); |
561 | return TheBucket; |
562 | } |
563 | |
564 | template <typename LookupKeyT> |
565 | BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup, |
566 | BucketT *TheBucket) { |
567 | incrementEpoch(); |
568 | |
569 | // If the load of the hash table is more than 3/4, or if fewer than 1/8 of |
570 | // the buckets are empty (meaning that many are filled with tombstones), |
571 | // grow the table. |
572 | // |
573 | // The latter case is tricky. For example, if we had one empty bucket with
574 | // tons of tombstones, failing lookups (e.g. for insertion) would have to
575 | // probe almost the entire table until they found the empty bucket. If the
576 | // table were completely filled with tombstones, no lookup would ever succeed,
577 | // causing infinite loops in lookup. |
578 | unsigned NewNumEntries = getNumEntries() + 1; |
579 | unsigned NumBuckets = getNumBuckets(); |
580 | if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
581 | this->grow(NumBuckets * 2); |
582 | LookupBucketFor(Lookup, TheBucket); |
583 | NumBuckets = getNumBuckets(); |
584 | } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
585 | NumBuckets/8)) {
586 | this->grow(NumBuckets); |
587 | LookupBucketFor(Lookup, TheBucket); |
588 | } |
589 | assert(TheBucket);
590 | |
591 | // Only update the state after we've grown our bucket space appropriately |
592 | // so that when growing buckets we have a self-consistent entry count.
593 | incrementNumEntries(); |
594 | |
595 | // If we are writing over a tombstone, remember this. |
596 | const KeyT EmptyKey = getEmptyKey(); |
597 | if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey)) |
598 | decrementNumTombstones(); |
599 | |
600 | return TheBucket; |
601 | } |
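A small, hedged illustration of the two growth conditions checked above; wouldGrow is a hypothetical helper written only for this sketch, not part of DenseMap. With 64 buckets, growth triggers once an insertion would reach the 3/4 load factor (the 48th entry), or once the buckets that are neither live nor tombstones drop to 64/8 = 8 or fewer.

    #include <cassert>

    // Hypothetical helper mirroring the growth conditions in InsertIntoBucketImpl.
    static bool wouldGrow(unsigned NewNumEntries, unsigned NumBuckets,
                          unsigned NumTombstones) {
      if (NewNumEntries * 4 >= NumBuckets * 3)
        return true;                                   // load factor reaches 3/4
      return NumBuckets - (NewNumEntries + NumTombstones) <= NumBuckets / 8;
    }

    int main() {
      assert(!wouldGrow(47, 64, 0));  // 47 entries in 64 buckets: still under 3/4
      assert(wouldGrow(48, 64, 0));   // inserting the 48th entry triggers growth
      assert(wouldGrow(10, 64, 47));  // only 7 empty buckets left: grow to flush
                                      // the tombstones
    }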
602 | |
603 | /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in |
604 | /// FoundBucket. If the bucket contains the key and a value, this returns |
605 | /// true, otherwise it returns a bucket with an empty marker or tombstone and |
606 | /// returns false. |
607 | template<typename LookupKeyT> |
608 | bool LookupBucketFor(const LookupKeyT &Val, |
609 | const BucketT *&FoundBucket) const { |
610 | const BucketT *BucketsPtr = getBuckets(); |
611 | const unsigned NumBuckets = getNumBuckets(); |
612 | |
613 | if (NumBuckets == 0) { |
614 | FoundBucket = nullptr; |
615 | return false; |
616 | } |
617 | |
618 | // FoundTombstone - Keep track of whether we find a tombstone while probing. |
619 | const BucketT *FoundTombstone = nullptr; |
620 | const KeyT EmptyKey = getEmptyKey(); |
621 | const KeyT TombstoneKey = getTombstoneKey(); |
622 | assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
623 | !KeyInfoT::isEqual(Val, TombstoneKey) &&
624 | "Empty/Tombstone value shouldn't be inserted into map!");
625 | |
626 | unsigned BucketNo = getHashValue(Val) & (NumBuckets-1); |
627 | unsigned ProbeAmt = 1; |
628 | while (true) { |
629 | const BucketT *ThisBucket = BucketsPtr + BucketNo; |
630 | // Found Val's bucket? If so, return it. |
631 | if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
632 | FoundBucket = ThisBucket; |
633 | return true; |
634 | } |
635 | |
636 | // If we found an empty bucket, the key doesn't exist in the map.
637 | // Return the bucket an insertion should use.
638 | if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
639 | // If we've already seen a tombstone while probing, fill it in instead |
640 | // of the empty bucket we eventually probed to. |
641 | FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket; |
642 | return false; |
643 | } |
644 | |
645 | // If this is a tombstone, remember it. If Val ends up not in the map, we
646 | // prefer to return it rather than a bucket that would require more probing.
647 | if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) && |
648 | !FoundTombstone) |
649 | FoundTombstone = ThisBucket; // Remember the first tombstone found. |
650 | |
651 | // Otherwise, it's a hash collision or a tombstone, continue quadratic |
652 | // probing. |
653 | BucketNo += ProbeAmt++; |
654 | BucketNo &= (NumBuckets-1); |
655 | } |
656 | } |
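For intuition, here is a hedged, standalone sketch of the probe sequence used above; the starting bucket and table size are made up for illustration. The offset grows by 1, 2, 3, ... each step and wraps with a mask, which visits every slot of a power-of-two table exactly once before repeating, so probing terminates as long as at least one bucket is empty.

    #include <cstdio>

    int main() {
      const unsigned NumBuckets = 8;  // must be a power of two, as in DenseMap
      unsigned BucketNo = 5;          // pretend getHashValue(Val) & (NumBuckets-1) == 5
      unsigned ProbeAmt = 1;
      for (unsigned Step = 0; Step != NumBuckets; ++Step) {
        std::printf("probe %u -> bucket %u\n", Step, BucketNo);
        BucketNo += ProbeAmt++;       // quadratic probing: +1, +2, +3, ...
        BucketNo &= (NumBuckets - 1);
      }
      // Visits buckets 5, 6, 0, 3, 7, 4, 2, 1: all eight slots, no repeats.
    }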
657 | |
658 | template <typename LookupKeyT> |
659 | bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) { |
660 | const BucketT *ConstFoundBucket; |
661 | bool Result = const_cast<const DenseMapBase *>(this) |
662 | ->LookupBucketFor(Val, ConstFoundBucket); |
663 | FoundBucket = const_cast<BucketT *>(ConstFoundBucket); |
664 | return Result; |
665 | } |
666 | |
667 | public: |
668 | /// Return the approximate size (in bytes) of the actual map. |
669 | /// This is just the raw memory used by DenseMap. |
670 | /// If entries are pointers to objects, the size of the referenced objects
671 | /// is not included.
672 | size_t getMemorySize() const { |
673 | return getNumBuckets() * sizeof(BucketT); |
674 | } |
675 | }; |
676 | |
677 | /// Equality comparison for DenseMap. |
678 | /// |
679 | /// Iterates over elements of LHS confirming that each (key, value) pair in LHS |
680 | /// is also in RHS, and that no additional pairs are in RHS. |
681 | /// Equivalent to N calls to RHS.find and N value comparisons. Amortized |
682 | /// complexity is linear, worst case is O(N^2) (if every hash collides). |
683 | template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT, |
684 | typename BucketT> |
685 | bool operator==( |
686 | const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS, |
687 | const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) { |
688 | if (LHS.size() != RHS.size()) |
689 | return false; |
690 | |
691 | for (auto &KV : LHS) { |
692 | auto I = RHS.find(KV.first); |
693 | if (I == RHS.end() || I->second != KV.second) |
694 | return false; |
695 | } |
696 | |
697 | return true; |
698 | } |
699 | |
700 | /// Inequality comparison for DenseMap. |
701 | /// |
702 | /// Equivalent to !(LHS == RHS). See operator== for performance notes. |
703 | template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT, |
704 | typename BucketT> |
705 | bool operator!=( |
706 | const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS, |
707 | const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) { |
708 | return !(LHS == RHS); |
709 | } |
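A minimal usage sketch of the comparison operators defined above (assuming only that llvm/ADT/DenseMap.h is on the include path): equality is purely content-based, independent of insertion order or bucket layout.

    #include "llvm/ADT/DenseMap.h"
    #include <cassert>

    int main() {
      llvm::DenseMap<int, int> A, B;
      A[1] = 10; A[2] = 20;
      B[2] = 20; B[1] = 10;   // different insertion order, same contents
      assert(A == B);
      B[3] = 30;
      assert(A != B);
    }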
710 | |
711 | template <typename KeyT, typename ValueT, |
712 | typename KeyInfoT = DenseMapInfo<KeyT>, |
713 | typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>> |
714 | class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>, |
715 | KeyT, ValueT, KeyInfoT, BucketT> { |
716 | friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>; |
717 | |
718 | // Lift some types from the dependent base class into this class for |
719 | // simplicity of referring to them. |
720 | using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>; |
721 | |
722 | BucketT *Buckets; |
723 | unsigned NumEntries; |
724 | unsigned NumTombstones; |
725 | unsigned NumBuckets; |
726 | |
727 | public: |
728 | /// Create a DenseMap with an optional \p InitialReserve that guarantees
729 | /// this number of elements can be inserted in the map without growing.
730 | explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); } |
731 | |
732 | DenseMap(const DenseMap &other) : BaseT() { |
733 | init(0); |
734 | copyFrom(other); |
735 | } |
736 | |
737 | DenseMap(DenseMap &&other) : BaseT() { |
738 | init(0); |
739 | swap(other); |
740 | } |
741 | |
742 | template<typename InputIt> |
743 | DenseMap(const InputIt &I, const InputIt &E) { |
744 | init(std::distance(I, E)); |
745 | this->insert(I, E); |
746 | } |
747 | |
748 | DenseMap(std::initializer_list<typename BaseT::value_type> Vals) { |
749 | init(Vals.size()); |
750 | this->insert(Vals.begin(), Vals.end()); |
751 | } |
752 | |
753 | ~DenseMap() { |
754 | this->destroyAll(); |
755 | deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT)); |
756 | } |
757 | |
758 | void swap(DenseMap& RHS) { |
759 | this->incrementEpoch(); |
760 | RHS.incrementEpoch(); |
761 | std::swap(Buckets, RHS.Buckets); |
762 | std::swap(NumEntries, RHS.NumEntries); |
763 | std::swap(NumTombstones, RHS.NumTombstones); |
764 | std::swap(NumBuckets, RHS.NumBuckets); |
765 | } |
766 | |
767 | DenseMap& operator=(const DenseMap& other) { |
768 | if (&other != this) |
769 | copyFrom(other); |
770 | return *this; |
771 | } |
772 | |
773 | DenseMap& operator=(DenseMap &&other) { |
774 | this->destroyAll(); |
775 | deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT)); |
776 | init(0); |
777 | swap(other); |
778 | return *this; |
779 | } |
780 | |
781 | void copyFrom(const DenseMap& other) { |
782 | this->destroyAll(); |
783 | deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT)); |
784 | if (allocateBuckets(other.NumBuckets)) { |
785 | this->BaseT::copyFrom(other); |
786 | } else { |
787 | NumEntries = 0; |
788 | NumTombstones = 0; |
789 | } |
790 | } |
791 | |
792 | void init(unsigned InitNumEntries) { |
793 | auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries); |
794 | if (allocateBuckets(InitBuckets)) { |
795 | this->BaseT::initEmpty(); |
796 | } else { |
797 | NumEntries = 0; |
798 | NumTombstones = 0; |
799 | } |
800 | } |
801 | |
802 | void grow(unsigned AtLeast) { |
803 | unsigned OldNumBuckets = NumBuckets; |
804 | BucketT *OldBuckets = Buckets; |
805 | |
806 | allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1)))); |
807 | assert(Buckets);
808 | if (!OldBuckets) { |
809 | this->BaseT::initEmpty(); |
810 | return; |
811 | } |
812 | |
813 | this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets); |
814 | |
815 | // Free the old table. |
816 | deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets, |
817 | alignof(BucketT)); |
818 | } |
819 | |
820 | void shrink_and_clear() { |
821 | unsigned OldNumBuckets = NumBuckets; |
822 | unsigned OldNumEntries = NumEntries; |
823 | this->destroyAll(); |
824 | |
825 | // Reduce the number of buckets. |
826 | unsigned NewNumBuckets = 0; |
827 | if (OldNumEntries) |
828 | NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1)); |
829 | if (NewNumBuckets == NumBuckets) { |
830 | this->BaseT::initEmpty(); |
831 | return; |
832 | } |
833 | |
834 | deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets, |
835 | alignof(BucketT)); |
836 | init(NewNumBuckets); |
837 | } |
838 | |
839 | private: |
840 | unsigned getNumEntries() const { |
841 | return NumEntries; |
842 | } |
843 | |
844 | void setNumEntries(unsigned Num) { |
845 | NumEntries = Num; |
846 | } |
847 | |
848 | unsigned getNumTombstones() const { |
849 | return NumTombstones; |
850 | } |
851 | |
852 | void setNumTombstones(unsigned Num) { |
853 | NumTombstones = Num; |
854 | } |
855 | |
856 | BucketT *getBuckets() const { |
857 | return Buckets; |
858 | } |
859 | |
860 | unsigned getNumBuckets() const { |
861 | return NumBuckets; |
862 | } |
863 | |
864 | bool allocateBuckets(unsigned Num) { |
865 | NumBuckets = Num; |
866 | if (NumBuckets == 0) { |
867 | Buckets = nullptr; |
868 | return false; |
869 | } |
870 | |
871 | Buckets = static_cast<BucketT *>( |
872 | allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT))); |
873 | return true; |
874 | } |
875 | }; |
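A short, hedged usage sketch of DenseMap's reserve-on-construction behavior and getMemorySize() (a sketch only, assuming llvm/ADT/DenseMap.h is available; the exact bucket count chosen for a given reserve is an implementation detail):

    #include "llvm/ADT/DenseMap.h"
    #include <cstdio>

    int main() {
      // Reserve room for 100 entries up front so the insert loop below does not
      // need to call grow() and rehash.
      llvm::DenseMap<int, int> M(/*InitialReserve=*/100);
      for (int i = 0; i < 100; ++i)
        M[i] = i * i;
      std::printf("entries: %u, raw bucket memory: %zu bytes\n",
                  M.size(), M.getMemorySize());
    }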
876 | |
877 | template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4, |
878 | typename KeyInfoT = DenseMapInfo<KeyT>, |
879 | typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>> |
880 | class SmallDenseMap |
881 | : public DenseMapBase< |
882 | SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT, |
883 | ValueT, KeyInfoT, BucketT> { |
884 | friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>; |
885 | |
886 | // Lift some types from the dependent base class into this class for |
887 | // simplicity of referring to them. |
888 | using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>; |
889 | |
890 | static_assert(isPowerOf2_64(InlineBuckets), |
891 | "InlineBuckets must be a power of 2."); |
892 | |
893 | unsigned Small : 1; |
894 | unsigned NumEntries : 31; |
895 | unsigned NumTombstones; |
896 | |
897 | struct LargeRep { |
898 | BucketT *Buckets; |
899 | unsigned NumBuckets; |
900 | }; |
901 | |
902 | /// A "union" of an inline bucket array and the struct representing |
903 | /// a large bucket. This union will be discriminated by the 'Small' bit. |
904 | AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage; |
905 | |
906 | public: |
907 | explicit SmallDenseMap(unsigned NumInitBuckets = 0) { |
908 | init(NumInitBuckets); |
909 | } |
910 | |
911 | SmallDenseMap(const SmallDenseMap &other) : BaseT() { |
912 | init(0); |
913 | copyFrom(other); |
914 | } |
915 | |
916 | SmallDenseMap(SmallDenseMap &&other) : BaseT() { |
917 | init(0); |
918 | swap(other); |
919 | } |
920 | |
921 | template<typename InputIt> |
922 | SmallDenseMap(const InputIt &I, const InputIt &E) { |
923 | init(NextPowerOf2(std::distance(I, E))); |
924 | this->insert(I, E); |
925 | } |
926 | |
927 | SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals) |
928 | : SmallDenseMap(Vals.begin(), Vals.end()) {} |
929 | |
930 | ~SmallDenseMap() { |
931 | this->destroyAll(); |
932 | deallocateBuckets(); |
933 | } |
934 | |
935 | void swap(SmallDenseMap& RHS) { |
936 | unsigned TmpNumEntries = RHS.NumEntries; |
937 | RHS.NumEntries = NumEntries; |
938 | NumEntries = TmpNumEntries; |
939 | std::swap(NumTombstones, RHS.NumTombstones); |
940 | |
941 | const KeyT EmptyKey = this->getEmptyKey(); |
942 | const KeyT TombstoneKey = this->getTombstoneKey(); |
943 | if (Small && RHS.Small) { |
944 | // If we're swapping inline bucket arrays, we have to cope with some of |
945 | // the tricky bits of DenseMap's storage system: the buckets are not |
946 | // fully initialized. Thus we swap every key, but we may have |
947 | // a one-directional move of the value. |
948 | for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { |
949 | BucketT *LHSB = &getInlineBuckets()[i], |
950 | *RHSB = &RHS.getInlineBuckets()[i]; |
951 | bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) && |
952 | !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey)); |
953 | bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) && |
954 | !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey)); |
955 | if (hasLHSValue && hasRHSValue) { |
956 | // Swap together if we can... |
957 | std::swap(*LHSB, *RHSB); |
958 | continue; |
959 | } |
960 | // Swap separately and handle any asymmetry. |
961 | std::swap(LHSB->getFirst(), RHSB->getFirst()); |
962 | if (hasLHSValue) { |
963 | ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond())); |
964 | LHSB->getSecond().~ValueT(); |
965 | } else if (hasRHSValue) { |
966 | ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond())); |
967 | RHSB->getSecond().~ValueT(); |
968 | } |
969 | } |
970 | return; |
971 | } |
972 | if (!Small && !RHS.Small) { |
973 | std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets); |
974 | std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets); |
975 | return; |
976 | } |
977 | |
978 | SmallDenseMap &SmallSide = Small ? *this : RHS; |
979 | SmallDenseMap &LargeSide = Small ? RHS : *this; |
980 | |
981 | // First stash the large side's rep and move the small side across. |
982 | LargeRep TmpRep = std::move(*LargeSide.getLargeRep()); |
983 | LargeSide.getLargeRep()->~LargeRep(); |
984 | LargeSide.Small = true; |
985 | // This is similar to the standard move-from-old-buckets, but the bucket |
986 | // count hasn't actually rotated in this case. So we have to carefully |
987 | // move construct the keys and values into their new locations, but there |
988 | // is no need to re-hash things. |
989 | for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { |
990 | BucketT *NewB = &LargeSide.getInlineBuckets()[i], |
991 | *OldB = &SmallSide.getInlineBuckets()[i]; |
992 | ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst())); |
993 | OldB->getFirst().~KeyT(); |
994 | if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) && |
995 | !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) { |
996 | ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond())); |
997 | OldB->getSecond().~ValueT(); |
998 | } |
999 | } |
1000 | |
1001 | // The hard part of moving the small buckets across is done, just move |
1002 | // the TmpRep into its new home. |
1003 | SmallSide.Small = false; |
1004 | new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep)); |
1005 | } |
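A hedged usage sketch of the mixed small/large swap handled above (a sketch, assuming llvm/ADT/DenseMap.h is available): the inline buckets move into the formerly-large side and the heap allocation changes owners, but callers simply see the contents exchanged.

    #include "llvm/ADT/DenseMap.h"
    #include <cassert>

    int main() {
      llvm::SmallDenseMap<int, int, 4> A;  // starts in the inline (Small) rep
      llvm::SmallDenseMap<int, int, 4> B;
      A[1] = 1;
      for (int i = 0; i < 32; ++i)         // enough entries to push B to the heap
        B[i] = i;
      A.swap(B);                           // takes the small<->large path above
      assert(A.size() == 32 && B.size() == 1 && B.lookup(1) == 1);
    }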
1006 | |
1007 | SmallDenseMap& operator=(const SmallDenseMap& other) { |
1008 | if (&other != this) |
1009 | copyFrom(other); |
1010 | return *this; |
1011 | } |
1012 | |
1013 | SmallDenseMap& operator=(SmallDenseMap &&other) { |
1014 | this->destroyAll(); |
1015 | deallocateBuckets(); |
1016 | init(0); |
1017 | swap(other); |
1018 | return *this; |
1019 | } |
1020 | |
1021 | void copyFrom(const SmallDenseMap& other) { |
1022 | this->destroyAll(); |
1023 | deallocateBuckets(); |
1024 | Small = true; |
1025 | if (other.getNumBuckets() > InlineBuckets) { |
1026 | Small = false; |
1027 | new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets())); |
1028 | } |
1029 | this->BaseT::copyFrom(other); |
1030 | } |
1031 | |
1032 | void init(unsigned InitBuckets) { |
1033 | Small = true; |
1034 | if (InitBuckets > InlineBuckets) { |
1035 | Small = false; |
1036 | new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets)); |
1037 | } |
1038 | this->BaseT::initEmpty(); |
1039 | } |
1040 | |
1041 | void grow(unsigned AtLeast) { |
1042 | if (AtLeast > InlineBuckets) |
1043 | AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1)); |
1044 | |
1045 | if (Small) { |
1046 | // First move the inline buckets into a temporary storage. |
1047 | AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage; |
1048 | BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage); |
1049 | BucketT *TmpEnd = TmpBegin; |
1050 | |
1051 | // Loop over the buckets, moving non-empty, non-tombstones into the |
1052 | // temporary storage. Have the loop move the TmpEnd forward as it goes. |
1053 | const KeyT EmptyKey = this->getEmptyKey(); |
1054 | const KeyT TombstoneKey = this->getTombstoneKey(); |
1055 | for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) { |
1056 | if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) && |
1057 | !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) { |
1058 | assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1059 | "Too many inline buckets!");
1060 | ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst())); |
1061 | ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond())); |
1062 | ++TmpEnd; |
1063 | P->getSecond().~ValueT(); |
1064 | } |
1065 | P->getFirst().~KeyT(); |
1066 | } |
1067 | |
1068 | // AtLeast == InlineBuckets can happen if there are many tombstones,
1069 | // and grow() is used to remove them; in that case we stay small.
1070 | // Otherwise we switch to the large rep here.
1071 | if (AtLeast > InlineBuckets) { |
1072 | Small = false; |
1073 | new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); |
1074 | } |
1075 | this->moveFromOldBuckets(TmpBegin, TmpEnd); |
1076 | return; |
1077 | } |
1078 | |
1079 | LargeRep OldRep = std::move(*getLargeRep()); |
1080 | getLargeRep()->~LargeRep(); |
1081 | if (AtLeast <= InlineBuckets) { |
1082 | Small = true; |
1083 | } else { |
1084 | new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); |
1085 | } |
1086 | |
1087 | this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets); |
1088 | |
1089 | // Free the old table. |
1090 | deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets, |
1091 | alignof(BucketT)); |
1092 | } |
1093 | |
1094 | void shrink_and_clear() { |
1095 | unsigned OldSize = this->size(); |
1096 | this->destroyAll(); |
1097 | |
1098 | // Reduce the number of buckets. |
1099 | unsigned NewNumBuckets = 0; |
1100 | if (OldSize) { |
1101 | NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1); |
1102 | if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u) |
1103 | NewNumBuckets = 64; |
1104 | } |
1105 | if ((Small && NewNumBuckets <= InlineBuckets) || |
1106 | (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) { |
1107 | this->BaseT::initEmpty(); |
1108 | return; |
1109 | } |
1110 | |
1111 | deallocateBuckets(); |
1112 | init(NewNumBuckets); |
1113 | } |
1114 | |
1115 | private: |
1116 | unsigned getNumEntries() const { |
1117 | return NumEntries; |
1118 | } |
1119 | |
1120 | void setNumEntries(unsigned Num) { |
1121 | // NumEntries is hardcoded to be 31 bits wide. |
1122 | assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1123 | NumEntries = Num; |
1124 | } |
1125 | |
1126 | unsigned getNumTombstones() const { |
1127 | return NumTombstones; |
1128 | } |
1129 | |
1130 | void setNumTombstones(unsigned Num) { |
1131 | NumTombstones = Num; |
1132 | } |
1133 | |
1134 | const BucketT *getInlineBuckets() const { |
1135 | assert(Small);
1136 | // Note that this cast does not violate aliasing rules as we assert that |
1137 | // the memory's dynamic type is the small, inline bucket buffer, and the |
1138 | // 'storage' is a POD containing a char buffer. |
1139 | return reinterpret_cast<const BucketT *>(&storage); |
1140 | } |
1141 | |
1142 | BucketT *getInlineBuckets() { |
1143 | return const_cast<BucketT *>( |
1144 | const_cast<const SmallDenseMap *>(this)->getInlineBuckets()); |
1145 | } |
1146 | |
1147 | const LargeRep *getLargeRep() const { |
1148 | assert(!Small);
1149 | // Note, same rule about aliasing as with getInlineBuckets. |
1150 | return reinterpret_cast<const LargeRep *>(&storage); |
1151 | } |
1152 | |
1153 | LargeRep *getLargeRep() { |
1154 | return const_cast<LargeRep *>( |
1155 | const_cast<const SmallDenseMap *>(this)->getLargeRep()); |
1156 | } |
1157 | |
1158 | const BucketT *getBuckets() const { |
1159 | return Small ? getInlineBuckets() : getLargeRep()->Buckets; |
1160 | } |
1161 | |
1162 | BucketT *getBuckets() { |
1163 | return const_cast<BucketT *>( |
1164 | const_cast<const SmallDenseMap *>(this)->getBuckets()); |
1165 | } |
1166 | |
1167 | unsigned getNumBuckets() const { |
1168 | return Small ? InlineBuckets : getLargeRep()->NumBuckets; |
1169 | } |
1170 | |
1171 | void deallocateBuckets() { |
1172 | if (Small) |
1173 | return; |
1174 | |
1175 | deallocate_buffer(getLargeRep()->Buckets, |
1176 | sizeof(BucketT) * getLargeRep()->NumBuckets, |
1177 | alignof(BucketT)); |
1178 | getLargeRep()->~LargeRep(); |
1179 | } |
1180 | |
1181 | LargeRep allocateBuckets(unsigned Num) { |
1182 | assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1183 | LargeRep Rep = {static_cast<BucketT *>(allocate_buffer( |
1184 | sizeof(BucketT) * Num, alignof(BucketT))), |
1185 | Num}; |
1186 | return Rep; |
1187 | } |
1188 | }; |
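As a rough, hedged guide to choosing InlineBuckets (the numbers below follow from the 3/4 load-factor rule shown earlier, not from a documented guarantee): a SmallDenseMap with N inline buckets stays allocation-free only while fewer than 3N/4 entries are live, so InlineBuckets should comfortably exceed the expected element count.

    #include "llvm/ADT/DenseMap.h"

    // Hypothetical alias chosen for illustration: up to 11 entries fit in the
    // 16 inline buckets, since inserting the 12th satisfies 12 * 4 >= 16 * 3
    // and would trigger growth to the heap-allocated representation.
    using SmallIntMap = llvm::SmallDenseMap<int, int, 16>;

    int main() {
      SmallIntMap M;
      for (int i = 0; i < 11; ++i)  // stays in the inline buckets throughout
        M[i] = i;
    }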
1189 | |
1190 | template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket, |
1191 | bool IsConst> |
1192 | class DenseMapIterator : DebugEpochBase::HandleBase { |
1193 | friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>; |
1194 | friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>; |
1195 | |
1196 | public: |
1197 | using difference_type = ptrdiff_t; |
1198 | using value_type = |
1199 | typename std::conditional<IsConst, const Bucket, Bucket>::type; |
1200 | using pointer = value_type *; |
1201 | using reference = value_type &; |
1202 | using iterator_category = std::forward_iterator_tag; |
1203 | |
1204 | private: |
1205 | pointer Ptr = nullptr; |
1206 | pointer End = nullptr; |
1207 | |
1208 | public: |
1209 | DenseMapIterator() = default; |
1210 | |
1211 | DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch, |
1212 | bool NoAdvance = false) |
1213 | : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) { |
1214 | assert(isHandleInSync() && "invalid construction!");
1215 | |
1216 | if (NoAdvance) return; |
1217 | if (shouldReverseIterate<KeyT>()) { |
1218 | RetreatPastEmptyBuckets(); |
1219 | return; |
1220 | } |
1221 | AdvancePastEmptyBuckets(); |
1222 | } |
1223 | |
1224 | // Converting ctor from non-const iterators to const iterators. Only enabled
1225 | // when the destination is const and the source is non-const, so it doesn't
1226 | // end up as a user-defined copy constructor.
1227 | template <bool IsConstSrc, |
1228 | typename = std::enable_if_t<!IsConstSrc && IsConst>> |
1229 | DenseMapIterator( |
1230 | const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I) |
1231 | : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {} |
1232 | |
1233 | reference operator*() const { |
1234 | assert(isHandleInSync() && "invalid iterator access!");
1235 | assert(Ptr != End && "dereferencing end() iterator");
1236 | if (shouldReverseIterate<KeyT>()) |
1237 | return Ptr[-1]; |
1238 | return *Ptr; |
1239 | } |
1240 | pointer operator->() const { |
1241 | assert(isHandleInSync() && "invalid iterator access!");
1242 | assert(Ptr != End && "dereferencing end() iterator");
1243 | if (shouldReverseIterate<KeyT>()) |
1244 | return &(Ptr[-1]); |
1245 | return Ptr; |
1246 | } |
1247 | |
1248 | friend bool operator==(const DenseMapIterator &LHS, |
1249 | const DenseMapIterator &RHS) { |
1250 | assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
1251 | assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1252 | assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
1253 | "comparing incomparable iterators!");
1254 | return LHS.Ptr == RHS.Ptr; |
1255 | } |
1256 | |
1257 | friend bool operator!=(const DenseMapIterator &LHS, |
1258 | const DenseMapIterator &RHS) { |
1259 | return !(LHS == RHS); |
1260 | } |
1261 | |
1262 | inline DenseMapIterator& operator++() { // Preincrement |
1263 | assert(isHandleInSync() && "invalid iterator access!");
1264 | assert(Ptr != End && "incrementing end() iterator");
1265 | if (shouldReverseIterate<KeyT>()) { |
1266 | --Ptr; |
1267 | RetreatPastEmptyBuckets(); |
1268 | return *this; |
1269 | } |
1270 | ++Ptr; |
1271 | AdvancePastEmptyBuckets(); |
1272 | return *this; |
1273 | } |
1274 | DenseMapIterator operator++(int) { // Postincrement |
1275 | assert(isHandleInSync() && "invalid iterator access!");
1276 | DenseMapIterator tmp = *this; ++*this; return tmp; |
1277 | } |
1278 | |
1279 | private: |
1280 | void AdvancePastEmptyBuckets() { |
1281 | assert(Ptr <= End);
1282 | const KeyT Empty = KeyInfoT::getEmptyKey(); |
1283 | const KeyT Tombstone = KeyInfoT::getTombstoneKey(); |
1284 | |
1285 | while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) || |
1286 | KeyInfoT::isEqual(Ptr->getFirst(), Tombstone))) |
1287 | ++Ptr; |
1288 | } |
1289 | |
1290 | void RetreatPastEmptyBuckets() { |
1291 | assert(Ptr >= End);
1292 | const KeyT Empty = KeyInfoT::getEmptyKey(); |
1293 | const KeyT Tombstone = KeyInfoT::getTombstoneKey(); |
1294 | |
1295 | while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) || |
1296 | KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone))) |
1297 | --Ptr; |
1298 | } |
1299 | }; |
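A minimal iteration sketch (assuming llvm/ADT/DenseMap.h is available): the iterator above skips empty and tombstone buckets automatically, visits entries in an unspecified order, and, in builds with the debug epoch machinery enabled, asserts if the map is mutated during iteration.

    #include "llvm/ADT/DenseMap.h"
    #include <cstdio>

    int main() {
      llvm::DenseMap<int, int> M;
      M[1] = 10; M[2] = 20; M[3] = 30;
      int Sum = 0;
      for (const auto &KV : M)   // DenseMapIterator skips empty/tombstone buckets
        Sum += KV.second;
      std::printf("sum = %d\n", Sum);  // 60, regardless of visitation order
    }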
1300 | |
1301 | template <typename KeyT, typename ValueT, typename KeyInfoT> |
1302 | inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) { |
1303 | return X.getMemorySize(); |
1304 | } |
1305 | |
1306 | } // end namespace llvm |
1307 | |
1308 | #endif // LLVM_ADT_DENSEMAP_H |