File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
Warning: line 5600, column 15: Called C++ object pointer is null
1 | //===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This transformation analyzes and transforms the induction variables (and | |||
10 | // computations derived from them) into forms suitable for efficient execution | |||
11 | // on the target. | |||
12 | // | |||
13 | // This pass performs a strength reduction on array references inside loops that | |||
14 | // have as one or more of their components the loop induction variable; it | |||
15 | // rewrites expressions to take advantage of scaled-index addressing modes | |||
16 | // available on the target, and it performs a variety of other optimizations | |||
17 | // related to loop induction variables. | |||
18 | // | |||
19 | // Terminology note: this code has a lot of handling for "post-increment" or | |||
20 | // "post-inc" users. This is not talking about post-increment addressing modes; | |||
21 | // it is instead talking about code like this: | |||
22 | // | |||
23 | // %i = phi [ 0, %entry ], [ %i.next, %latch ] | |||
24 | // ... | |||
25 | // %i.next = add %i, 1 | |||
26 | // %c = icmp eq %i.next, %n | |||
27 | // | |||
28 | // The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however | |||
29 | // it's useful to think about these as the same register, with some uses using | |||
30 | // the value of the register before the add and some using it after. In this | |||
31 | // example, the icmp is a post-increment user, since it uses %i.next, which is | |||
32 | // the value of the induction variable after the increment. The other common | |||
33 | // case of post-increment users is users outside the loop. | |||
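// An example of that second case, continuing the IR above (illustrative
// addition, not part of the original comment):
//
//   exit:
//     %i.lcssa = phi [ %i.next, %latch ]  ; post-increment user outside the loop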
34 | // | |||
35 | // TODO: More sophistication in the way Formulae are generated and filtered. | |||
36 | // | |||
37 | // TODO: Handle multiple loops at a time. | |||
38 | // | |||
39 | // TODO: Should the addressing mode BaseGV be changed to a ConstantExpr instead | |||
40 | // of a GlobalValue? | |||
41 | // | |||
42 | // TODO: When truncation is free, truncate ICmp users' operands to make it a | |||
43 | // smaller encoding (on x86 at least). | |||
44 | // | |||
45 | // TODO: When a negated register is used by an add (such as in a list of | |||
46 | // multiple base registers, or as the increment expression in an addrec), | |||
47 | // we may not actually need both reg and (-1 * reg) in registers; the | |||
48 | // negation can be implemented by using a sub instead of an add. The | |||
49 | // lack of support for taking this into consideration when making | |||
50 | // register pressure decisions is partly worked around by the "Special" | |||
51 | // use kind. | |||
52 | // | |||
53 | //===----------------------------------------------------------------------===// | |||
54 | ||||
55 | #include "llvm/Transforms/Scalar/LoopStrengthReduce.h" | |||
56 | #include "llvm/ADT/APInt.h" | |||
57 | #include "llvm/ADT/DenseMap.h" | |||
58 | #include "llvm/ADT/DenseSet.h" | |||
59 | #include "llvm/ADT/Hashing.h" | |||
60 | #include "llvm/ADT/PointerIntPair.h" | |||
61 | #include "llvm/ADT/STLExtras.h" | |||
62 | #include "llvm/ADT/SetVector.h" | |||
63 | #include "llvm/ADT/SmallBitVector.h" | |||
64 | #include "llvm/ADT/SmallPtrSet.h" | |||
65 | #include "llvm/ADT/SmallSet.h" | |||
66 | #include "llvm/ADT/SmallVector.h" | |||
67 | #include "llvm/ADT/iterator_range.h" | |||
68 | #include "llvm/Analysis/AssumptionCache.h" | |||
69 | #include "llvm/Analysis/IVUsers.h" | |||
70 | #include "llvm/Analysis/LoopAnalysisManager.h" | |||
71 | #include "llvm/Analysis/LoopInfo.h" | |||
72 | #include "llvm/Analysis/LoopPass.h" | |||
73 | #include "llvm/Analysis/MemorySSA.h" | |||
74 | #include "llvm/Analysis/MemorySSAUpdater.h" | |||
75 | #include "llvm/Analysis/ScalarEvolution.h" | |||
76 | #include "llvm/Analysis/ScalarEvolutionExpressions.h" | |||
77 | #include "llvm/Analysis/ScalarEvolutionNormalization.h" | |||
78 | #include "llvm/Analysis/TargetLibraryInfo.h" | |||
79 | #include "llvm/Analysis/TargetTransformInfo.h" | |||
80 | #include "llvm/Analysis/ValueTracking.h" | |||
81 | #include "llvm/Config/llvm-config.h" | |||
82 | #include "llvm/IR/BasicBlock.h" | |||
83 | #include "llvm/IR/Constant.h" | |||
84 | #include "llvm/IR/Constants.h" | |||
85 | #include "llvm/IR/DebugInfoMetadata.h" | |||
86 | #include "llvm/IR/DerivedTypes.h" | |||
87 | #include "llvm/IR/Dominators.h" | |||
88 | #include "llvm/IR/GlobalValue.h" | |||
89 | #include "llvm/IR/IRBuilder.h" | |||
90 | #include "llvm/IR/InstrTypes.h" | |||
91 | #include "llvm/IR/Instruction.h" | |||
92 | #include "llvm/IR/Instructions.h" | |||
93 | #include "llvm/IR/IntrinsicInst.h" | |||
94 | #include "llvm/IR/Intrinsics.h" | |||
95 | #include "llvm/IR/Module.h" | |||
96 | #include "llvm/IR/OperandTraits.h" | |||
97 | #include "llvm/IR/Operator.h" | |||
98 | #include "llvm/IR/PassManager.h" | |||
99 | #include "llvm/IR/Type.h" | |||
100 | #include "llvm/IR/Use.h" | |||
101 | #include "llvm/IR/User.h" | |||
102 | #include "llvm/IR/Value.h" | |||
103 | #include "llvm/IR/ValueHandle.h" | |||
104 | #include "llvm/InitializePasses.h" | |||
105 | #include "llvm/Pass.h" | |||
106 | #include "llvm/Support/Casting.h" | |||
107 | #include "llvm/Support/CommandLine.h" | |||
108 | #include "llvm/Support/Compiler.h" | |||
109 | #include "llvm/Support/Debug.h" | |||
110 | #include "llvm/Support/ErrorHandling.h" | |||
111 | #include "llvm/Support/MathExtras.h" | |||
112 | #include "llvm/Support/raw_ostream.h" | |||
113 | #include "llvm/Transforms/Scalar.h" | |||
114 | #include "llvm/Transforms/Utils.h" | |||
115 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" | |||
116 | #include "llvm/Transforms/Utils/Local.h" | |||
117 | #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" | |||
118 | #include <algorithm> | |||
119 | #include <cassert> | |||
120 | #include <cstddef> | |||
121 | #include <cstdint> | |||
122 | #include <cstdlib> | |||
123 | #include <iterator> | |||
124 | #include <limits> | |||
125 | #include <map> | |||
126 | #include <numeric> | |||
127 | #include <utility> | |||
128 | ||||
129 | using namespace llvm; | |||
130 | ||||
131 | #define DEBUG_TYPE "loop-reduce" | |||
132 | ||||
133 | /// MaxIVUsers is an arbitrary threshold that provides an early opportunity to | |||
134 | /// bail out. This threshold is far beyond the number of users that LSR can | |||
135 | /// conceivably solve, so it should not affect generated code, but catches the | |||
136 | /// worst cases before LSR burns too much compile time and stack space. | |||
137 | static const unsigned MaxIVUsers = 200; | |||
138 | ||||
139 | // Temporary flag to clean up congruent phis after LSR phi expansion. | |||
140 | // It's currently disabled until we can determine whether it's truly useful or | |||
141 | // not. The flag should be removed after the v3.0 release. | |||
142 | // This is now needed for ivchains. | |||
143 | static cl::opt<bool> EnablePhiElim( | |||
144 | "enable-lsr-phielim", cl::Hidden, cl::init(true), | |||
145 | cl::desc("Enable LSR phi elimination")); | |||
146 | ||||
147 | // This flag adds the instruction count to the solution cost comparison. | |||
148 | static cl::opt<bool> InsnsCost( | |||
149 | "lsr-insns-cost", cl::Hidden, cl::init(true), | |||
150 | cl::desc("Add instruction count to a LSR cost model")); | |||
151 | ||||
152 | // Flag to choose how to narrow a complex LSR solution. | |||
153 | static cl::opt<bool> LSRExpNarrow( | |||
154 | "lsr-exp-narrow", cl::Hidden, cl::init(false), | |||
155 | cl::desc("Narrow LSR complex solution using" | |||
156 | " expectation of registers number")); | |||
157 | ||||
158 | // Flag to narrow search space by filtering non-optimal formulae with | |||
159 | // the same ScaledReg and Scale. | |||
160 | static cl::opt<bool> FilterSameScaledReg( | |||
161 | "lsr-filter-same-scaled-reg", cl::Hidden, cl::init(true), | |||
162 | cl::desc("Narrow LSR search space by filtering non-optimal formulae" | |||
163 | " with the same ScaledReg and Scale")); | |||
164 | ||||
165 | static cl::opt<TTI::AddressingModeKind> PreferredAddresingMode( | |||
166 | "lsr-preferred-addressing-mode", cl::Hidden, cl::init(TTI::AMK_None), | |||
167 | cl::desc("A flag that overrides the target's preferred addressing mode."), | |||
168 | cl::values(clEnumValN(TTI::AMK_None,llvm::cl::OptionEnumValue { "none", int(TTI::AMK_None), "Don't prefer any addressing mode" } | |||
169 | "none",llvm::cl::OptionEnumValue { "none", int(TTI::AMK_None), "Don't prefer any addressing mode" } | |||
170 | "Don't prefer any addressing mode")llvm::cl::OptionEnumValue { "none", int(TTI::AMK_None), "Don't prefer any addressing mode" }, | |||
171 | clEnumValN(TTI::AMK_PreIndexed,llvm::cl::OptionEnumValue { "preindexed", int(TTI::AMK_PreIndexed ), "Prefer pre-indexed addressing mode" } | |||
172 | "preindexed",llvm::cl::OptionEnumValue { "preindexed", int(TTI::AMK_PreIndexed ), "Prefer pre-indexed addressing mode" } | |||
173 | "Prefer pre-indexed addressing mode")llvm::cl::OptionEnumValue { "preindexed", int(TTI::AMK_PreIndexed ), "Prefer pre-indexed addressing mode" }, | |||
174 | clEnumValN(TTI::AMK_PostIndexed,llvm::cl::OptionEnumValue { "postindexed", int(TTI::AMK_PostIndexed ), "Prefer post-indexed addressing mode" } | |||
175 | "postindexed",llvm::cl::OptionEnumValue { "postindexed", int(TTI::AMK_PostIndexed ), "Prefer post-indexed addressing mode" } | |||
176 | "Prefer post-indexed addressing mode")llvm::cl::OptionEnumValue { "postindexed", int(TTI::AMK_PostIndexed ), "Prefer post-indexed addressing mode" })); | |||
177 | ||||
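// Usage sketch (an assumption, not part of the original source): like any
// cl::opt, this flag can be set when the pass is run through 'opt', e.g.
//
//   opt -passes=loop-reduce -lsr-preferred-addressing-mode=postindexed in.ll
//
// which overrides whatever addressing-mode preference the target reports.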
178 | static cl::opt<unsigned> ComplexityLimit( | |||
179 | "lsr-complexity-limit", cl::Hidden, | |||
180 | cl::init(std::numeric_limits<uint16_t>::max()), | |||
181 | cl::desc("LSR search space complexity limit")); | |||
182 | ||||
183 | static cl::opt<unsigned> SetupCostDepthLimit( | |||
184 | "lsr-setupcost-depth-limit", cl::Hidden, cl::init(7), | |||
185 | cl::desc("The limit on recursion depth for LSRs setup cost")); | |||
186 | ||||
187 | #ifndef NDEBUG | |||
188 | // Stress test IV chain generation. | |||
189 | static cl::opt<bool> StressIVChain( | |||
190 | "stress-ivchain", cl::Hidden, cl::init(false), | |||
191 | cl::desc("Stress test LSR IV chains")); | |||
192 | #else | |||
193 | static bool StressIVChain = false; | |||
194 | #endif | |||
195 | ||||
196 | namespace { | |||
197 | ||||
198 | struct MemAccessTy { | |||
199 | /// Used in situations where the accessed memory type is unknown. | |||
200 | static const unsigned UnknownAddressSpace = | |||
201 | std::numeric_limits<unsigned>::max(); | |||
202 | ||||
203 | Type *MemTy = nullptr; | |||
204 | unsigned AddrSpace = UnknownAddressSpace; | |||
205 | ||||
206 | MemAccessTy() = default; | |||
207 | MemAccessTy(Type *Ty, unsigned AS) : MemTy(Ty), AddrSpace(AS) {} | |||
208 | ||||
209 | bool operator==(MemAccessTy Other) const { | |||
210 | return MemTy == Other.MemTy && AddrSpace == Other.AddrSpace; | |||
211 | } | |||
212 | ||||
213 | bool operator!=(MemAccessTy Other) const { return !(*this == Other); } | |||
214 | ||||
215 | static MemAccessTy getUnknown(LLVMContext &Ctx, | |||
216 | unsigned AS = UnknownAddressSpace) { | |||
217 | return MemAccessTy(Type::getVoidTy(Ctx), AS); | |||
218 | } | |||
219 | ||||
220 | Type *getType() { return MemTy; } | |||
221 | }; | |||
222 | ||||
223 | /// This class holds data which is used to order reuse candidates. | |||
224 | class RegSortData { | |||
225 | public: | |||
226 | /// This represents the set of LSRUse indices which reference | |||
227 | /// a particular register. | |||
228 | SmallBitVector UsedByIndices; | |||
229 | ||||
230 | void print(raw_ostream &OS) const; | |||
231 | void dump() const; | |||
232 | }; | |||
233 | ||||
234 | } // end anonymous namespace | |||
235 | ||||
236 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
237 | void RegSortData::print(raw_ostream &OS) const { | |||
238 | OS << "[NumUses=" << UsedByIndices.count() << ']'; | |||
239 | } | |||
240 | ||||
241 | LLVM_DUMP_METHOD void RegSortData::dump() const { | |||
242 | print(errs()); errs() << '\n'; | |||
243 | } | |||
244 | #endif | |||
245 | ||||
246 | namespace { | |||
247 | ||||
248 | /// Map register candidates to information about how they are used. | |||
249 | class RegUseTracker { | |||
250 | using RegUsesTy = DenseMap<const SCEV *, RegSortData>; | |||
251 | ||||
252 | RegUsesTy RegUsesMap; | |||
253 | SmallVector<const SCEV *, 16> RegSequence; | |||
254 | ||||
255 | public: | |||
256 | void countRegister(const SCEV *Reg, size_t LUIdx); | |||
257 | void dropRegister(const SCEV *Reg, size_t LUIdx); | |||
258 | void swapAndDropUse(size_t LUIdx, size_t LastLUIdx); | |||
259 | ||||
260 | bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const; | |||
261 | ||||
262 | const SmallBitVector &getUsedByIndices(const SCEV *Reg) const; | |||
263 | ||||
264 | void clear(); | |||
265 | ||||
266 | using iterator = SmallVectorImpl<const SCEV *>::iterator; | |||
267 | using const_iterator = SmallVectorImpl<const SCEV *>::const_iterator; | |||
268 | ||||
269 | iterator begin() { return RegSequence.begin(); } | |||
270 | iterator end() { return RegSequence.end(); } | |||
271 | const_iterator begin() const { return RegSequence.begin(); } | |||
272 | const_iterator end() const { return RegSequence.end(); } | |||
273 | }; | |||
274 | ||||
275 | } // end anonymous namespace | |||
276 | ||||
277 | void | |||
278 | RegUseTracker::countRegister(const SCEV *Reg, size_t LUIdx) { | |||
279 | std::pair<RegUsesTy::iterator, bool> Pair = | |||
280 | RegUsesMap.insert(std::make_pair(Reg, RegSortData())); | |||
281 | RegSortData &RSD = Pair.first->second; | |||
282 | if (Pair.second) | |||
283 | RegSequence.push_back(Reg); | |||
284 | RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1)); | |||
285 | RSD.UsedByIndices.set(LUIdx); | |||
286 | } | |||
287 | ||||
288 | void | |||
289 | RegUseTracker::dropRegister(const SCEV *Reg, size_t LUIdx) { | |||
290 | RegUsesTy::iterator It = RegUsesMap.find(Reg); | |||
291 | assert(It != RegUsesMap.end()); | |||
292 | RegSortData &RSD = It->second; | |||
293 | assert(RSD.UsedByIndices.size() > LUIdx); | |||
294 | RSD.UsedByIndices.reset(LUIdx); | |||
295 | } | |||
296 | ||||
297 | void | |||
298 | RegUseTracker::swapAndDropUse(size_t LUIdx, size_t LastLUIdx) { | |||
299 | assert(LUIdx <= LastLUIdx); | |||
300 | ||||
301 | // Update RegUses. The data structure is not optimized for this purpose; | |||
302 | // we must iterate through it and update each of the bit vectors. | |||
303 | for (auto &Pair : RegUsesMap) { | |||
304 | SmallBitVector &UsedByIndices = Pair.second.UsedByIndices; | |||
305 | if (LUIdx < UsedByIndices.size()) | |||
306 | UsedByIndices[LUIdx] = | |||
307 | LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : false; | |||
308 | UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx)); | |||
309 | } | |||
310 | } | |||
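// Worked example (illustrative, not from the original source): deleting the
// LSRUse at index LUIdx = 1 by swapping in the last use at LastLUIdx = 3.
// For a register whose UsedByIndices has bits {0, 2, 3} set, bit 1 receives
// bit 3's value and the vector is then truncated to size 3, leaving bits
// {0, 1, 2} set.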
311 | ||||
312 | bool | |||
313 | RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const { | |||
314 | RegUsesTy::const_iterator I = RegUsesMap.find(Reg); | |||
315 | if (I == RegUsesMap.end()) | |||
316 | return false; | |||
317 | const SmallBitVector &UsedByIndices = I->second.UsedByIndices; | |||
318 | int i = UsedByIndices.find_first(); | |||
319 | if (i == -1) return false; | |||
320 | if ((size_t)i != LUIdx) return true; | |||
321 | return UsedByIndices.find_next(i) != -1; | |||
322 | } | |||
323 | ||||
324 | const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const { | |||
325 | RegUsesTy::const_iterator I = RegUsesMap.find(Reg); | |||
326 | assert(I != RegUsesMap.end() && "Unknown register!"); | |||
327 | return I->second.UsedByIndices; | |||
328 | } | |||
329 | ||||
330 | void RegUseTracker::clear() { | |||
331 | RegUsesMap.clear(); | |||
332 | RegSequence.clear(); | |||
333 | } | |||
334 | ||||
335 | namespace { | |||
336 | ||||
337 | /// This class holds information that describes a formula for computing a | |||
338 | /// value that satisfies a use. It may include broken-out immediates and scaled registers. | |||
339 | struct Formula { | |||
340 | /// Global base address used for complex addressing. | |||
341 | GlobalValue *BaseGV = nullptr; | |||
342 | ||||
343 | /// Base offset for complex addressing. | |||
344 | int64_t BaseOffset = 0; | |||
345 | ||||
346 | /// Whether any complex addressing has a base register. | |||
347 | bool HasBaseReg = false; | |||
348 | ||||
349 | /// The scale of any complex addressing. | |||
350 | int64_t Scale = 0; | |||
351 | ||||
352 | /// The list of "base" registers for this use. When this is non-empty, the | |||
353 | /// canonical representation of a formula is | |||
354 | /// 1. BaseRegs.size > 1 implies ScaledReg != NULL and | |||
355 | /// 2. ScaledReg != NULL implies Scale != 1 || !BaseRegs.empty(). | |||
356 | /// 3. The reg containing the recurrent expr related to the current loop in the | |||
357 | /// formula should be put in the ScaledReg. | |||
358 | /// #1 enforces that the scaled register is always used when at least two | |||
359 | /// registers are needed by the formula: e.g., reg1 + reg2 is reg1 + 1 * reg2. | |||
360 | /// #2 enforces that 1 * reg is reg. | |||
361 | /// #3 ensures invariant regs with respect to current loop can be combined | |||
362 | /// together in LSR codegen. | |||
363 | /// This invariant can be temporarily broken while building a formula. | |||
364 | /// However, every formula inserted into the LSRInstance must be in canonical | |||
365 | /// form. | |||
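// Illustrative example (not from the original source): for an address
// computed as  %base + 4*%i + 16  inside loop %L, where %i is the IV
// {0,+,1}<%L>, a canonical Formula would be
//   BaseRegs = { %base },  ScaledReg = {0,+,1}<%L>,  Scale = 4,
//   BaseOffset = 16
// which satisfies invariants #1-#3 above.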
366 | SmallVector<const SCEV *, 4> BaseRegs; | |||
367 | ||||
368 | /// The 'scaled' register for this use. This should be non-null when Scale is | |||
369 | /// not zero. | |||
370 | const SCEV *ScaledReg = nullptr; | |||
371 | ||||
372 | /// An additional constant offset which is added near the use. This requires a | |||
373 | /// temporary register, but the offset itself can live in an add immediate | |||
374 | /// field rather than a register. | |||
375 | int64_t UnfoldedOffset = 0; | |||
376 | ||||
377 | Formula() = default; | |||
378 | ||||
379 | void initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE); | |||
380 | ||||
381 | bool isCanonical(const Loop &L) const; | |||
382 | ||||
383 | void canonicalize(const Loop &L); | |||
384 | ||||
385 | bool unscale(); | |||
386 | ||||
387 | bool hasZeroEnd() const; | |||
388 | ||||
389 | size_t getNumRegs() const; | |||
390 | Type *getType() const; | |||
391 | ||||
392 | void deleteBaseReg(const SCEV *&S); | |||
393 | ||||
394 | bool referencesReg(const SCEV *S) const; | |||
395 | bool hasRegsUsedByUsesOtherThan(size_t LUIdx, | |||
396 | const RegUseTracker &RegUses) const; | |||
397 | ||||
398 | void print(raw_ostream &OS) const; | |||
399 | void dump() const; | |||
400 | }; | |||
401 | ||||
402 | } // end anonymous namespace | |||
403 | ||||
404 | /// Recursion helper for initialMatch. | |||
405 | static void DoInitialMatch(const SCEV *S, Loop *L, | |||
406 | SmallVectorImpl<const SCEV *> &Good, | |||
407 | SmallVectorImpl<const SCEV *> &Bad, | |||
408 | ScalarEvolution &SE) { | |||
409 | // Collect expressions which properly dominate the loop header. | |||
410 | if (SE.properlyDominates(S, L->getHeader())) { | |||
411 | Good.push_back(S); | |||
412 | return; | |||
413 | } | |||
414 | ||||
415 | // Look at add operands. | |||
416 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { | |||
417 | for (const SCEV *S : Add->operands()) | |||
418 | DoInitialMatch(S, L, Good, Bad, SE); | |||
419 | return; | |||
420 | } | |||
421 | ||||
422 | // Look at addrec operands. | |||
423 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) | |||
424 | if (!AR->getStart()->isZero() && AR->isAffine()) { | |||
425 | DoInitialMatch(AR->getStart(), L, Good, Bad, SE); | |||
426 | DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0), | |||
427 | AR->getStepRecurrence(SE), | |||
428 | // FIXME: AR->getNoWrapFlags() | |||
429 | AR->getLoop(), SCEV::FlagAnyWrap), | |||
430 | L, Good, Bad, SE); | |||
431 | return; | |||
432 | } | |||
433 | ||||
434 | // Handle a multiplication by -1 (negation) if it didn't fold. | |||
435 | if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) | |||
436 | if (Mul->getOperand(0)->isAllOnesValue()) { | |||
437 | SmallVector<const SCEV *, 4> Ops(drop_begin(Mul->operands())); | |||
438 | const SCEV *NewMul = SE.getMulExpr(Ops); | |||
439 | ||||
440 | SmallVector<const SCEV *, 4> MyGood; | |||
441 | SmallVector<const SCEV *, 4> MyBad; | |||
442 | DoInitialMatch(NewMul, L, MyGood, MyBad, SE); | |||
443 | const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue( | |||
444 | SE.getEffectiveSCEVType(NewMul->getType()))); | |||
445 | for (const SCEV *S : MyGood) | |||
446 | Good.push_back(SE.getMulExpr(NegOne, S)); | |||
447 | for (const SCEV *S : MyBad) | |||
448 | Bad.push_back(SE.getMulExpr(NegOne, S)); | |||
449 | return; | |||
450 | } | |||
451 | ||||
452 | // Ok, we can't do anything interesting. Just stuff the whole thing into a | |||
453 | // register and hope for the best. | |||
454 | Bad.push_back(S); | |||
455 | } | |||
456 | ||||
457 | /// Incorporate loop-variant parts of S into this Formula, attempting to keep | |||
458 | /// all loop-invariant and loop-computable values in a single base register. | |||
459 | void Formula::initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) { | |||
460 | SmallVector<const SCEV *, 4> Good; | |||
461 | SmallVector<const SCEV *, 4> Bad; | |||
462 | DoInitialMatch(S, L, Good, Bad, SE); | |||
463 | if (!Good.empty()) { | |||
464 | const SCEV *Sum = SE.getAddExpr(Good); | |||
465 | if (!Sum->isZero()) | |||
466 | BaseRegs.push_back(Sum); | |||
467 | HasBaseReg = true; | |||
468 | } | |||
469 | if (!Bad.empty()) { | |||
470 | const SCEV *Sum = SE.getAddExpr(Bad); | |||
471 | if (!Sum->isZero()) | |||
472 | BaseRegs.push_back(Sum); | |||
473 | HasBaseReg = true; | |||
474 | } | |||
475 | canonicalize(*L); | |||
476 | } | |||
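// Worked example (illustrative, not from the original source): for
//   S = {(4 + %gv),+,8}<%L>
// where (4 + %gv) is defined outside the loop and properly dominates the loop
// header, DoInitialMatch puts (4 + %gv) in Good and the split-off
// {0,+,8}<%L> in Bad, so initialMatch ends up with
//   BaseRegs = { (4 + %gv), {0,+,8}<%L> }
// before canonicalize() moves the addrec into ScaledReg.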
477 | ||||
478 | /// Check whether or not this formula satisfies the canonical | |||
479 | /// representation. | |||
480 | /// \see Formula::BaseRegs. | |||
481 | bool Formula::isCanonical(const Loop &L) const { | |||
482 | if (!ScaledReg) | |||
483 | return BaseRegs.size() <= 1; | |||
484 | ||||
485 | if (Scale != 1) | |||
486 | return true; | |||
487 | ||||
488 | if (Scale == 1 && BaseRegs.empty()) | |||
489 | return false; | |||
490 | ||||
491 | const SCEVAddRecExpr *SAR = dyn_cast<const SCEVAddRecExpr>(ScaledReg); | |||
492 | if (SAR && SAR->getLoop() == &L) | |||
493 | return true; | |||
494 | ||||
495 | // If ScaledReg is not a recurrent expr, or it is one whose loop is not the | |||
496 | // current loop, while BaseRegs contains a recurrent expr reg related to the | |||
497 | // current loop, we want to swap the reg in BaseRegs with ScaledReg. | |||
498 | auto I = find_if(BaseRegs, [&](const SCEV *S) { | |||
499 | return isa<const SCEVAddRecExpr>(S) && | |||
500 | (cast<SCEVAddRecExpr>(S)->getLoop() == &L); | |||
501 | }); | |||
502 | return I == BaseRegs.end(); | |||
503 | } | |||
504 | ||||
505 | /// Helper method to morph a formula into its canonical representation. | |||
506 | /// \see Formula::BaseRegs. | |||
507 | /// Every formula having more than one base register, must use the ScaledReg | |||
508 | /// field. Otherwise, we would have to do special cases everywhere in LSR | |||
509 | /// to treat reg1 + reg2 + ... the same way as reg1 + 1*reg2 + ... | |||
510 | /// On the other hand, 1*reg should be canonicalized into reg. | |||
511 | void Formula::canonicalize(const Loop &L) { | |||
512 | if (isCanonical(L)) | |||
513 | return; | |||
514 | ||||
515 | if (BaseRegs.empty()) { | |||
516 | // No base reg? Use scale reg with scale = 1 as such. | |||
517 | assert(ScaledReg && "Expected 1*reg => reg"); | |||
518 | assert(Scale == 1 && "Expected 1*reg => reg"); | |||
519 | BaseRegs.push_back(ScaledReg); | |||
520 | Scale = 0; | |||
521 | ScaledReg = nullptr; | |||
522 | return; | |||
523 | } | |||
524 | ||||
525 | // Keep the invariant sum in BaseRegs and one of the variant sums in ScaledReg. | |||
526 | if (!ScaledReg) { | |||
527 | ScaledReg = BaseRegs.pop_back_val(); | |||
528 | Scale = 1; | |||
529 | } | |||
530 | ||||
531 | // If ScaledReg is an invariant with respect to L, find the reg from | |||
532 | // BaseRegs containing the recurrent expr related with Loop L. Swap the | |||
533 | // reg with ScaledReg. | |||
534 | const SCEVAddRecExpr *SAR = dyn_cast<const SCEVAddRecExpr>(ScaledReg); | |||
535 | if (!SAR || SAR->getLoop() != &L) { | |||
536 | auto I = find_if(BaseRegs, [&](const SCEV *S) { | |||
537 | return isa<const SCEVAddRecExpr>(S) && | |||
538 | (cast<SCEVAddRecExpr>(S)->getLoop() == &L); | |||
539 | }); | |||
540 | if (I != BaseRegs.end()) | |||
541 | std::swap(ScaledReg, *I); | |||
542 | } | |||
543 | assert(isCanonical(L) && "Failed to canonicalize?"); | |||
544 | } | |||
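// Illustrative example (not from the original source): given
//   BaseRegs = { {0,+,4}<%L> },  ScaledReg = %n (loop-invariant),  Scale = 1
// the swap above yields ScaledReg = {0,+,4}<%L> and BaseRegs = { %n }, so the
// recurrence of the current loop always ends up in ScaledReg (invariant #3 of
// the canonical form).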
545 | ||||
546 | /// Get rid of the scale in the formula. | |||
547 | /// In other words, this method morphs reg1 + 1*reg2 into reg1 + reg2. | |||
548 | /// \return true if it was possible to get rid of the scale, false otherwise. | |||
549 | /// \note After this operation the formula may not be in the canonical form. | |||
550 | bool Formula::unscale() { | |||
551 | if (Scale != 1) | |||
552 | return false; | |||
553 | Scale = 0; | |||
554 | BaseRegs.push_back(ScaledReg); | |||
555 | ScaledReg = nullptr; | |||
556 | return true; | |||
557 | } | |||
558 | ||||
559 | bool Formula::hasZeroEnd() const { | |||
560 | if (UnfoldedOffset || BaseOffset) | |||
561 | return false; | |||
562 | if (BaseRegs.size() != 1 || ScaledReg) | |||
563 | return false; | |||
564 | return true; | |||
565 | } | |||
566 | ||||
567 | /// Return the total number of register operands used by this formula. This does | |||
568 | /// not include register uses implied by non-constant addrec strides. | |||
569 | size_t Formula::getNumRegs() const { | |||
570 | return !!ScaledReg + BaseRegs.size(); | |||
571 | } | |||
572 | ||||
573 | /// Return the type of this formula, if it has one, or null otherwise. This type | |||
574 | /// is meaningless except for the bit size. | |||
575 | Type *Formula::getType() const { | |||
576 | return !BaseRegs.empty() ? BaseRegs.front()->getType() : | |||
577 | ScaledReg ? ScaledReg->getType() : | |||
578 | BaseGV ? BaseGV->getType() : | |||
579 | nullptr; | |||
580 | } | |||
581 | ||||
582 | /// Delete the given base reg from the BaseRegs list. | |||
583 | void Formula::deleteBaseReg(const SCEV *&S) { | |||
584 | if (&S != &BaseRegs.back()) | |||
585 | std::swap(S, BaseRegs.back()); | |||
586 | BaseRegs.pop_back(); | |||
587 | } | |||
588 | ||||
589 | /// Test if this formula references the given register. | |||
590 | bool Formula::referencesReg(const SCEV *S) const { | |||
591 | return S == ScaledReg || is_contained(BaseRegs, S); | |||
592 | } | |||
593 | ||||
594 | /// Test whether this formula uses registers which are used by uses other than | |||
595 | /// the use with the given index. | |||
596 | bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx, | |||
597 | const RegUseTracker &RegUses) const { | |||
598 | if (ScaledReg) | |||
599 | if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx)) | |||
600 | return true; | |||
601 | for (const SCEV *BaseReg : BaseRegs) | |||
602 | if (RegUses.isRegUsedByUsesOtherThan(BaseReg, LUIdx)) | |||
603 | return true; | |||
604 | return false; | |||
605 | } | |||
606 | ||||
607 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
608 | void Formula::print(raw_ostream &OS) const { | |||
609 | bool First = true; | |||
610 | if (BaseGV) { | |||
611 | if (!First) OS << " + "; else First = false; | |||
612 | BaseGV->printAsOperand(OS, /*PrintType=*/false); | |||
613 | } | |||
614 | if (BaseOffset != 0) { | |||
615 | if (!First) OS << " + "; else First = false; | |||
616 | OS << BaseOffset; | |||
617 | } | |||
618 | for (const SCEV *BaseReg : BaseRegs) { | |||
619 | if (!First) OS << " + "; else First = false; | |||
620 | OS << "reg(" << *BaseReg << ')'; | |||
621 | } | |||
622 | if (HasBaseReg && BaseRegs.empty()) { | |||
623 | if (!First) OS << " + "; else First = false; | |||
624 | OS << "**error: HasBaseReg**"; | |||
625 | } else if (!HasBaseReg && !BaseRegs.empty()) { | |||
626 | if (!First) OS << " + "; else First = false; | |||
627 | OS << "**error: !HasBaseReg**"; | |||
628 | } | |||
629 | if (Scale != 0) { | |||
630 | if (!First) OS << " + "; else First = false; | |||
631 | OS << Scale << "*reg("; | |||
632 | if (ScaledReg) | |||
633 | OS << *ScaledReg; | |||
634 | else | |||
635 | OS << "<unknown>"; | |||
636 | OS << ')'; | |||
637 | } | |||
638 | if (UnfoldedOffset != 0) { | |||
639 | if (!First) OS << " + "; | |||
640 | OS << "imm(" << UnfoldedOffset << ')'; | |||
641 | } | |||
642 | } | |||
643 | ||||
644 | LLVM_DUMP_METHOD void Formula::dump() const { | |||
645 | print(errs()); errs() << '\n'; | |||
646 | } | |||
647 | #endif | |||
648 | ||||
649 | /// Return true if the given addrec can be sign-extended without changing its | |||
650 | /// value. | |||
651 | static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) { | |||
652 | Type *WideTy = | |||
653 | IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1); | |||
654 | return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy)); | |||
655 | } | |||
656 | ||||
657 | /// Return true if the given add can be sign-extended without changing its | |||
658 | /// value. | |||
659 | static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) { | |||
660 | Type *WideTy = | |||
661 | IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1); | |||
662 | return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy)); | |||
663 | } | |||
664 | ||||
665 | /// Return true if the given mul can be sign-extended without changing its | |||
666 | /// value. | |||
667 | static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) { | |||
668 | Type *WideTy = | |||
669 | IntegerType::get(SE.getContext(), | |||
670 | SE.getTypeSizeInBits(M->getType()) * M->getNumOperands()); | |||
671 | return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy)); | |||
672 | } | |||
673 | ||||
674 | /// Return an expression for LHS /s RHS, if it can be determined and if the | |||
675 | /// remainder is known to be zero, or null otherwise. If IgnoreSignificantBits | |||
676 | /// is true, expressions like (X * Y) /s Y are simplified to X, ignoring that | |||
677 | /// the multiplication may overflow, which is useful when the result will be | |||
678 | /// used in a context where the most significant bits are ignored. | |||
679 | static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS, | |||
680 | ScalarEvolution &SE, | |||
681 | bool IgnoreSignificantBits = false) { | |||
682 | // Handle the trivial case, which works for any SCEV type. | |||
683 | if (LHS == RHS) | |||
684 | return SE.getConstant(LHS->getType(), 1); | |||
685 | ||||
686 | // Handle a few RHS special cases. | |||
687 | const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS); | |||
688 | if (RC) { | |||
689 | const APInt &RA = RC->getAPInt(); | |||
690 | // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do | |||
691 | // some folding. | |||
692 | if (RA.isAllOnesValue()) { | |||
693 | if (LHS->getType()->isPointerTy()) | |||
694 | return nullptr; | |||
695 | return SE.getMulExpr(LHS, RC); | |||
696 | } | |||
697 | // Handle x /s 1 as x. | |||
698 | if (RA == 1) | |||
699 | return LHS; | |||
700 | } | |||
701 | ||||
702 | // Check for a division of a constant by a constant. | |||
703 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) { | |||
704 | if (!RC) | |||
705 | return nullptr; | |||
706 | const APInt &LA = C->getAPInt(); | |||
707 | const APInt &RA = RC->getAPInt(); | |||
708 | if (LA.srem(RA) != 0) | |||
709 | return nullptr; | |||
710 | return SE.getConstant(LA.sdiv(RA)); | |||
711 | } | |||
712 | ||||
713 | // Distribute the sdiv over addrec operands, if the addrec doesn't overflow. | |||
714 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) { | |||
715 | if ((IgnoreSignificantBits || isAddRecSExtable(AR, SE)) && AR->isAffine()) { | |||
716 | const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE, | |||
717 | IgnoreSignificantBits); | |||
718 | if (!Step) return nullptr; | |||
719 | const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE, | |||
720 | IgnoreSignificantBits); | |||
721 | if (!Start) return nullptr; | |||
722 | // FlagNW is independent of the start value, step direction, and is | |||
723 | // preserved with smaller magnitude steps. | |||
724 | // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) | |||
725 | return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap); | |||
726 | } | |||
727 | return nullptr; | |||
728 | } | |||
729 | ||||
730 | // Distribute the sdiv over add operands, if the add doesn't overflow. | |||
731 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) { | |||
732 | if (IgnoreSignificantBits || isAddSExtable(Add, SE)) { | |||
733 | SmallVector<const SCEV *, 8> Ops; | |||
734 | for (const SCEV *S : Add->operands()) { | |||
735 | const SCEV *Op = getExactSDiv(S, RHS, SE, IgnoreSignificantBits); | |||
736 | if (!Op) return nullptr; | |||
737 | Ops.push_back(Op); | |||
738 | } | |||
739 | return SE.getAddExpr(Ops); | |||
740 | } | |||
741 | return nullptr; | |||
742 | } | |||
743 | ||||
744 | // Check for a multiply operand that we can pull RHS out of. | |||
745 | if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) { | |||
746 | if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) { | |||
747 | // Handle special case C1*X*Y /s C2*X*Y. | |||
748 | if (const SCEVMulExpr *MulRHS = dyn_cast<SCEVMulExpr>(RHS)) { | |||
749 | if (IgnoreSignificantBits || isMulSExtable(MulRHS, SE)) { | |||
750 | const SCEVConstant *LC = dyn_cast<SCEVConstant>(Mul->getOperand(0)); | |||
751 | const SCEVConstant *RC = | |||
752 | dyn_cast<SCEVConstant>(MulRHS->getOperand(0)); | |||
753 | if (LC && RC) { | |||
754 | SmallVector<const SCEV *, 4> LOps(drop_begin(Mul->operands())); | |||
755 | SmallVector<const SCEV *, 4> ROps(drop_begin(MulRHS->operands())); | |||
756 | if (LOps == ROps) | |||
757 | return getExactSDiv(LC, RC, SE, IgnoreSignificantBits); | |||
758 | } | |||
759 | } | |||
760 | } | |||
761 | ||||
762 | SmallVector<const SCEV *, 4> Ops; | |||
763 | bool Found = false; | |||
764 | for (const SCEV *S : Mul->operands()) { | |||
765 | if (!Found) | |||
766 | if (const SCEV *Q = getExactSDiv(S, RHS, SE, | |||
767 | IgnoreSignificantBits)) { | |||
768 | S = Q; | |||
769 | Found = true; | |||
770 | } | |||
771 | Ops.push_back(S); | |||
772 | } | |||
773 | return Found ? SE.getMulExpr(Ops) : nullptr; | |||
774 | } | |||
775 | return nullptr; | |||
776 | } | |||
777 | ||||
778 | // Otherwise we don't know. | |||
779 | return nullptr; | |||
780 | } | |||
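// Worked examples (illustrative, not from the original source; each assumes
// the sign-extension checks above succeed):
//   getExactSDiv({0,+,8}<%L>, 4, SE)               --> {0,+,2}<%L>
//   getExactSDiv((6 + (8 * %x)), 4, SE)            --> nullptr  (6 srem 4 != 0)
//   getExactSDiv((4 * %x * %y), (2 * %x * %y), SE) --> 2  (the C1*X*Y /s C2*X*Y case)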
781 | ||||
782 | /// If S involves the addition of a constant integer value, return that integer | |||
783 | /// value, and mutate S to point to a new SCEV with that value excluded. | |||
784 | static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) { | |||
785 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) { | |||
786 | if (C->getAPInt().getMinSignedBits() <= 64) { | |||
787 | S = SE.getConstant(C->getType(), 0); | |||
788 | return C->getValue()->getSExtValue(); | |||
789 | } | |||
790 | } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { | |||
791 | SmallVector<const SCEV *, 8> NewOps(Add->operands()); | |||
792 | int64_t Result = ExtractImmediate(NewOps.front(), SE); | |||
793 | if (Result != 0) | |||
794 | S = SE.getAddExpr(NewOps); | |||
795 | return Result; | |||
796 | } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { | |||
797 | SmallVector<const SCEV *, 8> NewOps(AR->operands()); | |||
798 | int64_t Result = ExtractImmediate(NewOps.front(), SE); | |||
799 | if (Result != 0) | |||
800 | S = SE.getAddRecExpr(NewOps, AR->getLoop(), | |||
801 | // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) | |||
802 | SCEV::FlagAnyWrap); | |||
803 | return Result; | |||
804 | } | |||
805 | return 0; | |||
806 | } | |||
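// Worked example (illustrative, not from the original source): for
// S = (4 + %x), ExtractImmediate returns 4 and rewrites S to %x; for
// S = {(4 + %x),+,8}<%L>, it returns 4 and S becomes {%x,+,8}<%L>.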
807 | ||||
808 | /// If S involves the addition of a GlobalValue address, return that symbol, and | |||
809 | /// mutate S to point to a new SCEV with that value excluded. | |||
810 | static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) { | |||
811 | if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { | |||
812 | if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) { | |||
813 | S = SE.getConstant(GV->getType(), 0); | |||
814 | return GV; | |||
815 | } | |||
816 | } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { | |||
817 | SmallVector<const SCEV *, 8> NewOps(Add->operands()); | |||
818 | GlobalValue *Result = ExtractSymbol(NewOps.back(), SE); | |||
819 | if (Result) | |||
820 | S = SE.getAddExpr(NewOps); | |||
821 | return Result; | |||
822 | } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { | |||
823 | SmallVector<const SCEV *, 8> NewOps(AR->operands()); | |||
824 | GlobalValue *Result = ExtractSymbol(NewOps.front(), SE); | |||
825 | if (Result) | |||
826 | S = SE.getAddRecExpr(NewOps, AR->getLoop(), | |||
827 | // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) | |||
828 | SCEV::FlagAnyWrap); | |||
829 | return Result; | |||
830 | } | |||
831 | return nullptr; | |||
832 | } | |||
833 | ||||
834 | /// Returns true if the specified instruction is using the specified value as an | |||
835 | /// address. | |||
836 | static bool isAddressUse(const TargetTransformInfo &TTI, | |||
837 | Instruction *Inst, Value *OperandVal) { | |||
838 | bool isAddress = isa<LoadInst>(Inst); | |||
839 | if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { | |||
840 | if (SI->getPointerOperand() == OperandVal) | |||
841 | isAddress = true; | |||
842 | } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { | |||
843 | // Addressing modes can also be folded into prefetches and a variety | |||
844 | // of intrinsics. | |||
845 | switch (II->getIntrinsicID()) { | |||
846 | case Intrinsic::memset: | |||
847 | case Intrinsic::prefetch: | |||
848 | case Intrinsic::masked_load: | |||
849 | if (II->getArgOperand(0) == OperandVal) | |||
850 | isAddress = true; | |||
851 | break; | |||
852 | case Intrinsic::masked_store: | |||
853 | if (II->getArgOperand(1) == OperandVal) | |||
854 | isAddress = true; | |||
855 | break; | |||
856 | case Intrinsic::memmove: | |||
857 | case Intrinsic::memcpy: | |||
858 | if (II->getArgOperand(0) == OperandVal || | |||
859 | II->getArgOperand(1) == OperandVal) | |||
860 | isAddress = true; | |||
861 | break; | |||
862 | default: { | |||
863 | MemIntrinsicInfo IntrInfo; | |||
864 | if (TTI.getTgtMemIntrinsic(II, IntrInfo)) { | |||
865 | if (IntrInfo.PtrVal == OperandVal) | |||
866 | isAddress = true; | |||
867 | } | |||
868 | } | |||
869 | } | |||
870 | } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) { | |||
871 | if (RMW->getPointerOperand() == OperandVal) | |||
872 | isAddress = true; | |||
873 | } else if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) { | |||
874 | if (CmpX->getPointerOperand() == OperandVal) | |||
875 | isAddress = true; | |||
876 | } | |||
877 | return isAddress; | |||
878 | } | |||
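// Illustrative example (not from the original source): given
//   store i32 %v, i32* %p
// isAddressUse(TTI, SI, %p) returns true because %p is the pointer operand,
// while isAddressUse(TTI, SI, %v) returns false because the stored value is
// not used as an address.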
879 | ||||
880 | /// Return the type of the memory being accessed. | |||
881 | static MemAccessTy getAccessType(const TargetTransformInfo &TTI, | |||
882 | Instruction *Inst, Value *OperandVal) { | |||
883 | MemAccessTy AccessTy(Inst->getType(), MemAccessTy::UnknownAddressSpace); | |||
884 | if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) { | |||
885 | AccessTy.MemTy = SI->getOperand(0)->getType(); | |||
886 | AccessTy.AddrSpace = SI->getPointerAddressSpace(); | |||
887 | } else if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) { | |||
888 | AccessTy.AddrSpace = LI->getPointerAddressSpace(); | |||
889 | } else if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) { | |||
890 | AccessTy.AddrSpace = RMW->getPointerAddressSpace(); | |||
891 | } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) { | |||
892 | AccessTy.AddrSpace = CmpX->getPointerAddressSpace(); | |||
893 | } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { | |||
894 | switch (II->getIntrinsicID()) { | |||
895 | case Intrinsic::prefetch: | |||
896 | case Intrinsic::memset: | |||
897 | AccessTy.AddrSpace = II->getArgOperand(0)->getType()->getPointerAddressSpace(); | |||
898 | AccessTy.MemTy = OperandVal->getType(); | |||
899 | break; | |||
900 | case Intrinsic::memmove: | |||
901 | case Intrinsic::memcpy: | |||
902 | AccessTy.AddrSpace = OperandVal->getType()->getPointerAddressSpace(); | |||
903 | AccessTy.MemTy = OperandVal->getType(); | |||
904 | break; | |||
905 | case Intrinsic::masked_load: | |||
906 | AccessTy.AddrSpace = | |||
907 | II->getArgOperand(0)->getType()->getPointerAddressSpace(); | |||
908 | break; | |||
909 | case Intrinsic::masked_store: | |||
910 | AccessTy.MemTy = II->getOperand(0)->getType(); | |||
911 | AccessTy.AddrSpace = | |||
912 | II->getArgOperand(1)->getType()->getPointerAddressSpace(); | |||
913 | break; | |||
914 | default: { | |||
915 | MemIntrinsicInfo IntrInfo; | |||
916 | if (TTI.getTgtMemIntrinsic(II, IntrInfo) && IntrInfo.PtrVal) { | |||
917 | AccessTy.AddrSpace | |||
918 | = IntrInfo.PtrVal->getType()->getPointerAddressSpace(); | |||
919 | } | |||
920 | ||||
921 | break; | |||
922 | } | |||
923 | } | |||
924 | } | |||
925 | ||||
926 | // All pointers have the same requirements, so canonicalize them to an | |||
927 | // arbitrary pointer type to minimize variation. | |||
928 | if (PointerType *PTy = dyn_cast<PointerType>(AccessTy.MemTy)) | |||
929 | AccessTy.MemTy = PointerType::get(IntegerType::get(PTy->getContext(), 1), | |||
930 | PTy->getAddressSpace()); | |||
931 | ||||
932 | return AccessTy; | |||
933 | } | |||
934 | ||||
935 | /// Return true if this AddRec is already a phi in its loop. | |||
936 | static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) { | |||
937 | for (PHINode &PN : AR->getLoop()->getHeader()->phis()) { | |||
938 | if (SE.isSCEVable(PN.getType()) && | |||
939 | (SE.getEffectiveSCEVType(PN.getType()) == | |||
940 | SE.getEffectiveSCEVType(AR->getType())) && | |||
941 | SE.getSCEV(&PN) == AR) | |||
942 | return true; | |||
943 | } | |||
944 | return false; | |||
945 | } | |||
946 | ||||
947 | /// Check if expanding this expression is likely to incur significant cost. This | |||
948 | /// is tricky because SCEV doesn't track which expressions are actually computed | |||
949 | /// by the current IR. | |||
950 | /// | |||
951 | /// We currently allow expansion of IV increments that involve adds, | |||
952 | /// multiplication by constants, and AddRecs from existing phis. | |||
953 | /// | |||
954 | /// TODO: Allow UDivExpr if we can find an existing IV increment that is an | |||
955 | /// obvious multiple of the UDivExpr. | |||
956 | static bool isHighCostExpansion(const SCEV *S, | |||
957 | SmallPtrSetImpl<const SCEV*> &Processed, | |||
958 | ScalarEvolution &SE) { | |||
959 | // Zero/One operand expressions | |||
960 | switch (S->getSCEVType()) { | |||
961 | case scUnknown: | |||
962 | case scConstant: | |||
963 | return false; | |||
964 | case scTruncate: | |||
965 | return isHighCostExpansion(cast<SCEVTruncateExpr>(S)->getOperand(), | |||
966 | Processed, SE); | |||
967 | case scZeroExtend: | |||
968 | return isHighCostExpansion(cast<SCEVZeroExtendExpr>(S)->getOperand(), | |||
969 | Processed, SE); | |||
970 | case scSignExtend: | |||
971 | return isHighCostExpansion(cast<SCEVSignExtendExpr>(S)->getOperand(), | |||
972 | Processed, SE); | |||
973 | default: | |||
974 | break; | |||
975 | } | |||
976 | ||||
977 | if (!Processed.insert(S).second) | |||
978 | return false; | |||
979 | ||||
980 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { | |||
981 | for (const SCEV *S : Add->operands()) { | |||
982 | if (isHighCostExpansion(S, Processed, SE)) | |||
983 | return true; | |||
984 | } | |||
985 | return false; | |||
986 | } | |||
987 | ||||
988 | if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { | |||
989 | if (Mul->getNumOperands() == 2) { | |||
990 | // Multiplication by a constant is ok | |||
991 | if (isa<SCEVConstant>(Mul->getOperand(0))) | |||
992 | return isHighCostExpansion(Mul->getOperand(1), Processed, SE); | |||
993 | ||||
994 | // If we have the value of one operand, check if an existing | |||
995 | // multiplication already generates this expression. | |||
996 | if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) { | |||
997 | Value *UVal = U->getValue(); | |||
998 | for (User *UR : UVal->users()) { | |||
999 | // If U is a constant, it may be used by a ConstantExpr. | |||
1000 | Instruction *UI = dyn_cast<Instruction>(UR); | |||
1001 | if (UI && UI->getOpcode() == Instruction::Mul && | |||
1002 | SE.isSCEVable(UI->getType())) { | |||
1003 | return SE.getSCEV(UI) == Mul; | |||
1004 | } | |||
1005 | } | |||
1006 | } | |||
1007 | } | |||
1008 | } | |||
1009 | ||||
1010 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { | |||
1011 | if (isExistingPhi(AR, SE)) | |||
1012 | return false; | |||
1013 | } | |||
1014 | ||||
1015 | // For now, consider any other type of expression (div/mul/min/max) high cost. | |||
1016 | return true; | |||
1017 | } | |||
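// Illustrative examples (not from the original source):
//   - sext(%n) is not considered a high cost expansion: the switch recurses
//     into %n, which is a scUnknown.
//   - (4 * %n) is cheap, since multiplication by a constant is allowed.
//   - (%a /u %b) is treated as high cost: no case above matches, so the
//     function falls through to 'return true'.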
1018 | ||||
1019 | namespace { | |||
1020 | ||||
1021 | class LSRUse; | |||
1022 | ||||
1023 | } // end anonymous namespace | |||
1024 | ||||
1025 | /// Check if the addressing mode defined by \p F is completely | |||
1026 | /// folded in \p LU at isel time. | |||
1027 | /// This includes address-mode folding and special icmp tricks. | |||
1028 | /// This function returns true if \p LU can accommodate what \p F | |||
1029 | /// defines and up to 1 base + 1 scaled + offset. | |||
1030 | /// In other words, if \p F has several base registers, this function may | |||
1031 | /// still return true. Therefore, users still need to account for | |||
1032 | /// additional base registers and/or unfolded offsets to derive an | |||
1033 | /// accurate cost model. | |||
1034 | static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, | |||
1035 | const LSRUse &LU, const Formula &F); | |||
1036 | ||||
1037 | // Get the cost of the scaling factor used in F for LU. | |||
1038 | static InstructionCost getScalingFactorCost(const TargetTransformInfo &TTI, | |||
1039 | const LSRUse &LU, const Formula &F, | |||
1040 | const Loop &L); | |||
1041 | ||||
1042 | namespace { | |||
1043 | ||||
1044 | /// This class is used to measure and compare candidate formulae. | |||
1045 | class Cost { | |||
1046 | const Loop *L = nullptr; | |||
1047 | ScalarEvolution *SE = nullptr; | |||
1048 | const TargetTransformInfo *TTI = nullptr; | |||
1049 | TargetTransformInfo::LSRCost C; | |||
1050 | TTI::AddressingModeKind AMK = TTI::AMK_None; | |||
1051 | ||||
1052 | public: | |||
1053 | Cost() = delete; | |||
1054 | Cost(const Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI, | |||
1055 | TTI::AddressingModeKind AMK) : | |||
1056 | L(L), SE(&SE), TTI(&TTI), AMK(AMK) { | |||
1057 | C.Insns = 0; | |||
1058 | C.NumRegs = 0; | |||
1059 | C.AddRecCost = 0; | |||
1060 | C.NumIVMuls = 0; | |||
1061 | C.NumBaseAdds = 0; | |||
1062 | C.ImmCost = 0; | |||
1063 | C.SetupCost = 0; | |||
1064 | C.ScaleCost = 0; | |||
1065 | } | |||
1066 | ||||
1067 | bool isLess(Cost &Other); | |||
1068 | ||||
1069 | void Lose(); | |||
1070 | ||||
1071 | #ifndef NDEBUG | |||
1072 | // Once any of the metrics loses, they must all remain losers. | |||
1073 | bool isValid() { | |||
1074 | return ((C.Insns | C.NumRegs | C.AddRecCost | C.NumIVMuls | C.NumBaseAdds | |||
1075 | | C.ImmCost | C.SetupCost | C.ScaleCost) != ~0u) | |||
1076 | || ((C.Insns & C.NumRegs & C.AddRecCost & C.NumIVMuls & C.NumBaseAdds | |||
1077 | & C.ImmCost & C.SetupCost & C.ScaleCost) == ~0u); | |||
1078 | } | |||
1079 | #endif | |||
1080 | ||||
1081 | bool isLoser() { | |||
1082 | assert(isValid() && "invalid cost"); | |||
1083 | return C.NumRegs == ~0u; | |||
1084 | } | |||
1085 | ||||
1086 | void RateFormula(const Formula &F, | |||
1087 | SmallPtrSetImpl<const SCEV *> &Regs, | |||
1088 | const DenseSet<const SCEV *> &VisitedRegs, | |||
1089 | const LSRUse &LU, | |||
1090 | SmallPtrSetImpl<const SCEV *> *LoserRegs = nullptr); | |||
1091 | ||||
1092 | void print(raw_ostream &OS) const; | |||
1093 | void dump() const; | |||
1094 | ||||
1095 | private: | |||
1096 | void RateRegister(const Formula &F, const SCEV *Reg, | |||
1097 | SmallPtrSetImpl<const SCEV *> &Regs); | |||
1098 | void RatePrimaryRegister(const Formula &F, const SCEV *Reg, | |||
1099 | SmallPtrSetImpl<const SCEV *> &Regs, | |||
1100 | SmallPtrSetImpl<const SCEV *> *LoserRegs); | |||
1101 | }; | |||
1102 | ||||
1103 | /// An operand value in an instruction which is to be replaced with some | |||
1104 | /// equivalent, possibly strength-reduced, replacement. | |||
1105 | struct LSRFixup { | |||
1106 | /// The instruction which will be updated. | |||
1107 | Instruction *UserInst = nullptr; | |||
1108 | ||||
1109 | /// The operand of the instruction which will be replaced. The operand may be | |||
1110 | /// used more than once; every instance will be replaced. | |||
1111 | Value *OperandValToReplace = nullptr; | |||
1112 | ||||
1113 | /// If this user is to use the post-incremented value of an induction | |||
1114 | /// variable, this set is non-empty and holds the loops associated with the | |||
1115 | /// induction variable. | |||
1116 | PostIncLoopSet PostIncLoops; | |||
1117 | ||||
1118 | /// A constant offset to be added to the LSRUse expression. This allows | |||
1119 | /// multiple fixups to share the same LSRUse with different offsets, for | |||
1120 | /// example in an unrolled loop. | |||
1121 | int64_t Offset = 0; | |||
1122 | ||||
1123 | LSRFixup() = default; | |||
1124 | ||||
1125 | bool isUseFullyOutsideLoop(const Loop *L) const; | |||
1126 | ||||
1127 | void print(raw_ostream &OS) const; | |||
1128 | void dump() const; | |||
1129 | }; | |||
1130 | ||||
1131 | /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of sorted | |||
1132 | /// SmallVectors of const SCEV*. | |||
1133 | struct UniquifierDenseMapInfo { | |||
1134 | static SmallVector<const SCEV *, 4> getEmptyKey() { | |||
1135 | SmallVector<const SCEV *, 4> V; | |||
1136 | V.push_back(reinterpret_cast<const SCEV *>(-1)); | |||
1137 | return V; | |||
1138 | } | |||
1139 | ||||
1140 | static SmallVector<const SCEV *, 4> getTombstoneKey() { | |||
1141 | SmallVector<const SCEV *, 4> V; | |||
1142 | V.push_back(reinterpret_cast<const SCEV *>(-2)); | |||
1143 | return V; | |||
1144 | } | |||
1145 | ||||
1146 | static unsigned getHashValue(const SmallVector<const SCEV *, 4> &V) { | |||
1147 | return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); | |||
1148 | } | |||
1149 | ||||
1150 | static bool isEqual(const SmallVector<const SCEV *, 4> &LHS, | |||
1151 | const SmallVector<const SCEV *, 4> &RHS) { | |||
1152 | return LHS == RHS; | |||
1153 | } | |||
1154 | }; | |||
1155 | ||||
1156 | /// This class holds the state that LSR keeps for each use in IVUsers, as well | |||
1157 | /// as uses invented by LSR itself. It includes information about what kinds of | |||
1158 | /// things can be folded into the user, information about the user itself, and | |||
1159 | /// information about how the use may be satisfied. TODO: Represent multiple | |||
1160 | /// users of the same expression in common? | |||
1161 | class LSRUse { | |||
1162 | DenseSet<SmallVector<const SCEV *, 4>, UniquifierDenseMapInfo> Uniquifier; | |||
1163 | ||||
1164 | public: | |||
1165 | /// An enum for a kind of use, indicating what types of scaled and immediate | |||
1166 | /// operands it might support. | |||
1167 | enum KindType { | |||
1168 | Basic, ///< A normal use, with no folding. | |||
1169 | Special, ///< A special case of basic, allowing -1 scales. | |||
1170 | Address, ///< An address use; folding according to TargetLowering | |||
1171 | ICmpZero ///< An equality icmp with both operands folded into one. | |||
1172 | // TODO: Add a generic icmp too? | |||
1173 | }; | |||
1174 | ||||
1175 | using SCEVUseKindPair = PointerIntPair<const SCEV *, 2, KindType>; | |||
1176 | ||||
1177 | KindType Kind; | |||
1178 | MemAccessTy AccessTy; | |||
1179 | ||||
1180 | /// The list of operands which are to be replaced. | |||
1181 | SmallVector<LSRFixup, 8> Fixups; | |||
1182 | ||||
1183 | /// Keep track of the min and max offsets of the fixups. | |||
1184 | int64_t MinOffset = std::numeric_limits<int64_t>::max(); | |||
1185 | int64_t MaxOffset = std::numeric_limits<int64_t>::min(); | |||
1186 | ||||
1187 | /// This records whether all of the fixups using this LSRUse are outside of | |||
1188 | /// the loop, in which case some special-case heuristics may be used. | |||
1189 | bool AllFixupsOutsideLoop = true; | |||
1190 | ||||
1191 | /// RigidFormula is set to true to guarantee that this use will be associated | |||
1192 | /// with a single formula--the one that initially matched. Some SCEV | |||
1193 | /// expressions cannot be expanded. This allows LSR to consider the registers | |||
1194 | /// used by those expressions without the need to expand them later after | |||
1195 | /// changing the formula. | |||
1196 | bool RigidFormula = false; | |||
1197 | ||||
1198 | /// This records the widest use type for any fixup using this | |||
1199 | /// LSRUse. FindUseWithSimilarFormula can't consider uses with different max | |||
1200 | /// fixup widths to be equivalent, because the narrower one may be relying on | |||
1201 | /// the implicit truncation to truncate away bogus bits. | |||
1202 | Type *WidestFixupType = nullptr; | |||
1203 | ||||
1204 | /// A list of ways to build a value that can satisfy this user. After the | |||
1205 | /// list is populated, one of these is selected heuristically and used to | |||
1206 | /// formulate a replacement for OperandValToReplace in UserInst. | |||
1207 | SmallVector<Formula, 12> Formulae; | |||
1208 | ||||
1209 | /// The set of register candidates used by all formulae in this LSRUse. | |||
1210 | SmallPtrSet<const SCEV *, 4> Regs; | |||
1211 | ||||
1212 | LSRUse(KindType K, MemAccessTy AT) : Kind(K), AccessTy(AT) {} | |||
1213 | ||||
1214 | LSRFixup &getNewFixup() { | |||
1215 | Fixups.push_back(LSRFixup()); | |||
1216 | return Fixups.back(); | |||
1217 | } | |||
1218 | ||||
1219 | void pushFixup(LSRFixup &f) { | |||
1220 | Fixups.push_back(f); | |||
1221 | if (f.Offset > MaxOffset) | |||
1222 | MaxOffset = f.Offset; | |||
1223 | if (f.Offset < MinOffset) | |||
1224 | MinOffset = f.Offset; | |||
1225 | } | |||
1226 | ||||
1227 | bool HasFormulaWithSameRegs(const Formula &F) const; | |||
1228 | float getNotSelectedProbability(const SCEV *Reg) const; | |||
1229 | bool InsertFormula(const Formula &F, const Loop &L); | |||
1230 | void DeleteFormula(Formula &F); | |||
1231 | void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses); | |||
1232 | ||||
1233 | void print(raw_ostream &OS) const; | |||
1234 | void dump() const; | |||
1235 | }; | |||
1236 | ||||
1237 | } // end anonymous namespace | |||
1238 | ||||
1239 | static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, | |||
1240 | LSRUse::KindType Kind, MemAccessTy AccessTy, | |||
1241 | GlobalValue *BaseGV, int64_t BaseOffset, | |||
1242 | bool HasBaseReg, int64_t Scale, | |||
1243 | Instruction *Fixup = nullptr); | |||
1244 | ||||
1245 | static unsigned getSetupCost(const SCEV *Reg, unsigned Depth) { | |||
1246 | if (isa<SCEVUnknown>(Reg) || isa<SCEVConstant>(Reg)) | |||
1247 | return 1; | |||
1248 | if (Depth == 0) | |||
1249 | return 0; | |||
1250 | if (const auto *S = dyn_cast<SCEVAddRecExpr>(Reg)) | |||
1251 | return getSetupCost(S->getStart(), Depth - 1); | |||
1252 | if (auto S = dyn_cast<SCEVIntegralCastExpr>(Reg)) | |||
1253 | return getSetupCost(S->getOperand(), Depth - 1); | |||
1254 | if (auto S = dyn_cast<SCEVNAryExpr>(Reg)) | |||
1255 | return std::accumulate(S->op_begin(), S->op_end(), 0, | |||
1256 | [&](unsigned i, const SCEV *Reg) { | |||
1257 | return i + getSetupCost(Reg, Depth - 1); | |||
1258 | }); | |||
1259 | if (auto S = dyn_cast<SCEVUDivExpr>(Reg)) | |||
1260 | return getSetupCost(S->getLHS(), Depth - 1) + | |||
1261 | getSetupCost(S->getRHS(), Depth - 1); | |||
1262 | return 0; | |||
1263 | } | |||
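// [Editorial sketch, not part of the original file] getSetupCost() charges one
// setup instruction per leaf (SCEVUnknown/SCEVConstant) reachable within the
// recursion limit and treats anything deeper as free; the real function also
// special-cases addrecs and udivs. A simplified, self-contained analogue over a
// toy expression tree, using hypothetical names (ToyExpr, toySetupCost) and
// only the standard library:
#include <memory>
#include <numeric>
#include <vector>

struct ToyExpr {
  std::vector<std::unique_ptr<ToyExpr>> Ops; // empty => leaf node
};

static unsigned toySetupCost(const ToyExpr &E, unsigned Depth) {
  if (E.Ops.empty())
    return 1; // leaf: one instruction to materialize in the preheader
  if (Depth == 0)
    return 0; // recursion limit reached: optimistically assume it is free
  return std::accumulate(
      E.Ops.begin(), E.Ops.end(), 0u,
      [&](unsigned Acc, const std::unique_ptr<ToyExpr> &Op) {
        return Acc + toySetupCost(*Op, Depth - 1);
      });
}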
1264 | ||||
1265 | /// Tally up interesting quantities from the given register. | |||
1266 | void Cost::RateRegister(const Formula &F, const SCEV *Reg, | |||
1267 | SmallPtrSetImpl<const SCEV *> &Regs) { | |||
1268 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) { | |||
1269 | // If this is an addrec for another loop, it should be an invariant | |||
1270 | // with respect to L since L is the innermost loop (at least | |||
1271 | // for now LSR only handles innermost loops). | |||
1272 | if (AR->getLoop() != L) { | |||
1273 | // If the AddRec exists, consider its register free and leave it alone. | |||
1274 | if (isExistingPhi(AR, *SE) && AMK != TTI::AMK_PostIndexed) | |||
1275 | return; | |||
1276 | ||||
1277 | // It is bad to allow LSR for current loop to add induction variables | |||
1278 | // for its sibling loops. | |||
1279 | if (!AR->getLoop()->contains(L)) { | |||
1280 | Lose(); | |||
1281 | return; | |||
1282 | } | |||
1283 | ||||
1284 | // Otherwise, it will be an invariant with respect to Loop L. | |||
1285 | ++C.NumRegs; | |||
1286 | return; | |||
1287 | } | |||
1288 | ||||
1289 | unsigned LoopCost = 1; | |||
1290 | if (TTI->isIndexedLoadLegal(TTI->MIM_PostInc, AR->getType()) || | |||
1291 | TTI->isIndexedStoreLegal(TTI->MIM_PostInc, AR->getType())) { | |||
1292 | ||||
1293 | // If the step size matches the base offset, we could use pre-indexed | |||
1294 | // addressing. | |||
1295 | if (AMK == TTI::AMK_PreIndexed) { | |||
1296 | if (auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE))) | |||
1297 | if (Step->getAPInt() == F.BaseOffset) | |||
1298 | LoopCost = 0; | |||
1299 | } else if (AMK == TTI::AMK_PostIndexed) { | |||
1300 | const SCEV *LoopStep = AR->getStepRecurrence(*SE); | |||
1301 | if (isa<SCEVConstant>(LoopStep)) { | |||
1302 | const SCEV *LoopStart = AR->getStart(); | |||
1303 | if (!isa<SCEVConstant>(LoopStart) && | |||
1304 | SE->isLoopInvariant(LoopStart, L)) | |||
1305 | LoopCost = 0; | |||
1306 | } | |||
1307 | } | |||
1308 | } | |||
1309 | C.AddRecCost += LoopCost; | |||
1310 | ||||
1311 | // Add the step value register, if it needs one. | |||
1312 | // TODO: The non-affine case isn't precisely modeled here. | |||
1313 | if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) { | |||
1314 | if (!Regs.count(AR->getOperand(1))) { | |||
1315 | RateRegister(F, AR->getOperand(1), Regs); | |||
1316 | if (isLoser()) | |||
1317 | return; | |||
1318 | } | |||
1319 | } | |||
1320 | } | |||
1321 | ++C.NumRegs; | |||
1322 | ||||
1323 | // Rough heuristic; favor registers which don't require extra setup | |||
1324 | // instructions in the preheader. | |||
1325 | C.SetupCost += getSetupCost(Reg, SetupCostDepthLimit); | |||
1326 | // Ensure we don't, even with the recursion limit, produce invalid costs. | |||
1327 | C.SetupCost = std::min<unsigned>(C.SetupCost, 1 << 16); | |||
1328 | ||||
1329 | C.NumIVMuls += isa<SCEVMulExpr>(Reg) && | |||
1330 | SE->hasComputableLoopEvolution(Reg, L); | |||
1331 | } | |||
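// [Editorial note, added for clarity] Example of the AddRecCost logic above,
// assuming a target that reports pre-indexed loads as legal: for an address
// formula whose register is the addrec {%base,+,4}<%L> and whose BaseOffset is
// also 4, the constant step matches the offset, so under AMK_PreIndexed the
// LoopCost drops to 0; the per-iteration +4 is expected to fold into the memory
// instruction instead of needing a separate add. Under AMK_PostIndexed the same
// happens when the step is constant and the start is a non-constant
// loop-invariant value.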
1332 | ||||
1333 | /// Record this register in the set. If we haven't seen it before, rate | |||
1334 | /// it. Optional LoserRegs provides a way to declare any formula that refers to | |||
1335 | /// one of those regs an instant loser. | |||
1336 | void Cost::RatePrimaryRegister(const Formula &F, const SCEV *Reg, | |||
1337 | SmallPtrSetImpl<const SCEV *> &Regs, | |||
1338 | SmallPtrSetImpl<const SCEV *> *LoserRegs) { | |||
1339 | if (LoserRegs && LoserRegs->count(Reg)) { | |||
1340 | Lose(); | |||
1341 | return; | |||
1342 | } | |||
1343 | if (Regs.insert(Reg).second) { | |||
1344 | RateRegister(F, Reg, Regs); | |||
1345 | if (LoserRegs && isLoser()) | |||
1346 | LoserRegs->insert(Reg); | |||
1347 | } | |||
1348 | } | |||
1349 | ||||
1350 | void Cost::RateFormula(const Formula &F, | |||
1351 | SmallPtrSetImpl<const SCEV *> &Regs, | |||
1352 | const DenseSet<const SCEV *> &VisitedRegs, | |||
1353 | const LSRUse &LU, | |||
1354 | SmallPtrSetImpl<const SCEV *> *LoserRegs) { | |||
1355 | assert(F.isCanonical(*L) && "Cost is accurate only for canonical formula"); | |||
1356 | // Tally up the registers. | |||
1357 | unsigned PrevAddRecCost = C.AddRecCost; | |||
1358 | unsigned PrevNumRegs = C.NumRegs; | |||
1359 | unsigned PrevNumBaseAdds = C.NumBaseAdds; | |||
1360 | if (const SCEV *ScaledReg = F.ScaledReg) { | |||
1361 | if (VisitedRegs.count(ScaledReg)) { | |||
1362 | Lose(); | |||
1363 | return; | |||
1364 | } | |||
1365 | RatePrimaryRegister(F, ScaledReg, Regs, LoserRegs); | |||
1366 | if (isLoser()) | |||
1367 | return; | |||
1368 | } | |||
1369 | for (const SCEV *BaseReg : F.BaseRegs) { | |||
1370 | if (VisitedRegs.count(BaseReg)) { | |||
1371 | Lose(); | |||
1372 | return; | |||
1373 | } | |||
1374 | RatePrimaryRegister(F, BaseReg, Regs, LoserRegs); | |||
1375 | if (isLoser()) | |||
1376 | return; | |||
1377 | } | |||
1378 | ||||
1379 | // Determine how many (unfolded) adds we'll need inside the loop. | |||
1380 | size_t NumBaseParts = F.getNumRegs(); | |||
1381 | if (NumBaseParts > 1) | |||
1382 | // Do not count the base and a possible second register if the target | |||
1383 | // allows folding 2 registers. | |||
1384 | C.NumBaseAdds += | |||
1385 | NumBaseParts - (1 + (F.Scale && isAMCompletelyFolded(*TTI, LU, F))); | |||
1386 | C.NumBaseAdds += (F.UnfoldedOffset != 0); | |||
1387 | ||||
1388 | // Accumulate non-free scaling amounts. | |||
1389 | C.ScaleCost += *getScalingFactorCost(*TTI, LU, F, *L).getValue(); | |||
1390 | ||||
1391 | // Tally up the non-zero immediates. | |||
1392 | for (const LSRFixup &Fixup : LU.Fixups) { | |||
1393 | int64_t O = Fixup.Offset; | |||
1394 | int64_t Offset = (uint64_t)O + F.BaseOffset; | |||
1395 | if (F.BaseGV) | |||
1396 | C.ImmCost += 64; // Handle symbolic values conservatively. | |||
1397 | // TODO: This should probably be the pointer size. | |||
1398 | else if (Offset != 0) | |||
1399 | C.ImmCost += APInt(64, Offset, true).getMinSignedBits(); | |||
1400 | ||||
1401 | // Check with target if this offset with this instruction is | |||
1402 | // specifically not supported. | |||
1403 | if (LU.Kind == LSRUse::Address && Offset != 0 && | |||
1404 | !isAMCompletelyFolded(*TTI, LSRUse::Address, LU.AccessTy, F.BaseGV, | |||
1405 | Offset, F.HasBaseReg, F.Scale, Fixup.UserInst)) | |||
1406 | C.NumBaseAdds++; | |||
1407 | } | |||
1408 | ||||
1409 | // If we don't count instruction cost, exit here. | |||
1410 | if (!InsnsCost) { | |||
1411 | assert(isValid() && "invalid cost"); | |||
1412 | return; | |||
1413 | } | |||
1414 | ||||
1415 | // Treat every new register that exceeds TTI.getNumberOfRegisters() - 1 as | |||
1416 | // an additional instruction (at least a fill). | |||
1417 | // TODO: Need to distinguish register class? | |||
1418 | unsigned TTIRegNum = TTI->getNumberOfRegisters( | |||
1419 | TTI->getRegisterClassForType(false, F.getType())) - 1; | |||
1420 | if (C.NumRegs > TTIRegNum) { | |||
1421 | // The cost already exceeded TTIRegNum, so only newly added registers can | |||
1422 | // add new instructions. | |||
1423 | if (PrevNumRegs > TTIRegNum) | |||
1424 | C.Insns += (C.NumRegs - PrevNumRegs); | |||
1425 | else | |||
1426 | C.Insns += (C.NumRegs - TTIRegNum); | |||
1427 | } | |||
1428 | ||||
1429 | // If an ICmpZero formula does not end in 0, it cannot be replaced by | |||
1430 | // just an add or sub. We'll need to compare the final result of the AddRec. | |||
1431 | // That means we'll need an additional instruction. But if the target can | |||
1432 | // macro-fuse a compare with a branch, don't count this extra instruction. | |||
1433 | // For -10 + {0, +, 1}: | |||
1434 | // i = i + 1; | |||
1435 | // cmp i, 10 | |||
1436 | // | |||
1437 | // For {-10, +, 1}: | |||
1438 | // i = i + 1; | |||
1439 | if (LU.Kind == LSRUse::ICmpZero && !F.hasZeroEnd() && | |||
1440 | !TTI->canMacroFuseCmp()) | |||
1441 | C.Insns++; | |||
1442 | // Each new AddRec adds 1 instruction to calculation. | |||
1443 | C.Insns += (C.AddRecCost - PrevAddRecCost); | |||
1444 | ||||
1445 | // BaseAdds adds instructions for unfolded registers. | |||
1446 | if (LU.Kind != LSRUse::ICmpZero) | |||
1447 | C.Insns += C.NumBaseAdds - PrevNumBaseAdds; | |||
1448 | assert(isValid() && "invalid cost"); | |||
1449 | } | |||
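// [Editorial sketch, not part of the original file] Worked example of the
// register-pressure heuristic above, assuming a hypothetical target exposing 4
// registers of the relevant class, so TTIRegNum = 4 - 1 = 3: growing from 2 to
// 5 used registers charges 5 - 3 = 2 extra instructions, while a formula that
// later grows from 5 to 6 charges only 6 - 5 = 1, because the budget was
// already exceeded. The hypothetical helper extraSpillInsns captures the same
// charge:
#include <algorithm>

static unsigned extraSpillInsns(unsigned PrevNumRegs, unsigned NumRegs,
                                unsigned TTIRegNum) {
  if (NumRegs <= TTIRegNum)
    return 0; // still within the register budget, no fill/spill expected
  return NumRegs - std::max(PrevNumRegs, TTIRegNum);
}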
1450 | ||||
1451 | /// Set this cost to a losing value. | |||
1452 | void Cost::Lose() { | |||
1453 | C.Insns = std::numeric_limits<unsigned>::max(); | |||
1454 | C.NumRegs = std::numeric_limits<unsigned>::max(); | |||
1455 | C.AddRecCost = std::numeric_limits<unsigned>::max(); | |||
1456 | C.NumIVMuls = std::numeric_limits<unsigned>::max(); | |||
1457 | C.NumBaseAdds = std::numeric_limits<unsigned>::max(); | |||
1458 | C.ImmCost = std::numeric_limits<unsigned>::max(); | |||
1459 | C.SetupCost = std::numeric_limits<unsigned>::max(); | |||
1460 | C.ScaleCost = std::numeric_limits<unsigned>::max(); | |||
1461 | } | |||
1462 | ||||
1463 | /// Choose the lower cost. | |||
1464 | bool Cost::isLess(Cost &Other) { | |||
1465 | if (InsnsCost.getNumOccurrences() > 0 && InsnsCost && | |||
1466 | C.Insns != Other.C.Insns) | |||
1467 | return C.Insns < Other.C.Insns; | |||
1468 | return TTI->isLSRCostLess(C, Other.C); | |||
1469 | } | |||
1470 | ||||
1471 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
1472 | void Cost::print(raw_ostream &OS) const { | |||
1473 | if (InsnsCost) | |||
1474 | OS << C.Insns << " instruction" << (C.Insns == 1 ? " " : "s "); | |||
1475 | OS << C.NumRegs << " reg" << (C.NumRegs == 1 ? "" : "s"); | |||
1476 | if (C.AddRecCost != 0) | |||
1477 | OS << ", with addrec cost " << C.AddRecCost; | |||
1478 | if (C.NumIVMuls != 0) | |||
1479 | OS << ", plus " << C.NumIVMuls << " IV mul" | |||
1480 | << (C.NumIVMuls == 1 ? "" : "s"); | |||
1481 | if (C.NumBaseAdds != 0) | |||
1482 | OS << ", plus " << C.NumBaseAdds << " base add" | |||
1483 | << (C.NumBaseAdds == 1 ? "" : "s"); | |||
1484 | if (C.ScaleCost != 0) | |||
1485 | OS << ", plus " << C.ScaleCost << " scale cost"; | |||
1486 | if (C.ImmCost != 0) | |||
1487 | OS << ", plus " << C.ImmCost << " imm cost"; | |||
1488 | if (C.SetupCost != 0) | |||
1489 | OS << ", plus " << C.SetupCost << " setup cost"; | |||
1490 | } | |||
1491 | ||||
1492 | LLVM_DUMP_METHOD void Cost::dump() const { | |||
1493 | print(errs()); errs() << '\n'; | |||
1494 | } | |||
1495 | #endif | |||
1496 | ||||
1497 | /// Test whether this fixup always uses its value outside of the given loop. | |||
1498 | bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const { | |||
1499 | // PHI nodes use their value in their incoming blocks. | |||
1500 | if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) { | |||
1501 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) | |||
1502 | if (PN->getIncomingValue(i) == OperandValToReplace && | |||
1503 | L->contains(PN->getIncomingBlock(i))) | |||
1504 | return false; | |||
1505 | return true; | |||
1506 | } | |||
1507 | ||||
1508 | return !L->contains(UserInst); | |||
1509 | } | |||
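// [Editorial note, added for clarity] Example for the PHI special case above: a
// PHI in the loop's exit block that receives OperandValToReplace along an edge
// from a block inside the loop effectively uses the value at the end of that
// in-loop predecessor, so this returns false even though the PHI itself sits
// outside the loop.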
1510 | ||||
1511 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
1512 | void LSRFixup::print(raw_ostream &OS) const { | |||
1513 | OS << "UserInst="; | |||
1514 | // Store is common and interesting enough to be worth special-casing. | |||
1515 | if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) { | |||
1516 | OS << "store "; | |||
1517 | Store->getOperand(0)->printAsOperand(OS, /*PrintType=*/false); | |||
1518 | } else if (UserInst->getType()->isVoidTy()) | |||
1519 | OS << UserInst->getOpcodeName(); | |||
1520 | else | |||
1521 | UserInst->printAsOperand(OS, /*PrintType=*/false); | |||
1522 | ||||
1523 | OS << ", OperandValToReplace="; | |||
1524 | OperandValToReplace->printAsOperand(OS, /*PrintType=*/false); | |||
1525 | ||||
1526 | for (const Loop *PIL : PostIncLoops) { | |||
1527 | OS << ", PostIncLoop="; | |||
1528 | PIL->getHeader()->printAsOperand(OS, /*PrintType=*/false); | |||
1529 | } | |||
1530 | ||||
1531 | if (Offset != 0) | |||
1532 | OS << ", Offset=" << Offset; | |||
1533 | } | |||
1534 | ||||
1535 | LLVM_DUMP_METHOD void LSRFixup::dump() const { | |||
1536 | print(errs()); errs() << '\n'; | |||
1537 | } | |||
1538 | #endif | |||
1539 | ||||
1540 | /// Test whether this use has a formula with the same registers as the given | |||
1541 | /// formula. | |||
1542 | bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const { | |||
1543 | SmallVector<const SCEV *, 4> Key = F.BaseRegs; | |||
1544 | if (F.ScaledReg) Key.push_back(F.ScaledReg); | |||
1545 | // Unstable sort by host order ok, because this is only used for uniquifying. | |||
1546 | llvm::sort(Key); | |||
1547 | return Uniquifier.count(Key); | |||
1548 | } | |||
1549 | ||||
1550 | /// Return the probability of selecting a formula that does not reference Reg. | |||
1551 | float LSRUse::getNotSelectedProbability(const SCEV *Reg) const { | |||
1552 | unsigned FNum = 0; | |||
1553 | for (const Formula &F : Formulae) | |||
1554 | if (F.referencesReg(Reg)) | |||
1555 | FNum++; | |||
1556 | return ((float)(Formulae.size() - FNum)) / Formulae.size(); | |||
1557 | } | |||
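// [Editorial note, added for clarity] Worked example for the probability above:
// if this use has 8 formulae and 3 of them reference Reg, the chance that a
// formula picked uniformly at random avoids Reg is (8 - 3) / 8 = 0.625. The
// division assumes Formulae is non-empty.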
1558 | ||||
1559 | /// If the given formula has not yet been inserted, add it to the list, and | |||
1560 | /// return true. Return false otherwise. The formula must be in canonical form. | |||
1561 | bool LSRUse::InsertFormula(const Formula &F, const Loop &L) { | |||
1562 | assert(F.isCanonical(L) && "Invalid canonical representation"); | |||
1563 | ||||
1564 | if (!Formulae.empty() && RigidFormula) | |||
1565 | return false; | |||
1566 | ||||
1567 | SmallVector<const SCEV *, 4> Key = F.BaseRegs; | |||
1568 | if (F.ScaledReg) Key.push_back(F.ScaledReg); | |||
1569 | // Unstable sort by host order ok, because this is only used for uniquifying. | |||
1570 | llvm::sort(Key); | |||
1571 | ||||
1572 | if (!Uniquifier.insert(Key).second) | |||
1573 | return false; | |||
1574 | ||||
1575 | // Using a register to hold the value of 0 is not profitable. | |||
1576 | assert((!F.ScaledReg || !F.ScaledReg->isZero()) && | |||
1577 | "Zero allocated in a scaled register!"); | |||
1578 | #ifndef NDEBUG | |||
1579 | for (const SCEV *BaseReg : F.BaseRegs) | |||
1580 | assert(!BaseReg->isZero() && "Zero allocated in a base register!"); | |||
1581 | #endif | |||
1582 | ||||
1583 | // Add the formula to the list. | |||
1584 | Formulae.push_back(F); | |||
1585 | ||||
1586 | // Record registers now being used by this use. | |||
1587 | Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); | |||
1588 | if (F.ScaledReg) | |||
1589 | Regs.insert(F.ScaledReg); | |||
1590 | ||||
1591 | return true; | |||
1592 | } | |||
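// [Editorial note, added for clarity] The Uniquifier key is just the formula's
// registers in a canonical (sorted) order, so two formulae that differ only in
// the order of their base registers, e.g. reg(A) + reg(B) versus reg(B) +
// reg(A), produce the same key and the second InsertFormula call returns false
// without adding a duplicate.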
1593 | ||||
1594 | /// Remove the given formula from this use's list. | |||
1595 | void LSRUse::DeleteFormula(Formula &F) { | |||
1596 | if (&F != &Formulae.back()) | |||
1597 | std::swap(F, Formulae.back()); | |||
1598 | Formulae.pop_back(); | |||
1599 | } | |||
1600 | ||||
1601 | /// Recompute the Regs field, and update RegUses. | |||
1602 | void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) { | |||
1603 | // Now that we've filtered out some formulae, recompute the Regs set. | |||
1604 | SmallPtrSet<const SCEV *, 4> OldRegs = std::move(Regs); | |||
1605 | Regs.clear(); | |||
1606 | for (const Formula &F : Formulae) { | |||
1607 | if (F.ScaledReg) Regs.insert(F.ScaledReg); | |||
1608 | Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); | |||
1609 | } | |||
1610 | ||||
1611 | // Update the RegTracker. | |||
1612 | for (const SCEV *S : OldRegs) | |||
1613 | if (!Regs.count(S)) | |||
1614 | RegUses.dropRegister(S, LUIdx); | |||
1615 | } | |||
1616 | ||||
1617 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
1618 | void LSRUse::print(raw_ostream &OS) const { | |||
1619 | OS << "LSR Use: Kind="; | |||
1620 | switch (Kind) { | |||
1621 | case Basic: OS << "Basic"; break; | |||
1622 | case Special: OS << "Special"; break; | |||
1623 | case ICmpZero: OS << "ICmpZero"; break; | |||
1624 | case Address: | |||
1625 | OS << "Address of "; | |||
1626 | if (AccessTy.MemTy->isPointerTy()) | |||
1627 | OS << "pointer"; // the full pointer type could be really verbose | |||
1628 | else { | |||
1629 | OS << *AccessTy.MemTy; | |||
1630 | } | |||
1631 | ||||
1632 | OS << " in addrspace(" << AccessTy.AddrSpace << ')'; | |||
1633 | } | |||
1634 | ||||
1635 | OS << ", Offsets={"; | |||
1636 | bool NeedComma = false; | |||
1637 | for (const LSRFixup &Fixup : Fixups) { | |||
1638 | if (NeedComma) OS << ','; | |||
1639 | OS << Fixup.Offset; | |||
1640 | NeedComma = true; | |||
1641 | } | |||
1642 | OS << '}'; | |||
1643 | ||||
1644 | if (AllFixupsOutsideLoop) | |||
1645 | OS << ", all-fixups-outside-loop"; | |||
1646 | ||||
1647 | if (WidestFixupType) | |||
1648 | OS << ", widest fixup type: " << *WidestFixupType; | |||
1649 | } | |||
1650 | ||||
1651 | LLVM_DUMP_METHOD void LSRUse::dump() const { | |||
1652 | print(errs()); errs() << '\n'; | |||
1653 | } | |||
1654 | #endif | |||
1655 | ||||
1656 | static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, | |||
1657 | LSRUse::KindType Kind, MemAccessTy AccessTy, | |||
1658 | GlobalValue *BaseGV, int64_t BaseOffset, | |||
1659 | bool HasBaseReg, int64_t Scale, | |||
1660 | Instruction *Fixup/*= nullptr*/) { | |||
1661 | switch (Kind) { | |||
1662 | case LSRUse::Address: | |||
1663 | return TTI.isLegalAddressingMode(AccessTy.MemTy, BaseGV, BaseOffset, | |||
1664 | HasBaseReg, Scale, AccessTy.AddrSpace, Fixup); | |||
1665 | ||||
1666 | case LSRUse::ICmpZero: | |||
1667 | // There's not even a target hook for querying whether it would be legal to | |||
1668 | // fold a GV into an ICmp. | |||
1669 | if (BaseGV) | |||
1670 | return false; | |||
1671 | ||||
1672 | // ICmp only has two operands; don't allow more than two non-trivial parts. | |||
1673 | if (Scale != 0 && HasBaseReg && BaseOffset != 0) | |||
1674 | return false; | |||
1675 | ||||
1676 | // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by | |||
1677 | // putting the scaled register in the other operand of the icmp. | |||
1678 | if (Scale != 0 && Scale != -1) | |||
1679 | return false; | |||
1680 | ||||
1681 | // If we have low-level target information, ask the target if it can fold an | |||
1682 | // integer immediate on an icmp. | |||
1683 | if (BaseOffset != 0) { | |||
1684 | // We have one of: | |||
1685 | // ICmpZero BaseReg + BaseOffset => ICmp BaseReg, -BaseOffset | |||
1686 | // ICmpZero -1*ScaleReg + BaseOffset => ICmp ScaleReg, BaseOffset | |||
1687 | // BaseOffset (negated below when Scale == 0) is the ICmp immediate. | |||
1688 | if (Scale == 0) | |||
1689 | // The cast does the right thing with | |||
1690 | // std::numeric_limits<int64_t>::min(). | |||
1691 | BaseOffset = -(uint64_t)BaseOffset; | |||
1692 | return TTI.isLegalICmpImmediate(BaseOffset); | |||
1693 | } | |||
1694 | ||||
1695 | // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg | |||
1696 | return true; | |||
1697 | ||||
1698 | case LSRUse::Basic: | |||
1699 | // Only handle single-register values. | |||
1700 | return !BaseGV && Scale == 0 && BaseOffset == 0; | |||
1701 | ||||
1702 | case LSRUse::Special: | |||
1703 | // Special case Basic to handle -1 scales. | |||
1704 | return !BaseGV && (Scale == 0 || Scale == -1) && BaseOffset == 0; | |||
1705 | } | |||
1706 | ||||
1707 | llvm_unreachable("Invalid LSRUse Kind!"); | |||
1708 | } | |||
1709 | ||||
1710 | static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, | |||
1711 | int64_t MinOffset, int64_t MaxOffset, | |||
1712 | LSRUse::KindType Kind, MemAccessTy AccessTy, | |||
1713 | GlobalValue *BaseGV, int64_t BaseOffset, | |||
1714 | bool HasBaseReg, int64_t Scale) { | |||
1715 | // Check for overflow. | |||
1716 | if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) != | |||
1717 | (MinOffset > 0)) | |||
1718 | return false; | |||
1719 | MinOffset = (uint64_t)BaseOffset + MinOffset; | |||
1720 | if (((int64_t)((uint64_t)BaseOffset + MaxOffset) > BaseOffset) != | |||
1721 | (MaxOffset > 0)) | |||
1722 | return false; | |||
1723 | MaxOffset = (uint64_t)BaseOffset + MaxOffset; | |||
1724 | ||||
1725 | return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MinOffset, | |||
1726 | HasBaseReg, Scale) && | |||
1727 | isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MaxOffset, | |||
1728 | HasBaseReg, Scale); | |||
1729 | } | |||
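// [Editorial sketch, not part of the original file] The two "check for
// overflow" tests above detect signed wraparound in BaseOffset + MinOffset and
// BaseOffset + MaxOffset by adding in uint64_t and checking that the result
// moved in the direction the signed offset implies. A self-contained
// equivalent, using the hypothetical helper name addWouldOverflow:
#include <cstdint>

static bool addWouldOverflow(int64_t Base, int64_t Off) {
  int64_t Sum = (int64_t)((uint64_t)Base + (uint64_t)Off);
  // A positive Off must make the sum grow and a non-positive Off must not;
  // any disagreement means the signed addition wrapped.
  return (Sum > Base) != (Off > 0);
}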
1730 | ||||
1731 | static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, | |||
1732 | int64_t MinOffset, int64_t MaxOffset, | |||
1733 | LSRUse::KindType Kind, MemAccessTy AccessTy, | |||
1734 | const Formula &F, const Loop &L) { | |||
1735 | // For the purpose of isAMCompletelyFolded either having a canonical formula | |||
1736 | // or a scale not equal to zero is correct. | |||
1737 | // Problems may arise from non canonical formulae having a scale == 0. | |||
1738 | // Strictly speaking it would be best to just rely on canonical formulae. | |||
1739 | // However, when we generate the scaled formulae, we first check that the | |||
1740 | // scaling factor is profitable before computing the actual ScaledReg, for | |||
1741 | // the sake of compile time. | |||
1742 | assert((F.isCanonical(L) || F.Scale != 0)); | |||
1743 | return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, | |||
1744 | F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale); | |||
1745 | } | |||
1746 | ||||
1747 | /// Test whether we know how to expand the current formula. | |||
1748 | static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset, | |||
1749 | int64_t MaxOffset, LSRUse::KindType Kind, | |||
1750 | MemAccessTy AccessTy, GlobalValue *BaseGV, | |||
1751 | int64_t BaseOffset, bool HasBaseReg, int64_t Scale) { | |||
1752 | // We know how to expand completely foldable formulae. | |||
1753 | return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV, | |||
1754 | BaseOffset, HasBaseReg, Scale) || | |||
1755 | // Or formulae that use a base register produced by a sum of base | |||
1756 | // registers. | |||
1757 | (Scale == 1 && | |||
1758 | isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, | |||
1759 | BaseGV, BaseOffset, true, 0)); | |||
1760 | } | |||
1761 | ||||
1762 | static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset, | |||
1763 | int64_t MaxOffset, LSRUse::KindType Kind, | |||
1764 | MemAccessTy AccessTy, const Formula &F) { | |||
1765 | return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV, | |||
1766 | F.BaseOffset, F.HasBaseReg, F.Scale); | |||
1767 | } | |||
1768 | ||||
1769 | static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, | |||
1770 | const LSRUse &LU, const Formula &F) { | |||
1771 | // Target may want to look at the user instructions. | |||
1772 | if (LU.Kind == LSRUse::Address && TTI.LSRWithInstrQueries()) { | |||
1773 | for (const LSRFixup &Fixup : LU.Fixups) | |||
1774 | if (!isAMCompletelyFolded(TTI, LSRUse::Address, LU.AccessTy, F.BaseGV, | |||
1775 | (F.BaseOffset + Fixup.Offset), F.HasBaseReg, | |||
1776 | F.Scale, Fixup.UserInst)) | |||
1777 | return false; | |||
1778 | return true; | |||
1779 | } | |||
1780 | ||||
1781 | return isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, | |||
1782 | LU.AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg, | |||
1783 | F.Scale); | |||
1784 | } | |||
1785 | ||||
1786 | static InstructionCost getScalingFactorCost(const TargetTransformInfo &TTI, | |||
1787 | const LSRUse &LU, const Formula &F, | |||
1788 | const Loop &L) { | |||
1789 | if (!F.Scale) | |||
1790 | return 0; | |||
1791 | ||||
1792 | // If the use is not completely folded in that instruction, we will have to | |||
1793 | // pay an extra cost only for scale != 1. | |||
1794 | if (!isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, | |||
1795 | LU.AccessTy, F, L)) | |||
1796 | return F.Scale != 1; | |||
1797 | ||||
1798 | switch (LU.Kind) { | |||
1799 | case LSRUse::Address: { | |||
1800 | // Check the scaling factor cost with both the min and max offsets. | |||
1801 | InstructionCost ScaleCostMinOffset = TTI.getScalingFactorCost( | |||
1802 | LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MinOffset, F.HasBaseReg, | |||
1803 | F.Scale, LU.AccessTy.AddrSpace); | |||
1804 | InstructionCost ScaleCostMaxOffset = TTI.getScalingFactorCost( | |||
1805 | LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MaxOffset, F.HasBaseReg, | |||
1806 | F.Scale, LU.AccessTy.AddrSpace); | |||
1807 | ||||
1808 | assert(ScaleCostMinOffset.isValid() && ScaleCostMaxOffset.isValid() && | |||
1809 | "Legal addressing mode has an illegal cost!"); | |||
1810 | return std::max(ScaleCostMinOffset, ScaleCostMaxOffset); | |||
1811 | } | |||
1812 | case LSRUse::ICmpZero: | |||
1813 | case LSRUse::Basic: | |||
1814 | case LSRUse::Special: | |||
1815 | // The use is completely folded, i.e., everything is folded into the | |||
1816 | // instruction. | |||
1817 | return 0; | |||
1818 | } | |||
1819 | ||||
1820 | llvm_unreachable("Invalid LSRUse Kind!"); | |||
1821 | } | |||
1822 | ||||
1823 | static bool isAlwaysFoldable(const TargetTransformInfo &TTI, | |||
1824 | LSRUse::KindType Kind, MemAccessTy AccessTy, | |||
1825 | GlobalValue *BaseGV, int64_t BaseOffset, | |||
1826 | bool HasBaseReg) { | |||
1827 | // Fast-path: zero is always foldable. | |||
1828 | if (BaseOffset == 0 && !BaseGV) return true; | |||
1829 | ||||
1830 | // Conservatively, create an address with an immediate and a | |||
1831 | // base and a scale. | |||
1832 | int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1; | |||
1833 | ||||
1834 | // Canonicalize a scale of 1 to a base register if the formula doesn't | |||
1835 | // already have a base register. | |||
1836 | if (!HasBaseReg && Scale == 1) { | |||
1837 | Scale = 0; | |||
1838 | HasBaseReg = true; | |||
1839 | } | |||
1840 | ||||
1841 | return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, BaseOffset, | |||
1842 | HasBaseReg, Scale); | |||
1843 | } | |||
1844 | ||||
1845 | static bool isAlwaysFoldable(const TargetTransformInfo &TTI, | |||
1846 | ScalarEvolution &SE, int64_t MinOffset, | |||
1847 | int64_t MaxOffset, LSRUse::KindType Kind, | |||
1848 | MemAccessTy AccessTy, const SCEV *S, | |||
1849 | bool HasBaseReg) { | |||
1850 | // Fast-path: zero is always foldable. | |||
1851 | if (S->isZero()) return true; | |||
1852 | ||||
1853 | // Conservatively, create an address with an immediate and a | |||
1854 | // base and a scale. | |||
1855 | int64_t BaseOffset = ExtractImmediate(S, SE); | |||
1856 | GlobalValue *BaseGV = ExtractSymbol(S, SE); | |||
1857 | ||||
1858 | // If there's anything else involved, it's not foldable. | |||
1859 | if (!S->isZero()) return false; | |||
1860 | ||||
1861 | // Fast-path: zero is always foldable. | |||
1862 | if (BaseOffset == 0 && !BaseGV) return true; | |||
1863 | ||||
1864 | // Conservatively, create an address with an immediate and a | |||
1865 | // base and a scale. | |||
1866 | int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1; | |||
1867 | ||||
1868 | return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV, | |||
1869 | BaseOffset, HasBaseReg, Scale); | |||
1870 | } | |||
1871 | ||||
1872 | namespace { | |||
1873 | ||||
1874 | /// An individual increment in a Chain of IV increments. Relate an IV user to | |||
1875 | /// an expression that computes the IV it uses from the IV used by the previous | |||
1876 | /// link in the Chain. | |||
1877 | /// | |||
1878 | /// For the head of a chain, IncExpr holds the absolute SCEV expression for the | |||
1879 | /// original IVOperand. The head of the chain's IVOperand is only valid during | |||
1880 | /// chain collection, before LSR replaces IV users. During chain generation, | |||
1881 | /// IncExpr can be used to find the new IVOperand that computes the same | |||
1882 | /// expression. | |||
1883 | struct IVInc { | |||
1884 | Instruction *UserInst; | |||
1885 | Value* IVOperand; | |||
1886 | const SCEV *IncExpr; | |||
1887 | ||||
1888 | IVInc(Instruction *U, Value *O, const SCEV *E) | |||
1889 | : UserInst(U), IVOperand(O), IncExpr(E) {} | |||
1890 | }; | |||
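// [Editorial note, added for clarity] Illustrative shape of a chain, not taken
// from a real test case: for a loop that walks a buffer one byte at a time,
//
//   %p  = phi i8* [ %base, %preheader ], [ %p2, %latch ]
//   %v0 = load i8, i8* %p
//   %p1 = getelementptr i8, i8* %p, i64 1
//   %v1 = load i8, i8* %p1
//   %p2 = getelementptr i8, i8* %p1, i64 1
//
// each IVInc ties a user (here a load or GEP) to the small step from the IV
// value produced by the previous link, while the head's IncExpr carries the
// full SCEV of its operand. Keeping such a chain lets LSR reuse one live
// pointer across the links instead of rewriting every user against the loop's
// canonical induction variable.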
1891 | ||||
1892 | // The list of IV increments in program order. We typically add the head of a | |||
1893 | // chain without finding subsequent links. | |||
1894 | struct IVChain { | |||
1895 | SmallVector<IVInc, 1> Incs; | |||
1896 | const SCEV *ExprBase = nullptr; | |||
1897 | ||||
1898 | IVChain() = default; | |||
1899 | IVChain(const IVInc &Head, const SCEV *Base) | |||
1900 | : Incs(1, Head), ExprBase(Base) {} | |||
1901 | ||||
1902 | using const_iterator = SmallVectorImpl<IVInc>::const_iterator; | |||
1903 | ||||
1904 | // Return the first increment in the chain. | |||
1905 | const_iterator begin() const { | |||
1906 | assert(!Incs.empty()); | |||
1907 | return std::next(Incs.begin()); | |||
1908 | } | |||
1909 | const_iterator end() const { | |||
1910 | return Incs.end(); | |||
1911 | } | |||
1912 | ||||
1913 | // Returns true if this chain contains any increments. | |||
1914 | bool hasIncs() const { return Incs.size() >= 2; } | |||
1915 | ||||
1916 | // Add an IVInc to the end of this chain. | |||
1917 | void add(const IVInc &X) { Incs.push_back(X); } | |||
1918 | ||||
1919 | // Returns the last UserInst in the chain. | |||
1920 | Instruction *tailUserInst() const { return Incs.back().UserInst; } | |||
1921 | ||||
1922 | // Returns true if IncExpr can be profitably added to this chain. | |||
1923 | bool isProfitableIncrement(const SCEV *OperExpr, | |||
1924 | const SCEV *IncExpr, | |||
1925 | ScalarEvolution&); | |||
1926 | }; | |||
1927 | ||||
1928 | /// Helper for CollectChains to track multiple IV increment uses. Distinguish | |||
1929 | /// between FarUsers that definitely cross IV increments and NearUsers that may | |||
1930 | /// be used between IV increments. | |||
1931 | struct ChainUsers { | |||
1932 | SmallPtrSet<Instruction*, 4> FarUsers; | |||
1933 | SmallPtrSet<Instruction*, 4> NearUsers; | |||
1934 | }; | |||
1935 | ||||
1936 | /// This class holds state for the main loop strength reduction logic. | |||
1937 | class LSRInstance { | |||
1938 | IVUsers &IU; | |||
1939 | ScalarEvolution &SE; | |||
1940 | DominatorTree &DT; | |||
1941 | LoopInfo &LI; | |||
1942 | AssumptionCache &AC; | |||
1943 | TargetLibraryInfo &TLI; | |||
1944 | const TargetTransformInfo &TTI; | |||
1945 | Loop *const L; | |||
1946 | MemorySSAUpdater *MSSAU; | |||
1947 | TTI::AddressingModeKind AMK; | |||
1948 | bool Changed = false; | |||
1949 | ||||
1950 | /// This is the insert position at which the current loop's induction variable | |||
1951 | /// increment should be placed. In simple loops, this is the latch block's | |||
1952 | /// terminator. But in more complicated cases, this is a position which will | |||
1953 | /// dominate all the in-loop post-increment users. | |||
1954 | Instruction *IVIncInsertPos = nullptr; | |||
1955 | ||||
1956 | /// Interesting factors between use strides. | |||
1957 | /// | |||
1958 | /// We explicitly use a SetVector which contains a SmallSet, instead of the | |||
1959 | /// default, a SmallDenseSet, because we need to use the full range of | |||
1960 | /// int64_ts, and there's currently no good way of doing that with | |||
1961 | /// SmallDenseSet. | |||
1962 | SetVector<int64_t, SmallVector<int64_t, 8>, SmallSet<int64_t, 8>> Factors; | |||
1963 | ||||
1964 | /// Interesting use types, to facilitate truncation reuse. | |||
1965 | SmallSetVector<Type *, 4> Types; | |||
1966 | ||||
1967 | /// The list of interesting uses. | |||
1968 | mutable SmallVector<LSRUse, 16> Uses; | |||
1969 | ||||
1970 | /// Track which uses use which register candidates. | |||
1971 | RegUseTracker RegUses; | |||
1972 | ||||
1973 | // Limit the number of chains to avoid quadratic behavior. We don't expect to | |||
1974 | // have more than a few IV increment chains in a loop. Missing a Chain falls | |||
1975 | // back to normal LSR behavior for those uses. | |||
1976 | static const unsigned MaxChains = 8; | |||
1977 | ||||
1978 | /// IV users can form a chain of IV increments. | |||
1979 | SmallVector<IVChain, MaxChains> IVChainVec; | |||
1980 | ||||
1981 | /// IV users that belong to profitable IVChains. | |||
1982 | SmallPtrSet<Use*, MaxChains> IVIncSet; | |||
1983 | ||||
1984 | /// Induction variables that were generated and inserted by the SCEV Expander. | |||
1985 | SmallVector<llvm::WeakVH, 2> ScalarEvolutionIVs; | |||
1986 | ||||
1987 | void OptimizeShadowIV(); | |||
1988 | bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse); | |||
1989 | ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse); | |||
1990 | void OptimizeLoopTermCond(); | |||
1991 | ||||
1992 | void ChainInstruction(Instruction *UserInst, Instruction *IVOper, | |||
1993 | SmallVectorImpl<ChainUsers> &ChainUsersVec); | |||
1994 | void FinalizeChain(IVChain &Chain); | |||
1995 | void CollectChains(); | |||
1996 | void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, | |||
1997 | SmallVectorImpl<WeakTrackingVH> &DeadInsts); | |||
1998 | ||||
1999 | void CollectInterestingTypesAndFactors(); | |||
2000 | void CollectFixupsAndInitialFormulae(); | |||
2001 | ||||
2002 | // Support for sharing of LSRUses between LSRFixups. | |||
2003 | using UseMapTy = DenseMap<LSRUse::SCEVUseKindPair, size_t>; | |||
2004 | UseMapTy UseMap; | |||
2005 | ||||
2006 | bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg, | |||
2007 | LSRUse::KindType Kind, MemAccessTy AccessTy); | |||
2008 | ||||
2009 | std::pair<size_t, int64_t> getUse(const SCEV *&Expr, LSRUse::KindType Kind, | |||
2010 | MemAccessTy AccessTy); | |||
2011 | ||||
2012 | void DeleteUse(LSRUse &LU, size_t LUIdx); | |||
2013 | ||||
2014 | LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU); | |||
2015 | ||||
2016 | void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx); | |||
2017 | void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx); | |||
2018 | void CountRegisters(const Formula &F, size_t LUIdx); | |||
2019 | bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F); | |||
2020 | ||||
2021 | void CollectLoopInvariantFixupsAndFormulae(); | |||
2022 | ||||
2023 | void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base, | |||
2024 | unsigned Depth = 0); | |||
2025 | ||||
2026 | void GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx, | |||
2027 | const Formula &Base, unsigned Depth, | |||
2028 | size_t Idx, bool IsScaledReg = false); | |||
2029 | void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base); | |||
2030 | void GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx, | |||
2031 | const Formula &Base, size_t Idx, | |||
2032 | bool IsScaledReg = false); | |||
2033 | void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base); | |||
2034 | void GenerateConstantOffsetsImpl(LSRUse &LU, unsigned LUIdx, | |||
2035 | const Formula &Base, | |||
2036 | const SmallVectorImpl<int64_t> &Worklist, | |||
2037 | size_t Idx, bool IsScaledReg = false); | |||
2038 | void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base); | |||
2039 | void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base); | |||
2040 | void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base); | |||
2041 | void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base); | |||
2042 | void GenerateCrossUseConstantOffsets(); | |||
2043 | void GenerateAllReuseFormulae(); | |||
2044 | ||||
2045 | void FilterOutUndesirableDedicatedRegisters(); | |||
2046 | ||||
2047 | size_t EstimateSearchSpaceComplexity() const; | |||
2048 | void NarrowSearchSpaceByDetectingSupersets(); | |||
2049 | void NarrowSearchSpaceByCollapsingUnrolledCode(); | |||
2050 | void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(); | |||
2051 | void NarrowSearchSpaceByFilterFormulaWithSameScaledReg(); | |||
2052 | void NarrowSearchSpaceByFilterPostInc(); | |||
2053 | void NarrowSearchSpaceByDeletingCostlyFormulas(); | |||
2054 | void NarrowSearchSpaceByPickingWinnerRegs(); | |||
2055 | void NarrowSearchSpaceUsingHeuristics(); | |||
2056 | ||||
2057 | void SolveRecurse(SmallVectorImpl<const Formula *> &Solution, | |||
2058 | Cost &SolutionCost, | |||
2059 | SmallVectorImpl<const Formula *> &Workspace, | |||
2060 | const Cost &CurCost, | |||
2061 | const SmallPtrSet<const SCEV *, 16> &CurRegs, | |||
2062 | DenseSet<const SCEV *> &VisitedRegs) const; | |||
2063 | void Solve(SmallVectorImpl<const Formula *> &Solution) const; | |||
2064 | ||||
2065 | BasicBlock::iterator | |||
2066 | HoistInsertPosition(BasicBlock::iterator IP, | |||
2067 | const SmallVectorImpl<Instruction *> &Inputs) const; | |||
2068 | BasicBlock::iterator | |||
2069 | AdjustInsertPositionForExpand(BasicBlock::iterator IP, | |||
2070 | const LSRFixup &LF, | |||
2071 | const LSRUse &LU, | |||
2072 | SCEVExpander &Rewriter) const; | |||
2073 | ||||
2074 | Value *Expand(const LSRUse &LU, const LSRFixup &LF, const Formula &F, | |||
2075 | BasicBlock::iterator IP, SCEVExpander &Rewriter, | |||
2076 | SmallVectorImpl<WeakTrackingVH> &DeadInsts) const; | |||
2077 | void RewriteForPHI(PHINode *PN, const LSRUse &LU, const LSRFixup &LF, | |||
2078 | const Formula &F, SCEVExpander &Rewriter, | |||
2079 | SmallVectorImpl<WeakTrackingVH> &DeadInsts) const; | |||
2080 | void Rewrite(const LSRUse &LU, const LSRFixup &LF, const Formula &F, | |||
2081 | SCEVExpander &Rewriter, | |||
2082 | SmallVectorImpl<WeakTrackingVH> &DeadInsts) const; | |||
2083 | void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution); | |||
2084 | ||||
2085 | public: | |||
2086 | LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, DominatorTree &DT, | |||
2087 | LoopInfo &LI, const TargetTransformInfo &TTI, AssumptionCache &AC, | |||
2088 | TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU); | |||
2089 | ||||
2090 | bool getChanged() const { return Changed; } | |||
2091 | const SmallVectorImpl<WeakVH> &getScalarEvolutionIVs() const { | |||
2092 | return ScalarEvolutionIVs; | |||
2093 | } | |||
2094 | ||||
2095 | void print_factors_and_types(raw_ostream &OS) const; | |||
2096 | void print_fixups(raw_ostream &OS) const; | |||
2097 | void print_uses(raw_ostream &OS) const; | |||
2098 | void print(raw_ostream &OS) const; | |||
2099 | void dump() const; | |||
2100 | }; | |||
2101 | ||||
2102 | } // end anonymous namespace | |||
2103 | ||||
2104 | /// If IV is used in a int-to-float cast inside the loop then try to eliminate | |||
2105 | /// the cast operation. | |||
2106 | void LSRInstance::OptimizeShadowIV() { | |||
2107 | const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L); | |||
2108 | if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) | |||
2109 | return; | |||
2110 | ||||
2111 | for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); | |||
2112 | UI != E; /* empty */) { | |||
2113 | IVUsers::const_iterator CandidateUI = UI; | |||
2114 | ++UI; | |||
2115 | Instruction *ShadowUse = CandidateUI->getUser(); | |||
2116 | Type *DestTy = nullptr; | |||
2117 | bool IsSigned = false; | |||
2118 | ||||
2119 | /* If shadow use is a int->float cast then insert a second IV | |||
2120 | to eliminate this cast. | |||
2121 | ||||
2122 | for (unsigned i = 0; i < n; ++i) | |||
2123 | foo((double)i); | |||
2124 | ||||
2125 | is transformed into | |||
2126 | ||||
2127 | double d = 0.0; | |||
2128 | for (unsigned i = 0; i < n; ++i, ++d) | |||
2129 | foo(d); | |||
2130 | */ | |||
2131 | if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) { | |||
2132 | IsSigned = false; | |||
2133 | DestTy = UCast->getDestTy(); | |||
2134 | } | |||
2135 | else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) { | |||
2136 | IsSigned = true; | |||
2137 | DestTy = SCast->getDestTy(); | |||
2138 | } | |||
2139 | if (!DestTy) continue; | |||
2140 | ||||
2141 | // If target does not support DestTy natively then do not apply | |||
2142 | // this transformation. | |||
2143 | if (!TTI.isTypeLegal(DestTy)) continue; | |||
2144 | ||||
2145 | PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0)); | |||
2146 | if (!PH) continue; | |||
2147 | if (PH->getNumIncomingValues() != 2) continue; | |||
2148 | ||||
2149 | // If the calculation in integers overflows, the result in FP type will | |||
2150 | // differ. So we can only do this transformation if we are guaranteed not to | |||
2151 | // deal with overflowing values. | |||
2152 | const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PH)); | |||
2153 | if (!AR) continue; | |||
2154 | if (IsSigned && !AR->hasNoSignedWrap()) continue; | |||
2155 | if (!IsSigned && !AR->hasNoUnsignedWrap()) continue; | |||
2156 | ||||
2157 | Type *SrcTy = PH->getType(); | |||
2158 | int Mantissa = DestTy->getFPMantissaWidth(); | |||
2159 | if (Mantissa == -1) continue; | |||
2160 | if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa) | |||
2161 | continue; | |||
2162 | ||||
2163 | unsigned Entry, Latch; | |||
2164 | if (PH->getIncomingBlock(0) == L->getLoopPreheader()) { | |||
2165 | Entry = 0; | |||
2166 | Latch = 1; | |||
2167 | } else { | |||
2168 | Entry = 1; | |||
2169 | Latch = 0; | |||
2170 | } | |||
2171 | ||||
2172 | ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry)); | |||
2173 | if (!Init) continue; | |||
2174 | Constant *NewInit = ConstantFP::get(DestTy, IsSigned ? | |||
2175 | (double)Init->getSExtValue() : | |||
2176 | (double)Init->getZExtValue()); | |||
2177 | ||||
2178 | BinaryOperator *Incr = | |||
2179 | dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch)); | |||
2180 | if (!Incr) continue; | |||
2181 | if (Incr->getOpcode() != Instruction::Add | |||
2182 | && Incr->getOpcode() != Instruction::Sub) | |||
2183 | continue; | |||
2184 | ||||
2185 | /* Initialize new IV, double d = 0.0 in above example. */ | |||
2186 | ConstantInt *C = nullptr; | |||
2187 | if (Incr->getOperand(0) == PH) | |||
2188 | C = dyn_cast<ConstantInt>(Incr->getOperand(1)); | |||
2189 | else if (Incr->getOperand(1) == PH) | |||
2190 | C = dyn_cast<ConstantInt>(Incr->getOperand(0)); | |||
2191 | else | |||
2192 | continue; | |||
2193 | ||||
2194 | if (!C) continue; | |||
2195 | ||||
2196 | // Ignore negative constants, as the code below doesn't handle them | |||
2197 | // correctly. TODO: Remove this restriction. | |||
2198 | if (!C->getValue().isStrictlyPositive()) continue; | |||
2199 | ||||
2200 | /* Add new PHINode. */ | |||
2201 | PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH); | |||
2202 | ||||
2203 | /* create new increment. '++d' in above example. */ | |||
2204 | Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue()); | |||
2205 | BinaryOperator *NewIncr = | |||
2206 | BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ? | |||
2207 | Instruction::FAdd : Instruction::FSub, | |||
2208 | NewPH, CFP, "IV.S.next.", Incr); | |||
2209 | ||||
2210 | NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry)); | |||
2211 | NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch)); | |||
2212 | ||||
2213 | /* Remove cast operation */ | |||
2214 | ShadowUse->replaceAllUsesWith(NewPH); | |||
2215 | ShadowUse->eraseFromParent(); | |||
2216 | Changed = true; | |||
2217 | break; | |||
2218 | } | |||
2219 | } | |||
2220 | ||||
2221 | /// If Cond has an operand that is an expression of an IV, set the IV user and | |||
2222 | /// stride information and return true, otherwise return false. | |||
2223 | bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) { | |||
2224 | for (IVStrideUse &U : IU) | |||
2225 | if (U.getUser() == Cond) { | |||
2226 | // NOTE: we could handle setcc instructions with multiple uses here, but | |||
2227 | // InstCombine does it as well for simple uses, and it's not clear that it | |||
2228 | // occurs enough in real life to handle. | |||
2229 | CondUse = &U; | |||
2230 | return true; | |||
2231 | } | |||
2232 | return false; | |||
2233 | } | |||
2234 | ||||
2235 | /// Rewrite the loop's terminating condition if it uses a max computation. | |||
2236 | /// | |||
2237 | /// This is a narrow solution to a specific, but acute, problem. For loops | |||
2238 | /// like this: | |||
2239 | /// | |||
2240 | /// i = 0; | |||
2241 | /// do { | |||
2242 | /// p[i] = 0.0; | |||
2243 | /// } while (++i < n); | |||
2244 | /// | |||
2245 | /// the trip count isn't just 'n', because 'n' might not be positive. And | |||
2246 | /// unfortunately this can come up even for loops where the user didn't use | |||
2247 | /// a C do-while loop. For example, seemingly well-behaved top-test loops | |||
2248 | /// will commonly be lowered like this: | |||
2249 | /// | |||
2250 | /// if (n > 0) { | |||
2251 | /// i = 0; | |||
2252 | /// do { | |||
2253 | /// p[i] = 0.0; | |||
2254 | /// } while (++i < n); | |||
2255 | /// } | |||
2256 | /// | |||
2257 | /// and then it's possible for subsequent optimization to obscure the if | |||
2258 | /// test in such a way that indvars can't find it. | |||
2259 | /// | |||
2260 | /// When indvars can't find the if test in loops like this, it creates a | |||
2261 | /// max expression, which allows it to give the loop a canonical | |||
2262 | /// induction variable: | |||
2263 | /// | |||
2264 | /// i = 0; | |||
2265 | /// max = n < 1 ? 1 : n; | |||
2266 | /// do { | |||
2267 | /// p[i] = 0.0; | |||
2268 | /// } while (++i != max); | |||
2269 | /// | |||
2270 | /// Canonical induction variables are necessary because the loop passes | |||
2271 | /// are designed around them. The most obvious example of this is the | |||
2272 | /// LoopInfo analysis, which doesn't remember trip count values. It | |||
2273 | /// expects to be able to rediscover the trip count each time it is | |||
2274 | /// needed, and it does this using a simple analysis that only succeeds if | |||
2275 | /// the loop has a canonical induction variable. | |||
2276 | /// | |||
2277 | /// However, when it comes time to generate code, the maximum operation | |||
2278 | /// can be quite costly, especially if it's inside of an outer loop. | |||
2279 | /// | |||
2280 | /// This function solves this problem by detecting this type of loop and | |||
2281 | /// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting | |||
2282 | /// the instructions for the maximum computation. | |||
2283 | ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) { | |||
2284 | // Check that the loop matches the pattern we're looking for. | |||
2285 | if (Cond->getPredicate() != CmpInst::ICMP_EQ && | |||
2286 | Cond->getPredicate() != CmpInst::ICMP_NE) | |||
2287 | return Cond; | |||
2288 | ||||
2289 | SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1)); | |||
2290 | if (!Sel || !Sel->hasOneUse()) return Cond; | |||
2291 | ||||
2292 | const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L); | |||
2293 | if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) | |||
2294 | return Cond; | |||
2295 | const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1); | |||
2296 | ||||
2297 | // Add one to the backedge-taken count to get the trip count. | |||
2298 | const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount); | |||
2299 | if (IterationCount != SE.getSCEV(Sel)) return Cond; | |||
2300 | ||||
2301 | // Check for a max calculation that matches the pattern. There's no check | |||
2302 | // for ICMP_ULE here because the comparison would be with zero, which | |||
2303 | // isn't interesting. | |||
2304 | CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; | |||
2305 | const SCEVNAryExpr *Max = nullptr; | |||
2306 | if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) { | |||
2307 | Pred = ICmpInst::ICMP_SLE; | |||
2308 | Max = S; | |||
2309 | } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) { | |||
2310 | Pred = ICmpInst::ICMP_SLT; | |||
2311 | Max = S; | |||
2312 | } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) { | |||
2313 | Pred = ICmpInst::ICMP_ULT; | |||
2314 | Max = U; | |||
2315 | } else { | |||
2316 | // No match; bail. | |||
2317 | return Cond; | |||
2318 | } | |||
2319 | ||||
2320 | // To handle a max with more than two operands, this optimization would | |||
2321 | // require additional checking and setup. | |||
2322 | if (Max->getNumOperands() != 2) | |||
2323 | return Cond; | |||
2324 | ||||
2325 | const SCEV *MaxLHS = Max->getOperand(0); | |||
2326 | const SCEV *MaxRHS = Max->getOperand(1); | |||
2327 | ||||
2328 | // ScalarEvolution canonicalizes constants to the left. For < and >, look | |||
2329 | // for a comparison with 1. For <= and >=, a comparison with zero. | |||
2330 | if (!MaxLHS || | |||
2331 | (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One))) | |||
2332 | return Cond; | |||
2333 | ||||
2334 | // Check the relevant induction variable for conformance to | |||
2335 | // the pattern. | |||
2336 | const SCEV *IV = SE.getSCEV(Cond->getOperand(0)); | |||
2337 | const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV); | |||
2338 | if (!AR || !AR->isAffine() || | |||
2339 | AR->getStart() != One || | |||
2340 | AR->getStepRecurrence(SE) != One) | |||
2341 | return Cond; | |||
2342 | ||||
2343 | assert(AR->getLoop() == L && | |||
2344 | "Loop condition operand is an addrec in a different loop!"); | |||
2345 | ||||
2346 | // Check the right operand of the select, and remember it, as it will | |||
2347 | // be used in the new comparison instruction. | |||
2348 | Value *NewRHS = nullptr; | |||
2349 | if (ICmpInst::isTrueWhenEqual(Pred)) { | |||
2350 | // Look for n+1, and grab n. | |||
2351 | if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1))) | |||
2352 | if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1))) | |||
2353 | if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS) | |||
2354 | NewRHS = BO->getOperand(0); | |||
2355 | if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2))) | |||
2356 | if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1))) | |||
2357 | if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS) | |||
2358 | NewRHS = BO->getOperand(0); | |||
2359 | if (!NewRHS) | |||
2360 | return Cond; | |||
2361 | } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS) | |||
2362 | NewRHS = Sel->getOperand(1); | |||
2363 | else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS) | |||
2364 | NewRHS = Sel->getOperand(2); | |||
2365 | else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS)) | |||
2366 | NewRHS = SU->getValue(); | |||
2367 | else | |||
2368 | // Max doesn't match expected pattern. | |||
2369 | return Cond; | |||
2370 | ||||
2371 | // Determine the new comparison opcode. It may be signed or unsigned, | |||
2372 | // and the original comparison may be either equality or inequality. | |||
2373 | if (Cond->getPredicate() == CmpInst::ICMP_EQ) | |||
2374 | Pred = CmpInst::getInversePredicate(Pred); | |||
2375 | ||||
2376 | // Ok, everything looks ok to change the condition into an SLT or SGE and | |||
2377 | // delete the max calculation. | |||
2378 | ICmpInst *NewCond = | |||
2379 | new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp"); | |||
2380 | ||||
2381 | // Delete the max calculation instructions. | |||
2382 | NewCond->setDebugLoc(Cond->getDebugLoc()); | |||
2383 | Cond->replaceAllUsesWith(NewCond); | |||
2384 | CondUse->setUser(NewCond); | |||
2385 | Instruction *Cmp = cast<Instruction>(Sel->getOperand(0)); | |||
2386 | Cond->eraseFromParent(); | |||
2387 | Sel->eraseFromParent(); | |||
2388 | if (Cmp->use_empty()) | |||
2389 | Cmp->eraseFromParent(); | |||
2390 | return NewCond; | |||
2391 | } | |||
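// [Editorial note, added for clarity] Net effect of OptimizeMax on the doc
// comment's example, shown as simplified, illustrative IR:
//
//   before:  %max  = select i1 %cmp, i64 1, i64 %n   ; smax(1, %n)
//            %exit = icmp ne i64 %iv, %max
//   after:   %exit = icmp slt i64 %iv, %n            ; new "scmp" compare
//
// The equality test against the max is turned back into the corresponding
// signed or unsigned ordered comparison against the original bound, and the
// now-dead select (and its feeding compare, if unused) is erased.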
2392 | ||||
2393 | /// Change loop terminating condition to use the postinc iv when possible. | |||
2394 | void | |||
2395 | LSRInstance::OptimizeLoopTermCond() { | |||
2396 | SmallPtrSet<Instruction *, 4> PostIncs; | |||
2397 | ||||
2398 | // We need a different set of heuristics for rotated and non-rotated loops. | |||
2399 | // If a loop is rotated then the latch is also the backedge, so inserting | |||
2400 | // post-inc expressions just before the latch is ideal. To reduce live ranges | |||
2401 | // it also makes sense to rewrite terminating conditions to use post-inc | |||
2402 | // expressions. | |||
2403 | // | |||
2404 | // If the loop is not rotated then the latch is not a backedge; the latch | |||
2405 | // check is done in the loop head. Adding post-inc expressions before the | |||
2406 | // latch will cause overlapping live-ranges of pre-inc and post-inc expressions | |||
2407 | // in the loop body. In this case we do *not* want to use post-inc expressions | |||
2408 | // in the latch check, and we want to insert post-inc expressions before | |||
2409 | // the backedge. | |||
2410 | BasicBlock *LatchBlock = L->getLoopLatch(); | |||
2411 | SmallVector<BasicBlock*, 8> ExitingBlocks; | |||
2412 | L->getExitingBlocks(ExitingBlocks); | |||
2413 | if (llvm::all_of(ExitingBlocks, [&LatchBlock](const BasicBlock *BB) { | |||
2414 | return LatchBlock != BB; | |||
2415 | })) { | |||
2416 | // The backedge doesn't exit the loop; treat this as a head-tested loop. | |||
2417 | IVIncInsertPos = LatchBlock->getTerminator(); | |||
2418 | return; | |||
2419 | } | |||
2420 | ||||
2421 | // Otherwise treat this as a rotated loop. | |||
2422 | for (BasicBlock *ExitingBlock : ExitingBlocks) { | |||
2423 | // Get the terminating condition for the loop if possible. If we | |||
2424 | // can, we want to change it to use a post-incremented version of its | |||
2425 | // induction variable, to allow coalescing the live ranges for the IV into | |||
2426 | // one register value. | |||
2427 | ||||
2428 | BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); | |||
2429 | if (!TermBr) | |||
2430 | continue; | |||
2431 | // FIXME: Overly conservative, termination condition could be an 'or' etc.. | |||
2432 | if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition())) | |||
2433 | continue; | |||
2434 | ||||
2435 | // Search IVUsesByStride to find Cond's IVUse if there is one. | |||
2436 | IVStrideUse *CondUse = nullptr; | |||
2437 | ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition()); | |||
2438 | if (!FindIVUserForCond(Cond, CondUse)) | |||
2439 | continue; | |||
2440 | ||||
2441 | // If the trip count is computed in terms of a max (due to ScalarEvolution | |||
2442 | // being unable to find a sufficient guard, for example), change the loop | |||
2443 | // comparison to use SLT or ULT instead of NE. | |||
2444 | // One consequence of doing this now is that it disrupts the count-down | |||
2445 | // optimization. That's not always a bad thing though, because in such | |||
2446 | // cases it may still be worthwhile to avoid a max. | |||
2447 | Cond = OptimizeMax(Cond, CondUse); | |||
2448 | ||||
2449 | // If this exiting block dominates the latch block, it may also use | |||
2450 | // the post-inc value if it won't be shared with other uses. | |||
2451 | // Check for dominance. | |||
2452 | if (!DT.dominates(ExitingBlock, LatchBlock)) | |||
2453 | continue; | |||
2454 | ||||
2455 | // Conservatively avoid trying to use the post-inc value in non-latch | |||
2456 | // exits if there may be pre-inc users in intervening blocks. | |||
2457 | if (LatchBlock != ExitingBlock) | |||
2458 | for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) | |||
2459 | // Test if the use is reachable from the exiting block. This dominator | |||
2460 | // query is a conservative approximation of reachability. | |||
2461 | if (&*UI != CondUse && | |||
2462 | !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) { | |||
2463 | // Conservatively assume there may be reuse if the quotient of their | |||
2464 | // strides could be a legal scale. | |||
2465 | const SCEV *A = IU.getStride(*CondUse, L); | |||
2466 | const SCEV *B = IU.getStride(*UI, L); | |||
2467 | if (!A || !B) continue; | |||
2468 | if (SE.getTypeSizeInBits(A->getType()) != | |||
2469 | SE.getTypeSizeInBits(B->getType())) { | |||
2470 | if (SE.getTypeSizeInBits(A->getType()) > | |||
2471 | SE.getTypeSizeInBits(B->getType())) | |||
2472 | B = SE.getSignExtendExpr(B, A->getType()); | |||
2473 | else | |||
2474 | A = SE.getSignExtendExpr(A, B->getType()); | |||
2475 | } | |||
2476 | if (const SCEVConstant *D = | |||
2477 | dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) { | |||
2478 | const ConstantInt *C = D->getValue(); | |||
2479 | // Stride of one or negative one can have reuse with non-addresses. | |||
2480 | if (C->isOne() || C->isMinusOne()) | |||
2481 | goto decline_post_inc; | |||
2482 | // Avoid weird situations. | |||
2483 | if (C->getValue().getMinSignedBits() >= 64 || | |||
2484 | C->getValue().isMinSignedValue()) | |||
2485 | goto decline_post_inc; | |||
2486 | // Check for possible scaled-address reuse. | |||
2487 | if (isAddressUse(TTI, UI->getUser(), UI->getOperandValToReplace())) { | |||
2488 | MemAccessTy AccessTy = getAccessType( | |||
2489 | TTI, UI->getUser(), UI->getOperandValToReplace()); | |||
2490 | int64_t Scale = C->getSExtValue(); | |||
2491 | if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr, | |||
2492 | /*BaseOffset=*/0, | |||
2493 | /*HasBaseReg=*/false, Scale, | |||
2494 | AccessTy.AddrSpace)) | |||
2495 | goto decline_post_inc; | |||
2496 | Scale = -Scale; | |||
2497 | if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr, | |||
2498 | /*BaseOffset=*/0, | |||
2499 | /*HasBaseReg=*/false, Scale, | |||
2500 | AccessTy.AddrSpace)) | |||
2501 | goto decline_post_inc; | |||
2502 | } | |||
2503 | } | |||
2504 | } | |||
2505 | ||||
2506 | LLVM_DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: " | |||
2507 | << *Cond << '\n'); | |||
2508 | ||||
2509 | // It's possible for the setcc instruction to be anywhere in the loop, and | |||
2510 | // possible for it to have multiple users. If it is not immediately before | |||
2511 | // the exiting block branch, move it. | |||
2512 | if (Cond->getNextNonDebugInstruction() != TermBr) { | |||
2513 | if (Cond->hasOneUse()) { | |||
2514 | Cond->moveBefore(TermBr); | |||
2515 | } else { | |||
2516 | // Clone the terminating condition and insert into the loopend. | |||
2517 | ICmpInst *OldCond = Cond; | |||
2518 | Cond = cast<ICmpInst>(Cond->clone()); | |||
2519 | Cond->setName(L->getHeader()->getName() + ".termcond"); | |||
2520 | ExitingBlock->getInstList().insert(TermBr->getIterator(), Cond); | |||
2521 | ||||
2522 | // Clone the IVUse, as the old use still exists! | |||
2523 | CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace()); | |||
2524 | TermBr->replaceUsesOfWith(OldCond, Cond); | |||
2525 | } | |||
2526 | } | |||
2527 | ||||
2528 | // If we get to here, we know that we can transform the setcc instruction to | |||
2529 | // use the post-incremented version of the IV, allowing us to coalesce the | |||
2530 | // live ranges for the IV correctly. | |||
2531 | CondUse->transformToPostInc(L); | |||
2532 | Changed = true; | |||
2533 | ||||
2534 | PostIncs.insert(Cond); | |||
2535 | decline_post_inc:; | |||
2536 | } | |||
2537 | ||||
2538 | // Determine an insertion point for the loop induction variable increment. It | |||
2539 | // must dominate all the post-inc comparisons we just set up, and it must | |||
2540 | // dominate the loop latch edge. | |||
2541 | IVIncInsertPos = L->getLoopLatch()->getTerminator(); | |||
2542 | for (Instruction *Inst : PostIncs) { | |||
2543 | BasicBlock *BB = | |||
2544 | DT.findNearestCommonDominator(IVIncInsertPos->getParent(), | |||
2545 | Inst->getParent()); | |||
2546 | if (BB == Inst->getParent()) | |||
2547 | IVIncInsertPos = Inst; | |||
2548 | else if (BB != IVIncInsertPos->getParent()) | |||
2549 | IVIncInsertPos = BB->getTerminator(); | |||
2550 | } | |||
2551 | } | |||
2552 | ||||
2553 | /// Determine if the given use can accommodate a fixup at the given offset and | |||
2554 | /// other details. If so, update the use and return true. | |||
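/// A worked sketch of the check below, with hypothetical numbers: if this use
/// currently spans offsets [0, 16] and a new fixup arrives at offset -8, the
/// use is widened to [-8, 16] only when the target can still fold an offset of
/// 16 - (-8) = 24 relative to a base register; otherwise the caller falls back
/// to creating a separate use.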
2555 | bool LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, | |||
2556 | bool HasBaseReg, LSRUse::KindType Kind, | |||
2557 | MemAccessTy AccessTy) { | |||
2558 | int64_t NewMinOffset = LU.MinOffset; | |||
2559 | int64_t NewMaxOffset = LU.MaxOffset; | |||
2560 | MemAccessTy NewAccessTy = AccessTy; | |||
2561 | ||||
2562 | // Check for a mismatched kind. It's tempting to collapse mismatched kinds to | |||
2563 | // something conservative, however this can pessimize in the case that one of | |||
2564 | // the uses will have all its uses outside the loop, for example. | |||
2565 | if (LU.Kind != Kind) | |||
2566 | return false; | |||
2567 | ||||
2568 | // Check for a mismatched access type, and fall back conservatively as needed. | |||
2569 | // TODO: Be less conservative when the type is similar and can use the same | |||
2570 | // addressing modes. | |||
2571 | if (Kind == LSRUse::Address) { | |||
2572 | if (AccessTy.MemTy != LU.AccessTy.MemTy) { | |||
2573 | NewAccessTy = MemAccessTy::getUnknown(AccessTy.MemTy->getContext(), | |||
2574 | AccessTy.AddrSpace); | |||
2575 | } | |||
2576 | } | |||
2577 | ||||
2578 | // Conservatively assume HasBaseReg is true for now. | |||
2579 | if (NewOffset < LU.MinOffset) { | |||
2580 | if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr, | |||
2581 | LU.MaxOffset - NewOffset, HasBaseReg)) | |||
2582 | return false; | |||
2583 | NewMinOffset = NewOffset; | |||
2584 | } else if (NewOffset > LU.MaxOffset) { | |||
2585 | if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr, | |||
2586 | NewOffset - LU.MinOffset, HasBaseReg)) | |||
2587 | return false; | |||
2588 | NewMaxOffset = NewOffset; | |||
2589 | } | |||
2590 | ||||
2591 | // Update the use. | |||
2592 | LU.MinOffset = NewMinOffset; | |||
2593 | LU.MaxOffset = NewMaxOffset; | |||
2594 | LU.AccessTy = NewAccessTy; | |||
2595 | return true; | |||
2596 | } | |||
2597 | ||||
2598 | /// Return an LSRUse index and an offset value for a fixup which needs the given | |||
2599 | /// expression, with the given kind and optional access type. Either reuse an | |||
2600 | /// existing use or create a new one, as needed. | |||
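/// For example (a sketch with hypothetical SCEVs): a fixup that needs
/// (%base + 4) is looked up under the stripped expression %base with offset 4,
/// so it can share an LSRUse with a fixup needing (%base + 8) as long as the
/// two offsets can be reconciled.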
2601 | std::pair<size_t, int64_t> LSRInstance::getUse(const SCEV *&Expr, | |||
2602 | LSRUse::KindType Kind, | |||
2603 | MemAccessTy AccessTy) { | |||
2604 | const SCEV *Copy = Expr; | |||
2605 | int64_t Offset = ExtractImmediate(Expr, SE); | |||
2606 | ||||
2607 | // Basic uses can't accept any offset, for example. | |||
2608 | if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr, | |||
2609 | Offset, /*HasBaseReg=*/ true)) { | |||
2610 | Expr = Copy; | |||
2611 | Offset = 0; | |||
2612 | } | |||
2613 | ||||
2614 | std::pair<UseMapTy::iterator, bool> P = | |||
2615 | UseMap.insert(std::make_pair(LSRUse::SCEVUseKindPair(Expr, Kind), 0)); | |||
2616 | if (!P.second) { | |||
2617 | // A use already existed with this base. | |||
2618 | size_t LUIdx = P.first->second; | |||
2619 | LSRUse &LU = Uses[LUIdx]; | |||
2620 | if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy)) | |||
2621 | // Reuse this use. | |||
2622 | return std::make_pair(LUIdx, Offset); | |||
2623 | } | |||
2624 | ||||
2625 | // Create a new use. | |||
2626 | size_t LUIdx = Uses.size(); | |||
2627 | P.first->second = LUIdx; | |||
2628 | Uses.push_back(LSRUse(Kind, AccessTy)); | |||
2629 | LSRUse &LU = Uses[LUIdx]; | |||
2630 | ||||
2631 | LU.MinOffset = Offset; | |||
2632 | LU.MaxOffset = Offset; | |||
2633 | return std::make_pair(LUIdx, Offset); | |||
2634 | } | |||
2635 | ||||
2636 | /// Delete the given use from the Uses list. | |||
2637 | void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) { | |||
2638 | if (&LU != &Uses.back()) | |||
2639 | std::swap(LU, Uses.back()); | |||
2640 | Uses.pop_back(); | |||
2641 | ||||
2642 | // Update RegUses. | |||
2643 | RegUses.swapAndDropUse(LUIdx, Uses.size()); | |||
2644 | } | |||
2645 | ||||
2646 | /// Look for a use distinct from OrigLU which has a formula with the same | |||
2647 | /// registers as the given formula. | |||
2648 | LSRUse * | |||
2649 | LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF, | |||
2650 | const LSRUse &OrigLU) { | |||
2651 | // Search all uses for the formula. This could be more clever. | |||
2652 | for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { | |||
2653 | LSRUse &LU = Uses[LUIdx]; | |||
2654 | // Check whether this use is close enough to OrigLU, to see whether it's | |||
2655 | // worthwhile looking through its formulae. | |||
2656 | // Ignore ICmpZero uses because they may contain formulae generated by | |||
2657 | // GenerateICmpZeroScales, in which case adding fixup offsets may | |||
2658 | // be invalid. | |||
2659 | if (&LU != &OrigLU && | |||
2660 | LU.Kind != LSRUse::ICmpZero && | |||
2661 | LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy && | |||
2662 | LU.WidestFixupType == OrigLU.WidestFixupType && | |||
2663 | LU.HasFormulaWithSameRegs(OrigF)) { | |||
2664 | // Scan through this use's formulae. | |||
2665 | for (const Formula &F : LU.Formulae) { | |||
2666 | // Check to see if this formula has the same registers and symbols | |||
2667 | // as OrigF. | |||
2668 | if (F.BaseRegs == OrigF.BaseRegs && | |||
2669 | F.ScaledReg == OrigF.ScaledReg && | |||
2670 | F.BaseGV == OrigF.BaseGV && | |||
2671 | F.Scale == OrigF.Scale && | |||
2672 | F.UnfoldedOffset == OrigF.UnfoldedOffset) { | |||
2673 | if (F.BaseOffset == 0) | |||
2674 | return &LU; | |||
2675 | // This is the formula where all the registers and symbols matched; | |||
2676 | // there aren't going to be any others. Since we declined it, we | |||
2677 | // can skip the rest of the formulae and proceed to the next LSRUse. | |||
2678 | break; | |||
2679 | } | |||
2680 | } | |||
2681 | } | |||
2682 | } | |||
2683 | ||||
2684 | // Nothing looked good. | |||
2685 | return nullptr; | |||
2686 | } | |||
2687 | ||||
2688 | void LSRInstance::CollectInterestingTypesAndFactors() { | |||
2689 | SmallSetVector<const SCEV *, 4> Strides; | |||
2690 | ||||
2691 | // Collect interesting types and strides. | |||
2692 | SmallVector<const SCEV *, 4> Worklist; | |||
2693 | for (const IVStrideUse &U : IU) { | |||
2694 | const SCEV *Expr = IU.getExpr(U); | |||
2695 | ||||
2696 | // Collect interesting types. | |||
2697 | Types.insert(SE.getEffectiveSCEVType(Expr->getType())); | |||
2698 | ||||
2699 | // Add strides for mentioned loops. | |||
2700 | Worklist.push_back(Expr); | |||
2701 | do { | |||
2702 | const SCEV *S = Worklist.pop_back_val(); | |||
2703 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { | |||
2704 | if (AR->getLoop() == L) | |||
2705 | Strides.insert(AR->getStepRecurrence(SE)); | |||
2706 | Worklist.push_back(AR->getStart()); | |||
2707 | } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { | |||
2708 | Worklist.append(Add->op_begin(), Add->op_end()); | |||
2709 | } | |||
2710 | } while (!Worklist.empty()); | |||
2711 | } | |||
2712 | ||||
2713 | // Compute interesting factors from the set of interesting strides. | |||
2714 | for (SmallSetVector<const SCEV *, 4>::const_iterator | |||
2715 | I = Strides.begin(), E = Strides.end(); I != E; ++I) | |||
2716 | for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter = | |||
2717 | std::next(I); NewStrideIter != E; ++NewStrideIter) { | |||
2718 | const SCEV *OldStride = *I; | |||
2719 | const SCEV *NewStride = *NewStrideIter; | |||
2720 | ||||
2721 | if (SE.getTypeSizeInBits(OldStride->getType()) != | |||
2722 | SE.getTypeSizeInBits(NewStride->getType())) { | |||
2723 | if (SE.getTypeSizeInBits(OldStride->getType()) > | |||
2724 | SE.getTypeSizeInBits(NewStride->getType())) | |||
2725 | NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType()); | |||
2726 | else | |||
2727 | OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType()); | |||
2728 | } | |||
2729 | if (const SCEVConstant *Factor = | |||
2730 | dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride, | |||
2731 | SE, true))) { | |||
2732 | if (Factor->getAPInt().getMinSignedBits() <= 64 && !Factor->isZero()) | |||
2733 | Factors.insert(Factor->getAPInt().getSExtValue()); | |||
2734 | } else if (const SCEVConstant *Factor = | |||
2735 | dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride, | |||
2736 | NewStride, | |||
2737 | SE, true))) { | |||
2738 | if (Factor->getAPInt().getMinSignedBits() <= 64 && !Factor->isZero()) | |||
2739 | Factors.insert(Factor->getAPInt().getSExtValue()); | |||
2740 | } | |||
2741 | } | |||
2742 | ||||
2743 | // If all uses use the same type, don't bother looking for truncation-based | |||
2744 | // reuse. | |||
2745 | if (Types.size() == 1) | |||
2746 | Types.clear(); | |||
2747 | ||||
2748 | LLVM_DEBUG(print_factors_and_types(dbgs())); | |||
2749 | } | |||
2750 | ||||
2751 | /// Helper for CollectChains that finds an IV operand (computed by an AddRec in | |||
2752 | /// this loop) within [OI,OE) or returns OE. If IVUsers mapped Instructions to | |||
2753 | /// IVStrideUses, we could partially skip this. | |||
2754 | static User::op_iterator | |||
2755 | findIVOperand(User::op_iterator OI, User::op_iterator OE, | |||
2756 | Loop *L, ScalarEvolution &SE) { | |||
2757 | for(; OI != OE; ++OI) { | |||
2758 | if (Instruction *Oper = dyn_cast<Instruction>(*OI)) { | |||
2759 | if (!SE.isSCEVable(Oper->getType())) | |||
2760 | continue; | |||
2761 | ||||
2762 | if (const SCEVAddRecExpr *AR = | |||
2763 | dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Oper))) { | |||
2764 | if (AR->getLoop() == L) | |||
2765 | break; | |||
2766 | } | |||
2767 | } | |||
2768 | } | |||
2769 | return OI; | |||
2770 | } | |||
2771 | ||||
2772 | /// IVChain logic must consistently peek base TruncInst operands, so wrap it in | |||
2773 | /// a convenient helper. | |||
2774 | static Value *getWideOperand(Value *Oper) { | |||
2775 | if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper)) | |||
2776 | return Trunc->getOperand(0); | |||
2777 | return Oper; | |||
2778 | } | |||
2779 | ||||
2780 | /// Return true if we allow an IV chain to include both types. | |||
2781 | static bool isCompatibleIVType(Value *LVal, Value *RVal) { | |||
2782 | Type *LType = LVal->getType(); | |||
2783 | Type *RType = RVal->getType(); | |||
2784 | return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy() && | |||
2785 | // Different address spaces means (possibly) | |||
2786 | // different types of the pointer implementation, | |||
2787 | // e.g. i16 vs i32 so disallow that. | |||
2788 | (LType->getPointerAddressSpace() == | |||
2789 | RType->getPointerAddressSpace())); | |||
2790 | } | |||
2791 | ||||
2792 | /// Return an approximation of this SCEV expression's "base", or NULL for any | |||
2793 | /// constant. Returning the expression itself is conservative. Returning a | |||
2794 | /// deeper subexpression is more precise and valid as long as it isn't less | |||
2795 | /// complex than another subexpression. For expressions involving multiple | |||
2796 | /// unscaled values, we need to return the pointer-type SCEVUnknown. This avoids | |||
2797 | /// forming chains across objects, such as: PrevOper==a[i], IVOper==b[i], | |||
2798 | /// IVInc==b-a. | |||
2799 | /// | |||
2800 | /// Since SCEVUnknown is the rightmost type, and pointers are the rightmost | |||
2801 | /// SCEVUnknown, we simply return the rightmost SCEV operand. | |||
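/// For example (a sketch with hypothetical SCEVs): for ((4 * {0,+,1}<%L>) + %a)
/// the reverse walk below skips the scaled operand and returns %a, so accesses
/// rooted at different arrays (%a vs. %b) keep distinct bases and are never
/// chained together even when their offsets from the IV coincide.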
2802 | static const SCEV *getExprBase(const SCEV *S) { | |||
2803 | switch (S->getSCEVType()) { | |||
2804 | default: // including scUnknown. | |||
2805 | return S; | |||
2806 | case scConstant: | |||
2807 | return nullptr; | |||
2808 | case scTruncate: | |||
2809 | return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand()); | |||
2810 | case scZeroExtend: | |||
2811 | return getExprBase(cast<SCEVZeroExtendExpr>(S)->getOperand()); | |||
2812 | case scSignExtend: | |||
2813 | return getExprBase(cast<SCEVSignExtendExpr>(S)->getOperand()); | |||
2814 | case scAddExpr: { | |||
2815 | // Skip over scaled operands (scMulExpr) to follow add operands as long as | |||
2816 | // there's nothing more complex. | |||
2817 | // FIXME: not sure if we want to recognize negation. | |||
2818 | const SCEVAddExpr *Add = cast<SCEVAddExpr>(S); | |||
2819 | for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(Add->op_end()), | |||
2820 | E(Add->op_begin()); I != E; ++I) { | |||
2821 | const SCEV *SubExpr = *I; | |||
2822 | if (SubExpr->getSCEVType() == scAddExpr) | |||
2823 | return getExprBase(SubExpr); | |||
2824 | ||||
2825 | if (SubExpr->getSCEVType() != scMulExpr) | |||
2826 | return SubExpr; | |||
2827 | } | |||
2828 | return S; // all operands are scaled, be conservative. | |||
2829 | } | |||
2830 | case scAddRecExpr: | |||
2831 | return getExprBase(cast<SCEVAddRecExpr>(S)->getStart()); | |||
2832 | } | |||
2833 | llvm_unreachable("Unknown SCEV kind!"); | |||
2834 | } | |||
2835 | ||||
2836 | /// Return true if the chain increment is profitable to expand into a loop | |||
2837 | /// invariant value, which may require its own register. A profitable chain | |||
2838 | /// increment will be an offset relative to the same base. We allow such offsets | |||
2839 | /// to potentially be used as the chain increment as long as they are not obviously | |||
2840 | /// expensive to expand using real instructions. | |||
2841 | bool IVChain::isProfitableIncrement(const SCEV *OperExpr, | |||
2842 | const SCEV *IncExpr, | |||
2843 | ScalarEvolution &SE) { | |||
2844 | // Aggressively form chains when -stress-ivchain. | |||
2845 | if (StressIVChain) | |||
2846 | return true; | |||
2847 | ||||
2848 | // Do not replace a constant offset from IV head with a nonconstant IV | |||
2849 | // increment. | |||
2850 | if (!isa<SCEVConstant>(IncExpr)) { | |||
2851 | const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Incs[0].IVOperand)); | |||
2852 | if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr))) | |||
2853 | return false; | |||
2854 | } | |||
2855 | ||||
2856 | SmallPtrSet<const SCEV*, 8> Processed; | |||
2857 | return !isHighCostExpansion(IncExpr, Processed, SE); | |||
2858 | } | |||
2859 | ||||
2860 | /// Return true if the number of registers needed for the chain is estimated to | |||
2861 | /// be less than the number required for the individual IV users. First prohibit | |||
2862 | /// any IV users that keep the IV live across increments (the Users set should | |||
2863 | /// be empty). Next count the number and type of increments in the chain. | |||
2864 | /// | |||
2865 | /// Chaining IVs can lead to considerable code bloat if ISEL doesn't | |||
2866 | /// effectively use postinc addressing modes. Only consider it profitable if the | |||
2867 | /// increments can be computed in fewer registers when chained. | |||
2868 | /// | |||
2869 | /// TODO: Consider IVInc free if it's already used in other chains. | |||
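/// A worked sketch of the accounting below, for a hypothetical chain: starting
/// from cost = 1 for the chain itself, a header-phi tail subtracts one, the
/// first occurrence of a variable increment adds one, and each reuse of that
/// increment subtracts one. A chain whose variable increment is reused twice
/// therefore ends at 1 - 1 + 1 - 2 = -1 and is considered profitable.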
2870 | static bool isProfitableChain(IVChain &Chain, | |||
2871 | SmallPtrSetImpl<Instruction *> &Users, | |||
2872 | ScalarEvolution &SE, | |||
2873 | const TargetTransformInfo &TTI) { | |||
2874 | if (StressIVChain) | |||
2875 | return true; | |||
2876 | ||||
2877 | if (!Chain.hasIncs()) | |||
2878 | return false; | |||
2879 | ||||
2880 | if (!Users.empty()) { | |||
2881 | LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n"; | |||
2882 | for (Instruction *Inst | |||
2883 | : Users) { dbgs() << " " << *Inst << "\n"; }); | |||
2884 | return false; | |||
2885 | } | |||
2886 | assert(!Chain.Incs.empty() && "empty IV chains are not allowed"); | |||
2887 | ||||
2888 | // The chain itself may require a register, so initialize cost to 1. | |||
2889 | int cost = 1; | |||
2890 | ||||
2891 | // A complete chain likely eliminates the need for keeping the original IV in | |||
2892 | // a register. LSR does not currently know how to form a complete chain unless | |||
2893 | // the header phi already exists. | |||
2894 | if (isa<PHINode>(Chain.tailUserInst()) | |||
2895 | && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) { | |||
2896 | --cost; | |||
2897 | } | |||
2898 | const SCEV *LastIncExpr = nullptr; | |||
2899 | unsigned NumConstIncrements = 0; | |||
2900 | unsigned NumVarIncrements = 0; | |||
2901 | unsigned NumReusedIncrements = 0; | |||
2902 | ||||
2903 | if (TTI.isProfitableLSRChainElement(Chain.Incs[0].UserInst)) | |||
2904 | return true; | |||
2905 | ||||
2906 | for (const IVInc &Inc : Chain) { | |||
2907 | if (TTI.isProfitableLSRChainElement(Inc.UserInst)) | |||
2908 | return true; | |||
2909 | if (Inc.IncExpr->isZero()) | |||
2910 | continue; | |||
2911 | ||||
2912 | // Incrementing by zero or some constant is neutral. We assume constants can | |||
2913 | // be folded into an addressing mode or an add's immediate operand. | |||
2914 | if (isa<SCEVConstant>(Inc.IncExpr)) { | |||
2915 | ++NumConstIncrements; | |||
2916 | continue; | |||
2917 | } | |||
2918 | ||||
2919 | if (Inc.IncExpr == LastIncExpr) | |||
2920 | ++NumReusedIncrements; | |||
2921 | else | |||
2922 | ++NumVarIncrements; | |||
2923 | ||||
2924 | LastIncExpr = Inc.IncExpr; | |||
2925 | } | |||
2926 | // An IV chain with a single increment is handled by LSR's postinc | |||
2927 | // uses. However, a chain with multiple increments requires keeping the IV's | |||
2928 | // value live longer than it needs to be if chained. | |||
2929 | if (NumConstIncrements > 1) | |||
2930 | --cost; | |||
2931 | ||||
2932 | // Materializing increment expressions in the preheader that didn't exist in | |||
2933 | // the original code may cost a register. For example, sign-extended array | |||
2934 | // indices can produce ridiculous increments like this: | |||
2935 | // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64))) | |||
2936 | cost += NumVarIncrements; | |||
2937 | ||||
2938 | // Reusing variable increments likely saves a register to hold the multiple of | |||
2939 | // the stride. | |||
2940 | cost -= NumReusedIncrements; | |||
2941 | ||||
2942 | LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost | |||
2943 | << "\n"); | |||
2944 | ||||
2945 | return cost < 0; | |||
2946 | } | |||
2947 | ||||
2948 | /// Add this IV user to an existing chain or make it the head of a new chain. | |||
2949 | void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper, | |||
2950 | SmallVectorImpl<ChainUsers> &ChainUsersVec) { | |||
2951 | // When IVs are used as types of varying widths, they are generally converted | |||
2952 | // to a wider type with some uses remaining narrow under a (free) trunc. | |||
2953 | Value *const NextIV = getWideOperand(IVOper); | |||
2954 | const SCEV *const OperExpr = SE.getSCEV(NextIV); | |||
2955 | const SCEV *const OperExprBase = getExprBase(OperExpr); | |||
2956 | ||||
2957 | // Visit all existing chains. Check if its IVOper can be computed as a | |||
2958 | // profitable loop invariant increment from the last link in the Chain. | |||
2959 | unsigned ChainIdx = 0, NChains = IVChainVec.size(); | |||
2960 | const SCEV *LastIncExpr = nullptr; | |||
2961 | for (; ChainIdx < NChains; ++ChainIdx) { | |||
2962 | IVChain &Chain = IVChainVec[ChainIdx]; | |||
2963 | ||||
2964 | // Prune the solution space aggressively by checking that both IV operands | |||
2965 | // are expressions that operate on the same unscaled SCEVUnknown. This | |||
2966 | // "base" will be canceled by the subsequent getMinusSCEV call. Checking | |||
2967 | // first avoids creating extra SCEV expressions. | |||
2968 | if (!StressIVChain && Chain.ExprBase != OperExprBase) | |||
2969 | continue; | |||
2970 | ||||
2971 | Value *PrevIV = getWideOperand(Chain.Incs.back().IVOperand); | |||
2972 | if (!isCompatibleIVType(PrevIV, NextIV)) | |||
2973 | continue; | |||
2974 | ||||
2975 | // A phi node terminates a chain. | |||
2976 | if (isa<PHINode>(UserInst) && isa<PHINode>(Chain.tailUserInst())) | |||
2977 | continue; | |||
2978 | ||||
2979 | // The increment must be loop-invariant so it can be kept in a register. | |||
2980 | const SCEV *PrevExpr = SE.getSCEV(PrevIV); | |||
2981 | const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr); | |||
2982 | if (isa<SCEVCouldNotCompute>(IncExpr) || !SE.isLoopInvariant(IncExpr, L)) | |||
2983 | continue; | |||
2984 | ||||
2985 | if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) { | |||
2986 | LastIncExpr = IncExpr; | |||
2987 | break; | |||
2988 | } | |||
2989 | } | |||
2990 | // If we haven't found a chain, create a new one, unless we hit the max. Don't | |||
2991 | // bother for phi nodes, because they must be last in the chain. | |||
2992 | if (ChainIdx == NChains) { | |||
2993 | if (isa<PHINode>(UserInst)) | |||
2994 | return; | |||
2995 | if (NChains >= MaxChains && !StressIVChain) { | |||
2996 | LLVM_DEBUG(dbgs() << "IV Chain Limit\n"); | |||
2997 | return; | |||
2998 | } | |||
2999 | LastIncExpr = OperExpr; | |||
3000 | // IVUsers may have skipped over sign/zero extensions. We don't currently | |||
3001 | // attempt to form chains involving extensions unless they can be hoisted | |||
3002 | // into this loop's AddRec. | |||
3003 | if (!isa<SCEVAddRecExpr>(LastIncExpr)) | |||
3004 | return; | |||
3005 | ++NChains; | |||
3006 | IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr), | |||
3007 | OperExprBase)); | |||
3008 | ChainUsersVec.resize(NChains); | |||
3009 | LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst | |||
3010 | << ") IV=" << *LastIncExpr << "\n"); | |||
3011 | } else { | |||
3012 | LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst | |||
3013 | << ") IV+" << *LastIncExpr << "\n"); | |||
3014 | // Add this IV user to the end of the chain. | |||
3015 | IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr)); | |||
3016 | } | |||
3017 | IVChain &Chain = IVChainVec[ChainIdx]; | |||
3018 | ||||
3019 | SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers; | |||
3020 | // This chain's NearUsers become FarUsers. | |||
3021 | if (!LastIncExpr->isZero()) { | |||
3022 | ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(), | |||
3023 | NearUsers.end()); | |||
3024 | NearUsers.clear(); | |||
3025 | } | |||
3026 | ||||
3027 | // All other uses of IVOperand become near uses of the chain. | |||
3028 | // We currently ignore intermediate values within SCEV expressions, assuming | |||
3029 | // they will eventually be used by the current chain, or can be computed | |||
3030 | // from one of the chain increments. To be more precise we could | |||
3031 | // transitively follow their users and only add leaf IV users to the set. | |||
3032 | for (User *U : IVOper->users()) { | |||
3033 | Instruction *OtherUse = dyn_cast<Instruction>(U); | |||
3034 | if (!OtherUse) | |||
3035 | continue; | |||
3036 | // Uses in the chain will no longer be uses if the chain is formed. | |||
3037 | // Include the head of the chain in this iteration (not Chain.begin()). | |||
3038 | IVChain::const_iterator IncIter = Chain.Incs.begin(); | |||
3039 | IVChain::const_iterator IncEnd = Chain.Incs.end(); | |||
3040 | for( ; IncIter != IncEnd; ++IncIter) { | |||
3041 | if (IncIter->UserInst == OtherUse) | |||
3042 | break; | |||
3043 | } | |||
3044 | if (IncIter != IncEnd) | |||
3045 | continue; | |||
3046 | ||||
3047 | if (SE.isSCEVable(OtherUse->getType()) | |||
3048 | && !isa<SCEVUnknown>(SE.getSCEV(OtherUse)) | |||
3049 | && IU.isIVUserOrOperand(OtherUse)) { | |||
3050 | continue; | |||
3051 | } | |||
3052 | NearUsers.insert(OtherUse); | |||
3053 | } | |||
3054 | ||||
3055 | // Since this user is part of the chain, it's no longer considered a use | |||
3056 | // of the chain. | |||
3057 | ChainUsersVec[ChainIdx].FarUsers.erase(UserInst); | |||
3058 | } | |||
3059 | ||||
3060 | /// Populate the vector of Chains. | |||
3061 | /// | |||
3062 | /// This decreases ILP at the architecture level. Targets with ample registers, | |||
3063 | /// multiple memory ports, and no register renaming probably don't want | |||
3064 | /// this. However, such targets should probably disable LSR altogether. | |||
3065 | /// | |||
3066 | /// The job of LSR is to make a reasonable choice of induction variables across | |||
3067 | /// the loop. Subsequent passes can easily "unchain" computation exposing more | |||
3068 | /// ILP *within the loop* if the target wants it. | |||
3069 | /// | |||
3070 | /// Finding the best IV chain is potentially a scheduling problem. Since LSR | |||
3071 | /// will not reorder memory operations, it will recognize this as a chain, but | |||
3072 | /// will generate redundant IV increments. Ideally this would be corrected later | |||
3073 | /// by a smart scheduler: | |||
3074 | /// = A[i] | |||
3075 | /// = A[i+x] | |||
3076 | /// A[i] = | |||
3077 | /// A[i+x] = | |||
3078 | /// | |||
3079 | /// TODO: Walk the entire domtree within this loop, not just the path to the | |||
3080 | /// loop latch. This will discover chains on side paths, but requires | |||
3081 | /// maintaining multiple copies of the Chains state. | |||
3082 | void LSRInstance::CollectChains() { | |||
3083 | LLVM_DEBUG(dbgs() << "Collecting IV Chains.\n"); | |||
3084 | SmallVector<ChainUsers, 8> ChainUsersVec; | |||
3085 | ||||
3086 | SmallVector<BasicBlock *,8> LatchPath; | |||
3087 | BasicBlock *LoopHeader = L->getHeader(); | |||
3088 | for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch()); | |||
3089 | Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) { | |||
3090 | LatchPath.push_back(Rung->getBlock()); | |||
3091 | } | |||
3092 | LatchPath.push_back(LoopHeader); | |||
3093 | ||||
3094 | // Walk the instruction stream from the loop header to the loop latch. | |||
3095 | for (BasicBlock *BB : reverse(LatchPath)) { | |||
3096 | for (Instruction &I : *BB) { | |||
3097 | // Skip instructions that weren't seen by IVUsers analysis. | |||
3098 | if (isa<PHINode>(I) || !IU.isIVUserOrOperand(&I)) | |||
3099 | continue; | |||
3100 | ||||
3101 | // Ignore users that are part of a SCEV expression. This way we only | |||
3102 | // consider leaf IV Users. This effectively rediscovers a portion of | |||
3103 | // IVUsers analysis but in program order this time. | |||
3104 | if (SE.isSCEVable(I.getType()) && !isa<SCEVUnknown>(SE.getSCEV(&I))) | |||
3105 | continue; | |||
3106 | ||||
3107 | // Remove this instruction from any NearUsers set it may be in. | |||
3108 | for (unsigned ChainIdx = 0, NChains = IVChainVec.size(); | |||
3109 | ChainIdx < NChains; ++ChainIdx) { | |||
3110 | ChainUsersVec[ChainIdx].NearUsers.erase(&I); | |||
3111 | } | |||
3112 | // Search for operands that can be chained. | |||
3113 | SmallPtrSet<Instruction*, 4> UniqueOperands; | |||
3114 | User::op_iterator IVOpEnd = I.op_end(); | |||
3115 | User::op_iterator IVOpIter = findIVOperand(I.op_begin(), IVOpEnd, L, SE); | |||
3116 | while (IVOpIter != IVOpEnd) { | |||
3117 | Instruction *IVOpInst = cast<Instruction>(*IVOpIter); | |||
3118 | if (UniqueOperands.insert(IVOpInst).second) | |||
3119 | ChainInstruction(&I, IVOpInst, ChainUsersVec); | |||
3120 | IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE); | |||
3121 | } | |||
3122 | } // Continue walking down the instructions. | |||
3123 | } // Continue walking down the domtree. | |||
3124 | // Visit phi backedges to determine if the chain can generate the IV postinc. | |||
3125 | for (PHINode &PN : L->getHeader()->phis()) { | |||
3126 | if (!SE.isSCEVable(PN.getType())) | |||
3127 | continue; | |||
3128 | ||||
3129 | Instruction *IncV = | |||
3130 | dyn_cast<Instruction>(PN.getIncomingValueForBlock(L->getLoopLatch())); | |||
3131 | if (IncV) | |||
3132 | ChainInstruction(&PN, IncV, ChainUsersVec); | |||
3133 | } | |||
3134 | // Remove any unprofitable chains. | |||
3135 | unsigned ChainIdx = 0; | |||
3136 | for (unsigned UsersIdx = 0, NChains = IVChainVec.size(); | |||
3137 | UsersIdx < NChains; ++UsersIdx) { | |||
3138 | if (!isProfitableChain(IVChainVec[UsersIdx], | |||
3139 | ChainUsersVec[UsersIdx].FarUsers, SE, TTI)) | |||
3140 | continue; | |||
3141 | // Preserve the chain at UsersIdx. | |||
3142 | if (ChainIdx != UsersIdx) | |||
3143 | IVChainVec[ChainIdx] = IVChainVec[UsersIdx]; | |||
3144 | FinalizeChain(IVChainVec[ChainIdx]); | |||
3145 | ++ChainIdx; | |||
3146 | } | |||
3147 | IVChainVec.resize(ChainIdx); | |||
3148 | } | |||
3149 | ||||
3150 | void LSRInstance::FinalizeChain(IVChain &Chain) { | |||
3151 | assert(!Chain.Incs.empty() && "empty IV chains are not allowed"); | |||
3152 | LLVM_DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n"); | |||
3153 | ||||
3154 | for (const IVInc &Inc : Chain) { | |||
3155 | LLVM_DEBUG(dbgs() << " Inc: " << *Inc.UserInst << "\n"); | |||
3156 | auto UseI = find(Inc.UserInst->operands(), Inc.IVOperand); | |||
3157 | assert(UseI != Inc.UserInst->op_end() && "cannot find IV operand"); | |||
3158 | IVIncSet.insert(UseI); | |||
3159 | } | |||
3160 | } | |||
3161 | ||||
3162 | /// Return true if the IVInc can be folded into an addressing mode. | |||
3163 | static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst, | |||
3164 | Value *Operand, const TargetTransformInfo &TTI) { | |||
3165 | const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr); | |||
3166 | if (!IncConst || !isAddressUse(TTI, UserInst, Operand)) | |||
3167 | return false; | |||
3168 | ||||
3169 | if (IncConst->getAPInt().getMinSignedBits() > 64) | |||
3170 | return false; | |||
3171 | ||||
3172 | MemAccessTy AccessTy = getAccessType(TTI, UserInst, Operand); | |||
3173 | int64_t IncOffset = IncConst->getValue()->getSExtValue(); | |||
3174 | if (!isAlwaysFoldable(TTI, LSRUse::Address, AccessTy, /*BaseGV=*/nullptr, | |||
3175 | IncOffset, /*HasBaseReg=*/false)) | |||
3176 | return false; | |||
3177 | ||||
3178 | return true; | |||
3179 | } | |||
3180 | ||||
3181 | /// Generate an add or subtract for each IVInc in a chain to materialize the IV | |||
3182 | /// user's operand from the previous IV user's operand. | |||
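/// For example (a sketch with hypothetical values): if the chained IV operands
/// compute %p, %p+4 and %p+12, the rewrites below materialize only the deltas,
/// roughly "%p.1 = %p + 4" and "%p.2 = %p.1 + 8", rather than recomputing each
/// operand from the original induction variable.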
3183 | void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, | |||
3184 | SmallVectorImpl<WeakTrackingVH> &DeadInsts) { | |||
3185 | // Find the new IVOperand for the head of the chain. It may have been replaced | |||
3186 | // by LSR. | |||
3187 | const IVInc &Head = Chain.Incs[0]; | |||
3188 | User::op_iterator IVOpEnd = Head.UserInst->op_end(); | |||
3189 | // findIVOperand returns IVOpEnd if it can no longer find a valid IV user. | |||
3190 | User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(), | |||
3191 | IVOpEnd, L, SE); | |||
3192 | Value *IVSrc = nullptr; | |||
3193 | while (IVOpIter != IVOpEnd) { | |||
3194 | IVSrc = getWideOperand(*IVOpIter); | |||
3195 | ||||
3196 | // If this operand computes the expression that the chain needs, we may use | |||
3197 | // it. (Check this after setting IVSrc which is used below.) | |||
3198 | // | |||
3199 | // Note that if Head.IncExpr is wider than IVSrc, then this phi is too | |||
3200 | // narrow for the chain, so we can no longer use it. We do allow using a | |||
3201 | // wider phi, assuming the LSR checked for free truncation. In that case we | |||
3202 | // should already have a truncate on this operand such that | |||
3203 | // getSCEV(IVSrc) == IncExpr. | |||
3204 | if (SE.getSCEV(*IVOpIter) == Head.IncExpr | |||
3205 | || SE.getSCEV(IVSrc) == Head.IncExpr) { | |||
3206 | break; | |||
3207 | } | |||
3208 | IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE); | |||
3209 | } | |||
3210 | if (IVOpIter == IVOpEnd) { | |||
3211 | // Gracefully give up on this chain. | |||
3212 | LLVM_DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n"); | |||
3213 | return; | |||
3214 | } | |||
3215 | assert(IVSrc && "Failed to find IV chain source"); | |||
3216 | ||||
3217 | LLVM_DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n"); | |||
3218 | Type *IVTy = IVSrc->getType(); | |||
3219 | Type *IntTy = SE.getEffectiveSCEVType(IVTy); | |||
3220 | const SCEV *LeftOverExpr = nullptr; | |||
3221 | for (const IVInc &Inc : Chain) { | |||
3222 | Instruction *InsertPt = Inc.UserInst; | |||
3223 | if (isa<PHINode>(InsertPt)) | |||
3224 | InsertPt = L->getLoopLatch()->getTerminator(); | |||
3225 | ||||
3226 | // IVOper will replace the current IV User's operand. IVSrc is the IV | |||
3227 | // value currently held in a register. | |||
3228 | Value *IVOper = IVSrc; | |||
3229 | if (!Inc.IncExpr->isZero()) { | |||
3230 | // IncExpr was the result of subtraction of two narrow values, so must | |||
3231 | // be signed. | |||
3232 | const SCEV *IncExpr = SE.getNoopOrSignExtend(Inc.IncExpr, IntTy); | |||
3233 | LeftOverExpr = LeftOverExpr ? | |||
3234 | SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr; | |||
3235 | } | |||
3236 | if (LeftOverExpr && !LeftOverExpr->isZero()) { | |||
3237 | // Expand the IV increment. | |||
3238 | Rewriter.clearPostInc(); | |||
3239 | Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt); | |||
3240 | const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc), | |||
3241 | SE.getUnknown(IncV)); | |||
3242 | IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt); | |||
3243 | ||||
3244 | // If an IV increment can't be folded, use it as the next IV value. | |||
3245 | if (!canFoldIVIncExpr(LeftOverExpr, Inc.UserInst, Inc.IVOperand, TTI)) { | |||
3246 | assert(IVTy == IVOper->getType() && "inconsistent IV increment type"); | |||
3247 | IVSrc = IVOper; | |||
3248 | LeftOverExpr = nullptr; | |||
3249 | } | |||
3250 | } | |||
3251 | Type *OperTy = Inc.IVOperand->getType(); | |||
3252 | if (IVTy != OperTy) { | |||
3253 | assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) && | |||
3254 | "cannot extend a chained IV"); | |||
3255 | IRBuilder<> Builder(InsertPt); | |||
3256 | IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain"); | |||
3257 | } | |||
3258 | Inc.UserInst->replaceUsesOfWith(Inc.IVOperand, IVOper); | |||
3259 | if (auto *OperandIsInstr = dyn_cast<Instruction>(Inc.IVOperand)) | |||
3260 | DeadInsts.emplace_back(OperandIsInstr); | |||
3261 | } | |||
3262 | // If LSR created a new, wider phi, we may also replace its postinc. We only | |||
3263 | // do this if we also found a wide value for the head of the chain. | |||
3264 | if (isa<PHINode>(Chain.tailUserInst())) { | |||
3265 | for (PHINode &Phi : L->getHeader()->phis()) { | |||
3266 | if (!isCompatibleIVType(&Phi, IVSrc)) | |||
3267 | continue; | |||
3268 | Instruction *PostIncV = dyn_cast<Instruction>( | |||
3269 | Phi.getIncomingValueForBlock(L->getLoopLatch())); | |||
3270 | if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc))) | |||
3271 | continue; | |||
3272 | Value *IVOper = IVSrc; | |||
3273 | Type *PostIncTy = PostIncV->getType(); | |||
3274 | if (IVTy != PostIncTy) { | |||
3275 | assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types"); | |||
3276 | IRBuilder<> Builder(L->getLoopLatch()->getTerminator()); | |||
3277 | Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc()); | |||
3278 | IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain"); | |||
3279 | } | |||
3280 | Phi.replaceUsesOfWith(PostIncV, IVOper); | |||
3281 | DeadInsts.emplace_back(PostIncV); | |||
3282 | } | |||
3283 | } | |||
3284 | } | |||
3285 | ||||
3286 | void LSRInstance::CollectFixupsAndInitialFormulae() { | |||
3287 | BranchInst *ExitBranch = nullptr; | |||
3288 | bool SaveCmp = TTI.canSaveCmp(L, &ExitBranch, &SE, &LI, &DT, &AC, &TLI); | |||
3289 | ||||
3290 | for (const IVStrideUse &U : IU) { | |||
3291 | Instruction *UserInst = U.getUser(); | |||
3292 | // Skip IV users that are part of profitable IV Chains. | |||
3293 | User::op_iterator UseI = | |||
3294 | find(UserInst->operands(), U.getOperandValToReplace()); | |||
3295 | assert(UseI != UserInst->op_end() && "cannot find IV operand"); | |||
3296 | if (IVIncSet.count(UseI)) { | |||
3297 | LLVM_DEBUG(dbgs() << "Use is in profitable chain: " << **UseI << '\n'); | |||
3298 | continue; | |||
3299 | } | |||
3300 | ||||
3301 | LSRUse::KindType Kind = LSRUse::Basic; | |||
3302 | MemAccessTy AccessTy; | |||
3303 | if (isAddressUse(TTI, UserInst, U.getOperandValToReplace())) { | |||
3304 | Kind = LSRUse::Address; | |||
3305 | AccessTy = getAccessType(TTI, UserInst, U.getOperandValToReplace()); | |||
3306 | } | |||
3307 | ||||
3308 | const SCEV *S = IU.getExpr(U); | |||
3309 | PostIncLoopSet TmpPostIncLoops = U.getPostIncLoops(); | |||
3310 | ||||
3311 | // Equality (== and !=) ICmps are special. We can rewrite (i == N) as | |||
3312 | // (N - i == 0), and this allows (N - i) to be the expression that we work | |||
3313 | // with rather than just N or i, so we can consider the register | |||
3314 | // requirements for both N and i at the same time. Limiting this code to | |||
3315 | // equality icmps is not a problem because all interesting loops use | |||
3316 | // equality icmps, thanks to IndVarSimplify. | |||
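// For instance (sketch): with a loop-invariant %n, "icmp eq i64 %i.next, %n"
// is handled as an ICmpZero use of (%n - %i.next), so the register
// requirements of %n and the IV are weighed together rather than separately.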
3317 | if (ICmpInst *CI = dyn_cast<ICmpInst>(UserInst)) { | |||
3318 | // If CI can be saved in some target, like replaced inside hardware loop | |||
3319 | // in PowerPC, no need to generate initial formulae for it. | |||
3320 | if (SaveCmp && CI == dyn_cast<ICmpInst>(ExitBranch->getCondition())) | |||
3321 | continue; | |||
3322 | if (CI->isEquality()) { | |||
3323 | // Swap the operands if needed to put the OperandValToReplace on the | |||
3324 | // left, for consistency. | |||
3325 | Value *NV = CI->getOperand(1); | |||
3326 | if (NV == U.getOperandValToReplace()) { | |||
3327 | CI->setOperand(1, CI->getOperand(0)); | |||
3328 | CI->setOperand(0, NV); | |||
3329 | NV = CI->getOperand(1); | |||
3330 | Changed = true; | |||
3331 | } | |||
3332 | ||||
3333 | // x == y --> x - y == 0 | |||
3334 | const SCEV *N = SE.getSCEV(NV); | |||
3335 | if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE) && | |||
3336 | (!NV->getType()->isPointerTy() || | |||
3337 | SE.getPointerBase(N) == SE.getPointerBase(S))) { | |||
3338 | // S is normalized, so normalize N before folding it into S | |||
3339 | // to keep the result normalized. | |||
3340 | N = normalizeForPostIncUse(N, TmpPostIncLoops, SE); | |||
3341 | Kind = LSRUse::ICmpZero; | |||
3342 | S = SE.getMinusSCEV(N, S); | |||
3343 | } | |||
3344 | ||||
3345 | // -1 and the negations of all interesting strides (except the negation | |||
3346 | // of -1) are now also interesting. | |||
3347 | for (size_t i = 0, e = Factors.size(); i != e; ++i) | |||
3348 | if (Factors[i] != -1) | |||
3349 | Factors.insert(-(uint64_t)Factors[i]); | |||
3350 | Factors.insert(-1); | |||
3351 | } | |||
3352 | } | |||
3353 | ||||
3354 | // Get or create an LSRUse. | |||
3355 | std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy); | |||
3356 | size_t LUIdx = P.first; | |||
3357 | int64_t Offset = P.second; | |||
3358 | LSRUse &LU = Uses[LUIdx]; | |||
3359 | ||||
3360 | // Record the fixup. | |||
3361 | LSRFixup &LF = LU.getNewFixup(); | |||
3362 | LF.UserInst = UserInst; | |||
3363 | LF.OperandValToReplace = U.getOperandValToReplace(); | |||
3364 | LF.PostIncLoops = TmpPostIncLoops; | |||
3365 | LF.Offset = Offset; | |||
3366 | LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); | |||
3367 | ||||
3368 | if (!LU.WidestFixupType || | |||
3369 | SE.getTypeSizeInBits(LU.WidestFixupType) < | |||
3370 | SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) | |||
3371 | LU.WidestFixupType = LF.OperandValToReplace->getType(); | |||
3372 | ||||
3373 | // If this is the first use of this LSRUse, give it a formula. | |||
3374 | if (LU.Formulae.empty()) { | |||
3375 | InsertInitialFormula(S, LU, LUIdx); | |||
3376 | CountRegisters(LU.Formulae.back(), LUIdx); | |||
3377 | } | |||
3378 | } | |||
3379 | ||||
3380 | LLVM_DEBUG(print_fixups(dbgs())); | |||
3381 | } | |||
3382 | ||||
3383 | /// Insert a formula for the given expression into the given use, separating out | |||
3384 | /// loop-variant portions from loop-invariant and loop-computable portions. | |||
3385 | void | |||
3386 | LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { | |||
3387 | // Mark uses whose expressions cannot be expanded. | |||
3388 | if (!isSafeToExpand(S, SE)) | |||
3389 | LU.RigidFormula = true; | |||
3390 | ||||
3391 | Formula F; | |||
3392 | F.initialMatch(S, L, SE); | |||
3393 | bool Inserted = InsertFormula(LU, LUIdx, F); | |||
3394 | assert(Inserted && "Initial formula already exists!"); (void)Inserted; | |||
3395 | } | |||
3396 | ||||
3397 | /// Insert a simple single-register formula for the given expression into the | |||
3398 | /// given use. | |||
3399 | void | |||
3400 | LSRInstance::InsertSupplementalFormula(const SCEV *S, | |||
3401 | LSRUse &LU, size_t LUIdx) { | |||
3402 | Formula F; | |||
3403 | F.BaseRegs.push_back(S); | |||
3404 | F.HasBaseReg = true; | |||
3405 | bool Inserted = InsertFormula(LU, LUIdx, F); | |||
3406 | assert(Inserted && "Supplemental formula already exists!"); (void)Inserted; | |||
3407 | } | |||
3408 | ||||
3409 | /// Note which registers are used by the given formula, updating RegUses. | |||
3410 | void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) { | |||
3411 | if (F.ScaledReg) | |||
3412 | RegUses.countRegister(F.ScaledReg, LUIdx); | |||
3413 | for (const SCEV *BaseReg : F.BaseRegs) | |||
3414 | RegUses.countRegister(BaseReg, LUIdx); | |||
3415 | } | |||
3416 | ||||
3417 | /// If the given formula has not yet been inserted, add it to the list, and | |||
3418 | /// return true. Return false otherwise. | |||
3419 | bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) { | |||
3420 | // Do not insert formula that we will not be able to expand. | |||
3421 | assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F) && | |||
3422 | "Formula is illegal"); | |||
3423 | ||||
3424 | if (!LU.InsertFormula(F, *L)) | |||
3425 | return false; | |||
3426 | ||||
3427 | CountRegisters(F, LUIdx); | |||
3428 | return true; | |||
3429 | } | |||
3430 | ||||
3431 | /// Check for other uses of loop-invariant values which we're tracking. These | |||
3432 | /// other uses will pin these values in registers, making them less profitable | |||
3433 | /// for elimination. | |||
3434 | /// TODO: This currently misses non-constant addrec step registers. | |||
3435 | /// TODO: Should this give more weight to users inside the loop? | |||
3436 | void | |||
3437 | LSRInstance::CollectLoopInvariantFixupsAndFormulae() { | |||
3438 | SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end()); | |||
3439 | SmallPtrSet<const SCEV *, 32> Visited; | |||
3440 | ||||
3441 | while (!Worklist.empty()) { | |||
3442 | const SCEV *S = Worklist.pop_back_val(); | |||
3443 | ||||
3444 | // Don't process the same SCEV twice | |||
3445 | if (!Visited.insert(S).second) | |||
3446 | continue; | |||
3447 | ||||
3448 | if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) | |||
3449 | Worklist.append(N->op_begin(), N->op_end()); | |||
3450 | else if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(S)) | |||
3451 | Worklist.push_back(C->getOperand()); | |||
3452 | else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) { | |||
3453 | Worklist.push_back(D->getLHS()); | |||
3454 | Worklist.push_back(D->getRHS()); | |||
3455 | } else if (const SCEVUnknown *US = dyn_cast<SCEVUnknown>(S)) { | |||
3456 | const Value *V = US->getValue(); | |||
3457 | if (const Instruction *Inst = dyn_cast<Instruction>(V)) { | |||
3458 | // Look for instructions defined outside the loop. | |||
3459 | if (L->contains(Inst)) continue; | |||
3460 | } else if (isa<UndefValue>(V)) | |||
3461 | // Undef doesn't have a live range, so it doesn't matter. | |||
3462 | continue; | |||
3463 | for (const Use &U : V->uses()) { | |||
3464 | const Instruction *UserInst = dyn_cast<Instruction>(U.getUser()); | |||
3465 | // Ignore non-instructions. | |||
3466 | if (!UserInst) | |||
3467 | continue; | |||
3468 | // Don't bother if the instruction is an EHPad. | |||
3469 | if (UserInst->isEHPad()) | |||
3470 | continue; | |||
3471 | // Ignore instructions in other functions (as can happen with | |||
3472 | // Constants). | |||
3473 | if (UserInst->getParent()->getParent() != L->getHeader()->getParent()) | |||
3474 | continue; | |||
3475 | // Ignore instructions not dominated by the loop. | |||
3476 | const BasicBlock *UseBB = !isa<PHINode>(UserInst) ? | |||
3477 | UserInst->getParent() : | |||
3478 | cast<PHINode>(UserInst)->getIncomingBlock( | |||
3479 | PHINode::getIncomingValueNumForOperand(U.getOperandNo())); | |||
3480 | if (!DT.dominates(L->getHeader(), UseBB)) | |||
3481 | continue; | |||
3482 | // Don't bother if the instruction is in a BB which ends in an EHPad. | |||
3483 | if (UseBB->getTerminator()->isEHPad()) | |||
3484 | continue; | |||
3485 | // Don't bother rewriting PHIs in catchswitch blocks. | |||
3486 | if (isa<CatchSwitchInst>(UserInst->getParent()->getTerminator())) | |||
3487 | continue; | |||
3488 | // Ignore uses which are part of other SCEV expressions, to avoid | |||
3489 | // analyzing them multiple times. | |||
3490 | if (SE.isSCEVable(UserInst->getType())) { | |||
3491 | const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst)); | |||
3492 | // If the user is a no-op, look through to its uses. | |||
3493 | if (!isa<SCEVUnknown>(UserS)) | |||
3494 | continue; | |||
3495 | if (UserS == US) { | |||
3496 | Worklist.push_back( | |||
3497 | SE.getUnknown(const_cast<Instruction *>(UserInst))); | |||
3498 | continue; | |||
3499 | } | |||
3500 | } | |||
3501 | // Ignore icmp instructions which are already being analyzed. | |||
3502 | if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) { | |||
3503 | unsigned OtherIdx = !U.getOperandNo(); | |||
3504 | Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx)); | |||
3505 | if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L)) | |||
3506 | continue; | |||
3507 | } | |||
3508 | ||||
3509 | std::pair<size_t, int64_t> P = getUse( | |||
3510 | S, LSRUse::Basic, MemAccessTy()); | |||
3511 | size_t LUIdx = P.first; | |||
3512 | int64_t Offset = P.second; | |||
3513 | LSRUse &LU = Uses[LUIdx]; | |||
3514 | LSRFixup &LF = LU.getNewFixup(); | |||
3515 | LF.UserInst = const_cast<Instruction *>(UserInst); | |||
3516 | LF.OperandValToReplace = U; | |||
3517 | LF.Offset = Offset; | |||
3518 | LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); | |||
3519 | if (!LU.WidestFixupType || | |||
3520 | SE.getTypeSizeInBits(LU.WidestFixupType) < | |||
3521 | SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) | |||
3522 | LU.WidestFixupType = LF.OperandValToReplace->getType(); | |||
3523 | InsertSupplementalFormula(US, LU, LUIdx); | |||
3524 | CountRegisters(LU.Formulae.back(), Uses.size() - 1); | |||
3525 | break; | |||
3526 | } | |||
3527 | } | |||
3528 | } | |||
3529 | } | |||
3530 | ||||
3531 | /// Split S into subexpressions which can be pulled out into separate | |||
3532 | /// registers. If C is non-null, multiply each subexpression by C. | |||
3533 | /// | |||
3534 | /// Return remainder expression after factoring the subexpressions captured by | |||
3535 | /// Ops. If Ops is complete, return NULL. | |||
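/// For example (a sketch with hypothetical SCEVs): %a + (2 * (%b + %c)) yields
/// the candidate subexpressions %a, (2 * %b) and (2 * %c), while an addrec such
/// as {%a,+,4}<%L> is split into %a with {0,+,4}<%L> returned as the remainder.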
3536 | static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C, | |||
3537 | SmallVectorImpl<const SCEV *> &Ops, | |||
3538 | const Loop *L, | |||
3539 | ScalarEvolution &SE, | |||
3540 | unsigned Depth = 0) { | |||
3541 | // Arbitrarily cap recursion to protect compile time. | |||
3542 | if (Depth >= 3) | |||
3543 | return S; | |||
3544 | ||||
3545 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { | |||
3546 | // Break out add operands. | |||
3547 | for (const SCEV *S : Add->operands()) { | |||
3548 | const SCEV *Remainder = CollectSubexprs(S, C, Ops, L, SE, Depth+1); | |||
3549 | if (Remainder) | |||
3550 | Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); | |||
3551 | } | |||
3552 | return nullptr; | |||
3553 | } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { | |||
3554 | // Split a non-zero base out of an addrec. | |||
3555 | if (AR->getStart()->isZero() || !AR->isAffine()) | |||
3556 | return S; | |||
3557 | ||||
3558 | const SCEV *Remainder = CollectSubexprs(AR->getStart(), | |||
3559 | C, Ops, L, SE, Depth+1); | |||
3560 | // Split the non-zero AddRec unless it is part of a nested recurrence that | |||
3561 | // does not pertain to this loop. | |||
3562 | if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) { | |||
3563 | Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); | |||
3564 | Remainder = nullptr; | |||
3565 | } | |||
3566 | if (Remainder != AR->getStart()) { | |||
3567 | if (!Remainder) | |||
3568 | Remainder = SE.getConstant(AR->getType(), 0); | |||
3569 | return SE.getAddRecExpr(Remainder, | |||
3570 | AR->getStepRecurrence(SE), | |||
3571 | AR->getLoop(), | |||
3572 | //FIXME: AR->getNoWrapFlags(SCEV::FlagNW) | |||
3573 | SCEV::FlagAnyWrap); | |||
3574 | } | |||
3575 | } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { | |||
3576 | // Break (C * (a + b + c)) into C*a + C*b + C*c. | |||
3577 | if (Mul->getNumOperands() != 2) | |||
3578 | return S; | |||
3579 | if (const SCEVConstant *Op0 = | |||
3580 | dyn_cast<SCEVConstant>(Mul->getOperand(0))) { | |||
3581 | C = C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0; | |||
3582 | const SCEV *Remainder = | |||
3583 | CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1); | |||
3584 | if (Remainder) | |||
3585 | Ops.push_back(SE.getMulExpr(C, Remainder)); | |||
3586 | return nullptr; | |||
3587 | } | |||
3588 | } | |||
3589 | return S; | |||
3590 | } | |||
3591 | ||||
3592 | /// Return true if the SCEV represents a value that may end up as a | |||
3593 | /// post-increment operation. | |||
3594 | static bool mayUsePostIncMode(const TargetTransformInfo &TTI, | |||
3595 | LSRUse &LU, const SCEV *S, const Loop *L, | |||
3596 | ScalarEvolution &SE) { | |||
3597 | if (LU.Kind != LSRUse::Address || | |||
3598 | !LU.AccessTy.getType()->isIntOrIntVectorTy()) | |||
3599 | return false; | |||
3600 | const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S); | |||
3601 | if (!AR) | |||
3602 | return false; | |||
3603 | const SCEV *LoopStep = AR->getStepRecurrence(SE); | |||
3604 | if (!isa<SCEVConstant>(LoopStep)) | |||
3605 | return false; | |||
3606 | // Check if a post-indexed load/store can be used. | |||
3607 | if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, AR->getType()) || | |||
3608 | TTI.isIndexedStoreLegal(TTI.MIM_PostInc, AR->getType())) { | |||
3609 | const SCEV *LoopStart = AR->getStart(); | |||
3610 | if (!isa<SCEVConstant>(LoopStart) && SE.isLoopInvariant(LoopStart, L)) | |||
3611 | return true; | |||
3612 | } | |||
3613 | return false; | |||
3614 | } | |||
3615 | ||||
3616 | /// Helper function for LSRInstance::GenerateReassociations. | |||
3617 | void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx, | |||
3618 | const Formula &Base, | |||
3619 | unsigned Depth, size_t Idx, | |||
3620 | bool IsScaledReg) { | |||
3621 | const SCEV *BaseReg = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx]; | |||
3622 | // Don't generate reassociations for the base register of a value that | |||
3623 | // may generate a post-increment operator. The reason is that the | |||
3624 | // reassociations cause extra base+register formula to be created, | |||
3625 | // and possibly chosen, but the post-increment is more efficient. | |||
3626 | if (AMK == TTI::AMK_PostIndexed && mayUsePostIncMode(TTI, LU, BaseReg, L, SE)) | |||
3627 | return; | |||
3628 | SmallVector<const SCEV *, 8> AddOps; | |||
3629 | const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE); | |||
3630 | if (Remainder) | |||
3631 | AddOps.push_back(Remainder); | |||
3632 | ||||
3633 | if (AddOps.size() == 1) | |||
3634 | return; | |||
3635 | ||||
3636 | for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(), | |||
3637 | JE = AddOps.end(); | |||
3638 | J != JE; ++J) { | |||
3639 | // Loop-variant "unknown" values are uninteresting; we won't be able to | |||
3640 | // do anything meaningful with them. | |||
3641 | if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L)) | |||
3642 | continue; | |||
3643 | ||||
3644 | // Don't pull a constant into a register if the constant could be folded | |||
3645 | // into an immediate field. | |||
3646 | if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind, | |||
3647 | LU.AccessTy, *J, Base.getNumRegs() > 1)) | |||
3648 | continue; | |||
3649 | ||||
3650 | // Collect all operands except *J. | |||
3651 | SmallVector<const SCEV *, 8> InnerAddOps( | |||
3652 | ((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J); | |||
3653 | InnerAddOps.append(std::next(J), | |||
3654 | ((const SmallVector<const SCEV *, 8> &)AddOps).end()); | |||
3655 | ||||
3656 | // Don't leave just a constant behind in a register if the constant could | |||
3657 | // be folded into an immediate field. | |||
3658 | if (InnerAddOps.size() == 1 && | |||
3659 | isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind, | |||
3660 | LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1)) | |||
3661 | continue; | |||
3662 | ||||
3663 | const SCEV *InnerSum = SE.getAddExpr(InnerAddOps); | |||
3664 | if (InnerSum->isZero()) | |||
3665 | continue; | |||
3666 | Formula F = Base; | |||
3667 | ||||
3668 | // Add the remaining pieces of the add back into the new formula. | |||
3669 | const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum); | |||
3670 | if (InnerSumSC && SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 && | |||
3671 | TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset + | |||
3672 | InnerSumSC->getValue()->getZExtValue())) { | |||
3673 | F.UnfoldedOffset = | |||
3674 | (uint64_t)F.UnfoldedOffset + InnerSumSC->getValue()->getZExtValue(); | |||
3675 | if (IsScaledReg) | |||
3676 | F.ScaledReg = nullptr; | |||
3677 | else | |||
3678 | F.BaseRegs.erase(F.BaseRegs.begin() + Idx); | |||
3679 | } else if (IsScaledReg) | |||
3680 | F.ScaledReg = InnerSum; | |||
3681 | else | |||
3682 | F.BaseRegs[Idx] = InnerSum; | |||
3683 | ||||
3684 | // Add J as its own register, or an unfolded immediate. | |||
3685 | const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J); | |||
3686 | if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 && | |||
3687 | TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset + | |||
3688 | SC->getValue()->getZExtValue())) | |||
3689 | F.UnfoldedOffset = | |||
3690 | (uint64_t)F.UnfoldedOffset + SC->getValue()->getZExtValue(); | |||
3691 | else | |||
3692 | F.BaseRegs.push_back(*J); | |||
3693 | // We may have changed the number of registers in base regs; adjust the | |||
3694 | // formula accordingly. | |||
3695 | F.canonicalize(*L); | |||
3696 | ||||
3697 | if (InsertFormula(LU, LUIdx, F)) | |||
3698 | // If that formula hadn't been seen before, recurse to find more like | |||
3699 | // it. | |||
3700 | // Add a term based on Log16(AddOps.size()) (the same as Log2_32(AddOps.size()) >> 2), | |||
3701 | // because Depth alone is not enough to bound compile time. | |||
3702 | // This means that every time AddOps.size() is greater than 16^x we will add | |||
3703 | // x to Depth. | |||
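// Illustrative arithmetic (added note, not in the upstream source): with
// AddOps.size() == 20, Log2_32(20) == 4 and 4 >> 2 == 1, so Depth grows by
// 1 + 1 == 2 at this step; with 300 operands, Log2_32(300) == 8 and
// 8 >> 2 == 2, so Depth grows by 3.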
3704 | GenerateReassociations(LU, LUIdx, LU.Formulae.back(), | |||
3705 | Depth + 1 + (Log2_32(AddOps.size()) >> 2)); | |||
3706 | } | |||
3707 | } | |||
3708 | ||||
3709 | /// Split out subexpressions from adds and the bases of addrecs. | |||
3710 | void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, | |||
3711 | Formula Base, unsigned Depth) { | |||
3712 | assert(Base.isCanonical(*L) && "Input must be in the canonical form"); | |||
3713 | // Arbitrarily cap recursion to protect compile time. | |||
3714 | if (Depth >= 3) | |||
3715 | return; | |||
3716 | ||||
3717 | for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) | |||
3718 | GenerateReassociationsImpl(LU, LUIdx, Base, Depth, i); | |||
3719 | ||||
3720 | if (Base.Scale == 1) | |||
3721 | GenerateReassociationsImpl(LU, LUIdx, Base, Depth, | |||
3722 | /* Idx */ -1, /* IsScaledReg */ true); | |||
3723 | } | |||
3724 | ||||
3725 | /// Generate a formula consisting of all of the loop-dominating registers added | |||
3726 | /// into a single register. | |||
3727 | void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx, | |||
3728 | Formula Base) { | |||
3729 | // This method is only interesting on a plurality of registers. | |||
3730 | if (Base.BaseRegs.size() + (Base.Scale == 1) + | |||
3731 | (Base.UnfoldedOffset != 0) <= 1) | |||
3732 | return; | |||
3733 | ||||
3734 | // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before | |||
3735 | // processing the formula. | |||
3736 | Base.unscale(); | |||
3737 | SmallVector<const SCEV *, 4> Ops; | |||
3738 | Formula NewBase = Base; | |||
3739 | NewBase.BaseRegs.clear(); | |||
3740 | Type *CombinedIntegerType = nullptr; | |||
3741 | for (const SCEV *BaseReg : Base.BaseRegs) { | |||
3742 | if (SE.properlyDominates(BaseReg, L->getHeader()) && | |||
3743 | !SE.hasComputableLoopEvolution(BaseReg, L)) { | |||
3744 | if (!CombinedIntegerType) | |||
3745 | CombinedIntegerType = SE.getEffectiveSCEVType(BaseReg->getType()); | |||
3746 | Ops.push_back(BaseReg); | |||
3747 | } | |||
3748 | else | |||
3749 | NewBase.BaseRegs.push_back(BaseReg); | |||
3750 | } | |||
3751 | ||||
3752 | // If no register is relevant, we're done. | |||
3753 | if (Ops.size() == 0) | |||
3754 | return; | |||
3755 | ||||
3756 | // Utility function for generating the required variants of the combined | |||
3757 | // registers. | |||
3758 | auto GenerateFormula = [&](const SCEV *Sum) { | |||
3759 | Formula F = NewBase; | |||
3760 | ||||
3761 | // TODO: If Sum is zero, it probably means ScalarEvolution missed an | |||
3762 | // opportunity to fold something. For now, just ignore such cases | |||
3763 | // rather than proceed with zero in a register. | |||
3764 | if (Sum->isZero()) | |||
3765 | return; | |||
3766 | ||||
3767 | F.BaseRegs.push_back(Sum); | |||
3768 | F.canonicalize(*L); | |||
3769 | (void)InsertFormula(LU, LUIdx, F); | |||
3770 | }; | |||
3771 | ||||
3772 | // If we collected at least two registers, generate a formula combining them. | |||
3773 | if (Ops.size() > 1) { | |||
3774 | SmallVector<const SCEV *, 4> OpsCopy(Ops); // Don't let SE modify Ops. | |||
3775 | GenerateFormula(SE.getAddExpr(OpsCopy)); | |||
3776 | } | |||
3777 | ||||
3778 | // If we have an unfolded offset, generate a formula combining it with the | |||
3779 | // registers collected. | |||
3780 | if (NewBase.UnfoldedOffset) { | |||
3781 | assert(CombinedIntegerType && "Missing a type for the unfolded offset"); | |||
3782 | Ops.push_back(SE.getConstant(CombinedIntegerType, NewBase.UnfoldedOffset, | |||
3783 | true)); | |||
3784 | NewBase.UnfoldedOffset = 0; | |||
3785 | GenerateFormula(SE.getAddExpr(Ops)); | |||
3786 | } | |||
3787 | } | |||
3788 | ||||
3789 | /// Helper function for LSRInstance::GenerateSymbolicOffsets. | |||
3790 | void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx, | |||
3791 | const Formula &Base, size_t Idx, | |||
3792 | bool IsScaledReg) { | |||
3793 | const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx]; | |||
3794 | GlobalValue *GV = ExtractSymbol(G, SE); | |||
3795 | if (G->isZero() || !GV) | |||
3796 | return; | |||
3797 | Formula F = Base; | |||
3798 | F.BaseGV = GV; | |||
3799 | if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) | |||
3800 | return; | |||
3801 | if (IsScaledReg) | |||
3802 | F.ScaledReg = G; | |||
3803 | else | |||
3804 | F.BaseRegs[Idx] = G; | |||
3805 | (void)InsertFormula(LU, LUIdx, F); | |||
3806 | } | |||
3807 | ||||
3808 | /// Generate reuse formulae using symbolic offsets. | |||
3809 | void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, | |||
3810 | Formula Base) { | |||
3811 | // We can't add a symbolic offset if the address already contains one. | |||
3812 | if (Base.BaseGV) return; | |||
3813 | ||||
3814 | for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) | |||
3815 | GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, i); | |||
3816 | if (Base.Scale == 1) | |||
3817 | GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, /* Idx */ -1, | |||
3818 | /* IsScaledReg */ true); | |||
3819 | } | |||
3820 | ||||
3821 | /// Helper function for LSRInstance::GenerateConstantOffsets. | |||
3822 | void LSRInstance::GenerateConstantOffsetsImpl( | |||
3823 | LSRUse &LU, unsigned LUIdx, const Formula &Base, | |||
3824 | const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg) { | |||
3825 | ||||
3826 | auto GenerateOffset = [&](const SCEV *G, int64_t Offset) { | |||
3827 | Formula F = Base; | |||
3828 | F.BaseOffset = (uint64_t)Base.BaseOffset - Offset; | |||
3829 | ||||
3830 | if (isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) { | |||
3831 | // Add the offset to the base register. | |||
3832 | const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), Offset), G); | |||
3833 | // If it cancelled out, drop the base register, otherwise update it. | |||
3834 | if (NewG->isZero()) { | |||
3835 | if (IsScaledReg) { | |||
3836 | F.Scale = 0; | |||
3837 | F.ScaledReg = nullptr; | |||
3838 | } else | |||
3839 | F.deleteBaseReg(F.BaseRegs[Idx]); | |||
3840 | F.canonicalize(*L); | |||
3841 | } else if (IsScaledReg) | |||
3842 | F.ScaledReg = NewG; | |||
3843 | else | |||
3844 | F.BaseRegs[Idx] = NewG; | |||
3845 | ||||
3846 | (void)InsertFormula(LU, LUIdx, F); | |||
3847 | } | |||
3848 | }; | |||
3849 | ||||
3850 | const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx]; | |||
3851 | ||||
3852 | // With constant offsets and constant steps, we can generate pre-inc | |||
3853 | // accesses by having the offset equal the step. So, for access #0 with a | |||
3854 | // step of 8, we generate a G - 8 base which would require the first access | |||
3855 | // to be ((G - 8) + 8),+,8. The pre-indexed access then updates the pointer | |||
3856 | // for itself and hopefully becomes the base for other accesses. This means | |||
3857 | // that a single pre-indexed access can be generated to become the new | |||
3858 | // base pointer for each iteration of the loop, resulting in no extra add/sub | |||
3859 | // instructions for pointer updating. | |||
3860 | if (AMK == TTI::AMK_PreIndexed && LU.Kind == LSRUse::Address) { | |||
3861 | if (auto *GAR = dyn_cast<SCEVAddRecExpr>(G)) { | |||
3862 | if (auto *StepRec = | |||
3863 | dyn_cast<SCEVConstant>(GAR->getStepRecurrence(SE))) { | |||
3864 | const APInt &StepInt = StepRec->getAPInt(); | |||
3865 | int64_t Step = StepInt.isNegative() ? | |||
3866 | StepInt.getSExtValue() : StepInt.getZExtValue(); | |||
3867 | ||||
3868 | for (int64_t Offset : Worklist) { | |||
3869 | Offset -= Step; | |||
3870 | GenerateOffset(G, Offset); | |||
3871 | } | |||
3872 | } | |||
3873 | } | |||
3874 | } | |||
3875 | for (int64_t Offset : Worklist) | |||
3876 | GenerateOffset(G, Offset); | |||
3877 | ||||
3878 | int64_t Imm = ExtractImmediate(G, SE); | |||
3879 | if (G->isZero() || Imm == 0) | |||
3880 | return; | |||
3881 | Formula F = Base; | |||
3882 | F.BaseOffset = (uint64_t)F.BaseOffset + Imm; | |||
3883 | if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) | |||
3884 | return; | |||
3885 | if (IsScaledReg) { | |||
3886 | F.ScaledReg = G; | |||
3887 | } else { | |||
3888 | F.BaseRegs[Idx] = G; | |||
3889 | // We may generate a non-canonical Formula if G is a recurrent expr reg | |||
3890 | // related to the current loop while F.ScaledReg is not. | |||
3891 | F.canonicalize(*L); | |||
3892 | } | |||
3893 | (void)InsertFormula(LU, LUIdx, F); | |||
3894 | } | |||
3895 | ||||
3896 | /// GenerateConstantOffsets - Generate reuse formulae using constant offsets. | |||
3897 | void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, | |||
3898 | Formula Base) { | |||
3899 | // TODO: For now, just add the min and max offset, because it usually isn't | |||
3900 | // worthwhile looking at everything in between. | |||
3901 | SmallVector<int64_t, 2> Worklist; | |||
3902 | Worklist.push_back(LU.MinOffset); | |||
3903 | if (LU.MaxOffset != LU.MinOffset) | |||
3904 | Worklist.push_back(LU.MaxOffset); | |||
3905 | ||||
3906 | for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) | |||
3907 | GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, i); | |||
3908 | if (Base.Scale == 1) | |||
3909 | GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, /* Idx */ -1, | |||
3910 | /* IsScaledReg */ true); | |||
3911 | } | |||
3912 | ||||
3913 | /// For ICmpZero, check to see if we can scale up the comparison. For example, x | |||
3914 | /// == y -> x*c == y*c. | |||
3915 | void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, | |||
3916 | Formula Base) { | |||
3917 | if (LU.Kind != LSRUse::ICmpZero) return; | |||
3918 | ||||
3919 | // Determine the integer type for the base formula. | |||
3920 | Type *IntTy = Base.getType(); | |||
3921 | if (!IntTy) return; | |||
3922 | if (SE.getTypeSizeInBits(IntTy) > 64) return; | |||
3923 | ||||
3924 | // Don't do this if there is more than one offset. | |||
3925 | if (LU.MinOffset != LU.MaxOffset) return; | |||
3926 | ||||
3927 | // Check if transformation is valid. It is illegal to multiply pointer. | |||
3928 | if (Base.ScaledReg && Base.ScaledReg->getType()->isPointerTy()) | |||
3929 | return; | |||
3930 | for (const SCEV *BaseReg : Base.BaseRegs) | |||
3931 | if (BaseReg->getType()->isPointerTy()) | |||
3932 | return; | |||
3933 | assert(!Base.BaseGV && "ICmpZero use is not legal!"); | |||
3934 | ||||
3935 | // Check each interesting stride. | |||
3936 | for (int64_t Factor : Factors) { | |||
3937 | // Check that the multiplication doesn't overflow. | |||
3938 | if (Base.BaseOffset == std::numeric_limits<int64_t>::min() && Factor == -1) | |||
3939 | continue; | |||
3940 | int64_t NewBaseOffset = (uint64_t)Base.BaseOffset * Factor; | |||
3941 | assert(Factor != 0 && "Zero factor not expected!"); | |||
3942 | if (NewBaseOffset / Factor != Base.BaseOffset) | |||
3943 | continue; | |||
3944 | // If the offset will be truncated at this use, check that it is in bounds. | |||
3945 | if (!IntTy->isPointerTy() && | |||
3946 | !ConstantInt::isValueValidForType(IntTy, NewBaseOffset)) | |||
3947 | continue; | |||
3948 | ||||
3949 | // Check that multiplying with the use offset doesn't overflow. | |||
3950 | int64_t Offset = LU.MinOffset; | |||
3951 | if (Offset == std::numeric_limits<int64_t>::min() && Factor == -1) | |||
3952 | continue; | |||
3953 | Offset = (uint64_t)Offset * Factor; | |||
3954 | if (Offset / Factor != LU.MinOffset) | |||
3955 | continue; | |||
3956 | // If the offset will be truncated at this use, check that it is in bounds. | |||
3957 | if (!IntTy->isPointerTy() && | |||
3958 | !ConstantInt::isValueValidForType(IntTy, Offset)) | |||
3959 | continue; | |||
3960 | ||||
3961 | Formula F = Base; | |||
3962 | F.BaseOffset = NewBaseOffset; | |||
3963 | ||||
3964 | // Check that this scale is legal. | |||
3965 | if (!isLegalUse(TTI, Offset, Offset, LU.Kind, LU.AccessTy, F)) | |||
3966 | continue; | |||
3967 | ||||
3968 | // Compensate for the use having MinOffset built into it. | |||
3969 | F.BaseOffset = (uint64_t)F.BaseOffset + Offset - LU.MinOffset; | |||
3970 | ||||
3971 | const SCEV *FactorS = SE.getConstant(IntTy, Factor); | |||
3972 | ||||
3973 | // Check that multiplying with each base register doesn't overflow. | |||
3974 | for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) { | |||
3975 | F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS); | |||
3976 | if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i]) | |||
3977 | goto next; | |||
3978 | } | |||
3979 | ||||
3980 | // Check that multiplying with the scaled register doesn't overflow. | |||
3981 | if (F.ScaledReg) { | |||
3982 | F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS); | |||
3983 | if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg) | |||
3984 | continue; | |||
3985 | } | |||
3986 | ||||
3987 | // Check that multiplying with the unfolded offset doesn't overflow. | |||
3988 | if (F.UnfoldedOffset != 0) { | |||
3989 | if (F.UnfoldedOffset == std::numeric_limits<int64_t>::min() && | |||
3990 | Factor == -1) | |||
3991 | continue; | |||
3992 | F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor; | |||
3993 | if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset) | |||
3994 | continue; | |||
3995 | // If the offset will be truncated, check that it is in bounds. | |||
3996 | if (!IntTy->isPointerTy() && | |||
3997 | !ConstantInt::isValueValidForType(IntTy, F.UnfoldedOffset)) | |||
3998 | continue; | |||
3999 | } | |||
4000 | ||||
4001 | // If we make it here and it's legal, add it. | |||
4002 | (void)InsertFormula(LU, LUIdx, F); | |||
4003 | next:; | |||
4004 | } | |||
4005 | } | |||
4006 | ||||
4007 | /// Generate stride factor reuse formulae by making use of scaled-offset address | |||
4008 | /// modes, for example. | |||
4009 | void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) { | |||
4010 | // Determine the integer type for the base formula. | |||
4011 | Type *IntTy = Base.getType(); | |||
4012 | if (!IntTy) return; | |||
4013 | ||||
4014 | // If this Formula already has a scaled register, we can't add another one. | |||
4015 | // Try to unscale the formula to generate a better scale. | |||
4016 | if (Base.Scale != 0 && !Base.unscale()) | |||
4017 | return; | |||
4018 | ||||
4019 | assert(Base.Scale == 0 && "unscale did not do its job!"); | |||
4020 | ||||
4021 | // Check each interesting stride. | |||
4022 | for (int64_t Factor : Factors) { | |||
4023 | Base.Scale = Factor; | |||
4024 | Base.HasBaseReg = Base.BaseRegs.size() > 1; | |||
4025 | // Check whether this scale is going to be legal. | |||
4026 | if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, | |||
4027 | Base)) { | |||
4028 | // As a special-case, handle special out-of-loop Basic users specially. | |||
4029 | // TODO: Reconsider this special case. | |||
4030 | if (LU.Kind == LSRUse::Basic && | |||
4031 | isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LSRUse::Special, | |||
4032 | LU.AccessTy, Base) && | |||
4033 | LU.AllFixupsOutsideLoop) | |||
4034 | LU.Kind = LSRUse::Special; | |||
4035 | else | |||
4036 | continue; | |||
4037 | } | |||
4038 | // For an ICmpZero, negating a solitary base register won't lead to | |||
4039 | // new solutions. | |||
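// (Illustrative note, not in the upstream source: for a solitary register,
//  -1 * reg == 0 is satisfied exactly when reg == 0, so the negated form
//  adds no new solution.)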
4040 | if (LU.Kind == LSRUse::ICmpZero && | |||
4041 | !Base.HasBaseReg && Base.BaseOffset == 0 && !Base.BaseGV) | |||
4042 | continue; | |||
4043 | // For each addrec base reg, if its loop is current loop, apply the scale. | |||
4044 | for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) { | |||
4045 | const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i]); | |||
4046 | if (AR && (AR->getLoop() == L || LU.AllFixupsOutsideLoop)) { | |||
4047 | const SCEV *FactorS = SE.getConstant(IntTy, Factor); | |||
4048 | if (FactorS->isZero()) | |||
4049 | continue; | |||
4050 | // Divide out the factor, ignoring high bits, since we'll be | |||
4051 | // scaling the value back up in the end. | |||
4052 | if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) { | |||
4053 | // TODO: This could be optimized to avoid all the copying. | |||
4054 | Formula F = Base; | |||
4055 | F.ScaledReg = Quotient; | |||
4056 | F.deleteBaseReg(F.BaseRegs[i]); | |||
4057 | // The canonical representation of 1*reg is reg, which is already in | |||
4058 | // Base. In that case, do not try to insert the formula, it will be | |||
4059 | // rejected anyway. | |||
4060 | if (F.Scale == 1 && (F.BaseRegs.empty() || | |||
4061 | (AR->getLoop() != L && LU.AllFixupsOutsideLoop))) | |||
4062 | continue; | |||
4063 | // If AllFixupsOutsideLoop is true and F.Scale is 1, we may generate | |||
4064 | // a non-canonical Formula whose ScaledReg's loop is not L. | |||
4065 | if (F.Scale == 1 && LU.AllFixupsOutsideLoop) | |||
4066 | F.canonicalize(*L); | |||
4067 | (void)InsertFormula(LU, LUIdx, F); | |||
4068 | } | |||
4069 | } | |||
4070 | } | |||
4071 | } | |||
4072 | } | |||
4073 | ||||
4074 | /// Generate reuse formulae from different IV types. | |||
4075 | void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) { | |||
4076 | // Don't bother truncating symbolic values. | |||
4077 | if (Base.BaseGV) return; | |||
4078 | ||||
4079 | // Determine the integer type for the base formula. | |||
4080 | Type *DstTy = Base.getType(); | |||
4081 | if (!DstTy) return; | |||
4082 | if (DstTy->isPointerTy()) | |||
4083 | return; | |||
4084 | ||||
4085 | for (Type *SrcTy : Types) { | |||
4086 | if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) { | |||
4087 | Formula F = Base; | |||
4088 | ||||
4089 | // Sometimes SCEV is able to prove zero during ext transform. It may | |||
4090 | // happen if SCEV did not do all possible transforms while creating the | |||
4091 | // initial node (maybe due to depth limitations), but it can do them while | |||
4092 | // taking ext. | |||
4093 | if (F.ScaledReg) { | |||
4094 | const SCEV *NewScaledReg = SE.getAnyExtendExpr(F.ScaledReg, SrcTy); | |||
4095 | if (NewScaledReg->isZero()) | |||
4096 | continue; | |||
4097 | F.ScaledReg = NewScaledReg; | |||
4098 | } | |||
4099 | bool HasZeroBaseReg = false; | |||
4100 | for (const SCEV *&BaseReg : F.BaseRegs) { | |||
4101 | const SCEV *NewBaseReg = SE.getAnyExtendExpr(BaseReg, SrcTy); | |||
4102 | if (NewBaseReg->isZero()) { | |||
4103 | HasZeroBaseReg = true; | |||
4104 | break; | |||
4105 | } | |||
4106 | BaseReg = NewBaseReg; | |||
4107 | } | |||
4108 | if (HasZeroBaseReg) | |||
4109 | continue; | |||
4110 | ||||
4111 | // TODO: This assumes we've done basic processing on all uses and | |||
4112 | // have an idea what the register usage is. | |||
4113 | if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses)) | |||
4114 | continue; | |||
4115 | ||||
4116 | F.canonicalize(*L); | |||
4117 | (void)InsertFormula(LU, LUIdx, F); | |||
4118 | } | |||
4119 | } | |||
4120 | } | |||
4121 | ||||
4122 | namespace { | |||
4123 | ||||
4124 | /// Helper class for GenerateCrossUseConstantOffsets. It's used to defer | |||
4125 | /// modifications so that the search phase doesn't have to worry about the data | |||
4126 | /// structures moving underneath it. | |||
4127 | struct WorkItem { | |||
4128 | size_t LUIdx; | |||
4129 | int64_t Imm; | |||
4130 | const SCEV *OrigReg; | |||
4131 | ||||
4132 | WorkItem(size_t LI, int64_t I, const SCEV *R) | |||
4133 | : LUIdx(LI), Imm(I), OrigReg(R) {} | |||
4134 | ||||
4135 | void print(raw_ostream &OS) const; | |||
4136 | void dump() const; | |||
4137 | }; | |||
4138 | ||||
4139 | } // end anonymous namespace | |||
4140 | ||||
4141 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
4142 | void WorkItem::print(raw_ostream &OS) const { | |||
4143 | OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx | |||
4144 | << " , add offset " << Imm; | |||
4145 | } | |||
4146 | ||||
4147 | LLVM_DUMP_METHOD void WorkItem::dump() const { | |||
4148 | print(errs()); errs() << '\n'; | |||
4149 | } | |||
4150 | #endif | |||
4151 | ||||
4152 | /// Look for registers which are a constant distance apart and try to form reuse | |||
4153 | /// opportunities between them. | |||
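/// For example (illustrative, not in the upstream source): registers
/// {A,+,1}, {A+8,+,1} and {A+16,+,1} all strip down to the base {A,+,1}
/// with immediates 0, 8 and 16, so their uses can share one register plus
/// folded constant offsets.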
4154 | void LSRInstance::GenerateCrossUseConstantOffsets() { | |||
4155 | // Group the registers by their value without any added constant offset. | |||
4156 | using ImmMapTy = std::map<int64_t, const SCEV *>; | |||
4157 | ||||
4158 | DenseMap<const SCEV *, ImmMapTy> Map; | |||
4159 | DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap; | |||
4160 | SmallVector<const SCEV *, 8> Sequence; | |||
4161 | for (const SCEV *Use : RegUses) { | |||
4162 | const SCEV *Reg = Use; // Make a copy for ExtractImmediate to modify. | |||
4163 | int64_t Imm = ExtractImmediate(Reg, SE); | |||
4164 | auto Pair = Map.insert(std::make_pair(Reg, ImmMapTy())); | |||
4165 | if (Pair.second) | |||
4166 | Sequence.push_back(Reg); | |||
4167 | Pair.first->second.insert(std::make_pair(Imm, Use)); | |||
4168 | UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(Use); | |||
4169 | } | |||
4170 | ||||
4171 | // Now examine each set of registers with the same base value. Build up | |||
4172 | // a list of work to do and do the work in a separate step so that we're | |||
4173 | // not adding formulae and register counts while we're searching. | |||
4174 | SmallVector<WorkItem, 32> WorkItems; | |||
4175 | SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems; | |||
4176 | for (const SCEV *Reg : Sequence) { | |||
4177 | const ImmMapTy &Imms = Map.find(Reg)->second; | |||
4178 | ||||
4179 | // It's not worthwhile looking for reuse if there's only one offset. | |||
4180 | if (Imms.size() == 1) | |||
4181 | continue; | |||
4182 | ||||
4183 | LLVM_DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':'; | |||
4184 | for (const auto &Entry | |||
4185 | : Imms) dbgs() | |||
4186 | << ' ' << Entry.first; | |||
4187 | dbgs() << '\n'); | |||
4188 | ||||
4189 | // Examine each offset. | |||
4190 | for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end(); | |||
4191 | J != JE; ++J) { | |||
4192 | const SCEV *OrigReg = J->second; | |||
4193 | ||||
4194 | int64_t JImm = J->first; | |||
4195 | const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg); | |||
4196 | ||||
4197 | if (!isa<SCEVConstant>(OrigReg) && | |||
4198 | UsedByIndicesMap[Reg].count() == 1) { | |||
4199 | LLVM_DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg | |||
4200 | << '\n'); | |||
4201 | continue; | |||
4202 | } | |||
4203 | ||||
4204 | // Conservatively examine offsets between this orig reg and a few selected | |||
4205 | // other orig regs. | |||
4206 | int64_t First = Imms.begin()->first; | |||
4207 | int64_t Last = std::prev(Imms.end())->first; | |||
4208 | // Compute (First + Last) / 2 without overflow using the fact that | |||
4209 | // First + Last = 2 * (First & Last) + (First ^ Last). | |||
4210 | int64_t Avg = (First & Last) + ((First ^ Last) >> 1); | |||
4211 | // If the result is negative and First is odd and Last even (or vice versa), | |||
4212 | // we rounded towards -inf. Add 1 in that case, to round towards 0. | |||
4213 | Avg = Avg + ((First ^ Last) & ((uint64_t)Avg >> 63)); | |||
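// Worked example (illustrative note, not in the upstream source): for
// First = -7, Last = 4: (First & Last) == 0 and (First ^ Last) == -3, so the
// initial Avg is 0 + (-3 >> 1) == -2, rounded towards -inf; Avg is negative,
// so the mask ((uint64_t)Avg >> 63) is 1, and (-3 & 1) == 1 corrects Avg to
// -1, matching trunc((-7 + 4) / 2).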
4214 | ImmMapTy::const_iterator OtherImms[] = { | |||
4215 | Imms.begin(), std::prev(Imms.end()), | |||
4216 | Imms.lower_bound(Avg)}; | |||
4217 | for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) { | |||
4218 | ImmMapTy::const_iterator M = OtherImms[i]; | |||
4219 | if (M == J || M == JE) continue; | |||
4220 | ||||
4221 | // Compute the difference between the two. | |||
4222 | int64_t Imm = (uint64_t)JImm - M->first; | |||
4223 | for (unsigned LUIdx : UsedByIndices.set_bits()) | |||
4224 | // Make a memo of this use, offset, and register tuple. | |||
4225 | if (UniqueItems.insert(std::make_pair(LUIdx, Imm)).second) | |||
4226 | WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg)); | |||
4227 | } | |||
4228 | } | |||
4229 | } | |||
4230 | ||||
4231 | Map.clear(); | |||
4232 | Sequence.clear(); | |||
4233 | UsedByIndicesMap.clear(); | |||
4234 | UniqueItems.clear(); | |||
4235 | ||||
4236 | // Now iterate through the worklist and add new formulae. | |||
4237 | for (const WorkItem &WI : WorkItems) { | |||
4238 | size_t LUIdx = WI.LUIdx; | |||
4239 | LSRUse &LU = Uses[LUIdx]; | |||
4240 | int64_t Imm = WI.Imm; | |||
4241 | const SCEV *OrigReg = WI.OrigReg; | |||
4242 | ||||
4243 | Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType()); | |||
4244 | const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm)); | |||
4245 | unsigned BitWidth = SE.getTypeSizeInBits(IntTy); | |||
4246 | ||||
4247 | // TODO: Use a more targeted data structure. | |||
4248 | for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) { | |||
4249 | Formula F = LU.Formulae[L]; | |||
4250 | // FIXME: The code for the scaled and unscaled registers looks | |||
4251 | // very similar but slightly different. Investigate if they | |||
4252 | // could be merged. That way, we would not have to unscale the | |||
4253 | // Formula. | |||
4254 | F.unscale(); | |||
4255 | // Use the immediate in the scaled register. | |||
4256 | if (F.ScaledReg == OrigReg) { | |||
4257 | int64_t Offset = (uint64_t)F.BaseOffset + Imm * (uint64_t)F.Scale; | |||
4258 | // Don't create 50 + reg(-50). | |||
4259 | if (F.referencesReg(SE.getSCEV( | |||
4260 | ConstantInt::get(IntTy, -(uint64_t)Offset)))) | |||
4261 | continue; | |||
4262 | Formula NewF = F; | |||
4263 | NewF.BaseOffset = Offset; | |||
4264 | if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, | |||
4265 | NewF)) | |||
4266 | continue; | |||
4267 | NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg); | |||
4268 | ||||
4269 | // If the new scale is a constant in a register, and adding the constant | |||
4270 | // value to the immediate would produce a value closer to zero than the | |||
4271 | // immediate itself, then the formula isn't worthwhile. | |||
4272 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg)) | |||
4273 | if (C->getValue()->isNegative() != (NewF.BaseOffset < 0) && | |||
4274 | (C->getAPInt().abs() * APInt(BitWidth, F.Scale)) | |||
4275 | .ule(std::abs(NewF.BaseOffset))) | |||
4276 | continue; | |||
4277 | ||||
4278 | // OK, looks good. | |||
4279 | NewF.canonicalize(*this->L); | |||
4280 | (void)InsertFormula(LU, LUIdx, NewF); | |||
4281 | } else { | |||
4282 | // Use the immediate in a base register. | |||
4283 | for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) { | |||
4284 | const SCEV *BaseReg = F.BaseRegs[N]; | |||
4285 | if (BaseReg != OrigReg) | |||
4286 | continue; | |||
4287 | Formula NewF = F; | |||
4288 | NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm; | |||
4289 | if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, | |||
4290 | LU.Kind, LU.AccessTy, NewF)) { | |||
4291 | if (AMK == TTI::AMK_PostIndexed && | |||
4292 | mayUsePostIncMode(TTI, LU, OrigReg, this->L, SE)) | |||
4293 | continue; | |||
4294 | if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm)) | |||
4295 | continue; | |||
4296 | NewF = F; | |||
4297 | NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm; | |||
4298 | } | |||
4299 | NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg); | |||
4300 | ||||
4301 | // If the new formula has a constant in a register, and adding the | |||
4302 | // constant value to the immediate would produce a value closer to | |||
4303 | // zero than the immediate itself, then the formula isn't worthwhile. | |||
4304 | for (const SCEV *NewReg : NewF.BaseRegs) | |||
4305 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewReg)) | |||
4306 | if ((C->getAPInt() + NewF.BaseOffset) | |||
4307 | .abs() | |||
4308 | .slt(std::abs(NewF.BaseOffset)) && | |||
4309 | (C->getAPInt() + NewF.BaseOffset).countTrailingZeros() >= | |||
4310 | countTrailingZeros<uint64_t>(NewF.BaseOffset)) | |||
4311 | goto skip_formula; | |||
4312 | ||||
4313 | // Ok, looks good. | |||
4314 | NewF.canonicalize(*this->L); | |||
4315 | (void)InsertFormula(LU, LUIdx, NewF); | |||
4316 | break; | |||
4317 | skip_formula:; | |||
4318 | } | |||
4319 | } | |||
4320 | } | |||
4321 | } | |||
4322 | } | |||
4323 | ||||
4324 | /// Generate formulae for each use. | |||
4325 | void | |||
4326 | LSRInstance::GenerateAllReuseFormulae() { | |||
4327 | // This is split into multiple loops so that hasRegsUsedByUsesOtherThan | |||
4328 | // queries are more precise. | |||
4329 | for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { | |||
4330 | LSRUse &LU = Uses[LUIdx]; | |||
4331 | for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) | |||
4332 | GenerateReassociations(LU, LUIdx, LU.Formulae[i]); | |||
4333 | for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) | |||
4334 | GenerateCombinations(LU, LUIdx, LU.Formulae[i]); | |||
4335 | } | |||
4336 | for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { | |||
4337 | LSRUse &LU = Uses[LUIdx]; | |||
4338 | for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) | |||
4339 | GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]); | |||
4340 | for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) | |||
4341 | GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]); | |||
4342 | for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) | |||
4343 | GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]); | |||
4344 | for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) | |||
4345 | GenerateScales(LU, LUIdx, LU.Formulae[i]); | |||
4346 | } | |||
4347 | for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { | |||
4348 | LSRUse &LU = Uses[LUIdx]; | |||
4349 | for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) | |||
4350 | GenerateTruncates(LU, LUIdx, LU.Formulae[i]); | |||
4351 | } | |||
4352 | ||||
4353 | GenerateCrossUseConstantOffsets(); | |||
4354 | ||||
4355 | LLVM_DEBUG(dbgs() << "\n" | |||
4356 | "After generating reuse formulae:\n"; | |||
4357 | print_uses(dbgs())); | |||
4358 | } | |||
4359 | ||||
4360 | /// If there are multiple formulae with the same set of registers used | |||
4361 | /// by other uses, pick the best one and delete the others. | |||
4362 | void LSRInstance::FilterOutUndesirableDedicatedRegisters() { | |||
4363 | DenseSet<const SCEV *> VisitedRegs; | |||
4364 | SmallPtrSet<const SCEV *, 16> Regs; | |||
4365 | SmallPtrSet<const SCEV *, 16> LoserRegs; | |||
4366 | #ifndef NDEBUG | |||
4367 | bool ChangedFormulae = false; | |||
4368 | #endif | |||
4369 | ||||
4370 | // Collect the best formula for each unique set of shared registers. This | |||
4371 | // is reset for each use. | |||
4372 | using BestFormulaeTy = | |||
4373 | DenseMap<SmallVector<const SCEV *, 4>, size_t, UniquifierDenseMapInfo>; | |||
4374 | ||||
4375 | BestFormulaeTy BestFormulae; | |||
4376 | ||||
4377 | for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { | |||
4378 | LSRUse &LU = Uses[LUIdx]; | |||
4379 | LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); | |||
4380 | dbgs() << '\n'); | |||
4381 | ||||
4382 | bool Any = false; | |||
4383 | for (size_t FIdx = 0, NumForms = LU.Formulae.size(); | |||
4384 | FIdx != NumForms; ++FIdx) { | |||
4385 | Formula &F = LU.Formulae[FIdx]; | |||
4386 | ||||
4387 | // Some formulas are instant losers. For example, they may depend on | |||
4388 | // nonexistent AddRecs from other loops. These need to be filtered | |||
4389 | // immediately, otherwise heuristics could choose them over others leading | |||
4390 | // to an unsatisfactory solution. Passing LoserRegs into RateFormula here | |||
4391 | // avoids the need to recompute this information across formulae using the | |||
4392 | // same bad AddRec. Passing LoserRegs is also essential unless we remove | |||
4393 | // the corresponding bad register from the Regs set. | |||
4394 | Cost CostF(L, SE, TTI, AMK); | |||
4395 | Regs.clear(); | |||
4396 | CostF.RateFormula(F, Regs, VisitedRegs, LU, &LoserRegs); | |||
4397 | if (CostF.isLoser()) { | |||
4398 | // During initial formula generation, undesirable formulae are generated | |||
4399 | // by uses within other loops that have some non-trivial address mode or | |||
4400 | // use the postinc form of the IV. LSR needs to provide these formulae | |||
4401 | // as the basis of rediscovering the desired formula that uses an AddRec | |||
4402 | // corresponding to the existing phi. Once all formulae have been | |||
4403 | // generated, these initial losers may be pruned. | |||
4404 | LLVM_DEBUG(dbgs() << "  Filtering loser "; F.print(dbgs()); | |||
4405 | dbgs() << "\n"); | |||
4406 | } | |||
4407 | else { | |||
4408 | SmallVector<const SCEV *, 4> Key; | |||
4409 | for (const SCEV *Reg : F.BaseRegs) { | |||
4410 | if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx)) | |||
4411 | Key.push_back(Reg); | |||
4412 | } | |||
4413 | if (F.ScaledReg && | |||
4414 | RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx)) | |||
4415 | Key.push_back(F.ScaledReg); | |||
4416 | // Unstable sort by host order ok, because this is only used for | |||
4417 | // uniquifying. | |||
4418 | llvm::sort(Key); | |||
4419 | ||||
4420 | std::pair<BestFormulaeTy::const_iterator, bool> P = | |||
4421 | BestFormulae.insert(std::make_pair(Key, FIdx)); | |||
4422 | if (P.second) | |||
4423 | continue; | |||
4424 | ||||
4425 | Formula &Best = LU.Formulae[P.first->second]; | |||
4426 | ||||
4427 | Cost CostBest(L, SE, TTI, AMK); | |||
4428 | Regs.clear(); | |||
4429 | CostBest.RateFormula(Best, Regs, VisitedRegs, LU); | |||
4430 | if (CostF.isLess(CostBest)) | |||
4431 | std::swap(F, Best); | |||
4432 | LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs()); | |||
4433 | dbgs() << "\n" | |||
4434 | " in favor of formula "; | |||
4435 | Best.print(dbgs()); dbgs() << '\n'); | |||
4436 | } | |||
4437 | #ifndef NDEBUG | |||
4438 | ChangedFormulae = true; | |||
4439 | #endif | |||
4440 | LU.DeleteFormula(F); | |||
4441 | --FIdx; | |||
4442 | --NumForms; | |||
4443 | Any = true; | |||
4444 | } | |||
4445 | ||||
4446 | // Now that we've filtered out some formulae, recompute the Regs set. | |||
4447 | if (Any) | |||
4448 | LU.RecomputeRegs(LUIdx, RegUses); | |||
4449 | ||||
4450 | // Reset this to prepare for the next use. | |||
4451 | BestFormulae.clear(); | |||
4452 | } | |||
4453 | ||||
4454 | LLVM_DEBUG(if (ChangedFormulae) { | |||
4455 | dbgs() << "\n" | |||
4456 | "After filtering out undesirable candidates:\n"; | |||
4457 | print_uses(dbgs()); | |||
4458 | }); | |||
4459 | } | |||
4460 | ||||
4461 | /// Estimate the worst-case number of solutions the solver might have to | |||
4462 | /// consider. It almost never considers this many solutions because it prunes the | |||
4463 | /// search space, but the pruning isn't always sufficient. | |||
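/// For example (illustrative, not in the upstream source): three uses with 5,
/// 8 and 40 formulae yield an estimate of 5 * 8 * 40 = 1600 candidate
/// solutions; the walk stops early once the running product reaches
/// ComplexityLimit.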
4464 | size_t LSRInstance::EstimateSearchSpaceComplexity() const { | |||
4465 | size_t Power = 1; | |||
4466 | for (const LSRUse &LU : Uses) { | |||
4467 | size_t FSize = LU.Formulae.size(); | |||
4468 | if (FSize >= ComplexityLimit) { | |||
4469 | Power = ComplexityLimit; | |||
4470 | break; | |||
4471 | } | |||
4472 | Power *= FSize; | |||
4473 | if (Power >= ComplexityLimit) | |||
4474 | break; | |||
4475 | } | |||
4476 | return Power; | |||
4477 | } | |||
4478 | ||||
4479 | /// When one formula uses a superset of the registers of another formula, it | |||
4480 | /// won't help reduce register pressure (though it may not necessarily hurt | |||
4481 | /// register pressure); remove it to simplify the system. | |||
4482 | void LSRInstance::NarrowSearchSpaceByDetectingSupersets() { | |||
4483 | if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { | |||
4484 | LLVM_DEBUG(dbgs() << "The search space is too complex.\n"); | |||
4485 | ||||
4486 | LLVM_DEBUG(dbgs() << "Narrowing the search space by eliminating formulae " | |||
4487 | "which use a superset of registers used by other " | |||
4488 | "formulae.\n"); | |||
4489 | ||||
4490 | for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { | |||
4491 | LSRUse &LU = Uses[LUIdx]; | |||
4492 | bool Any = false; | |||
4493 | for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { | |||
4494 | Formula &F = LU.Formulae[i]; | |||
4495 | // Look for a formula with a constant or GV in a register. If the use | |||
4496 | // also has a formula with that same value in an immediate field, | |||
4497 | // delete the one that uses a register. | |||
4498 | for (SmallVectorImpl<const SCEV *>::const_iterator | |||
4499 | I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) { | |||
4500 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) { | |||
4501 | Formula NewF = F; | |||
4502 | //FIXME: Formulas should store bitwidth to do wrapping properly. | |||
4503 | // See PR41034. | |||
4504 | NewF.BaseOffset += (uint64_t)C->getValue()->getSExtValue(); | |||
4505 | NewF.BaseRegs.erase(NewF.BaseRegs.begin() + | |||
4506 | (I - F.BaseRegs.begin())); | |||
4507 | if (LU.HasFormulaWithSameRegs(NewF)) { | |||
4508 | LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); | |||
4509 | dbgs() << '\n'); | |||
4510 | LU.DeleteFormula(F); | |||
4511 | --i; | |||
4512 | --e; | |||
4513 | Any = true; | |||
4514 | break; | |||
4515 | } | |||
4516 | } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) { | |||
4517 | if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) | |||
4518 | if (!F.BaseGV) { | |||
4519 | Formula NewF = F; | |||
4520 | NewF.BaseGV = GV; | |||
4521 | NewF.BaseRegs.erase(NewF.BaseRegs.begin() + | |||
4522 | (I - F.BaseRegs.begin())); | |||
4523 | if (LU.HasFormulaWithSameRegs(NewF)) { | |||
4524 | LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); | |||
4525 | dbgs() << '\n'); | |||
4526 | LU.DeleteFormula(F); | |||
4527 | --i; | |||
4528 | --e; | |||
4529 | Any = true; | |||
4530 | break; | |||
4531 | } | |||
4532 | } | |||
4533 | } | |||
4534 | } | |||
4535 | } | |||
4536 | if (Any) | |||
4537 | LU.RecomputeRegs(LUIdx, RegUses); | |||
4538 | } | |||
4539 | ||||
4540 | LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); | |||
4541 | } | |||
4542 | } | |||
4543 | ||||
4544 | /// When there are many registers for expressions like A, A+1, A+2, etc., | |||
4545 | /// allocate a single register for them. | |||
4546 | void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() { | |||
4547 | if (EstimateSearchSpaceComplexity() < ComplexityLimit) | |||
4548 | return; | |||
4549 | ||||
4550 | LLVM_DEBUG( | |||
4551 | dbgs() << "The search space is too complex.\n" | |||
4552 | "Narrowing the search space by assuming that uses separated " | |||
4553 | "by a constant offset will use the same registers.\n"); | |||
4554 | ||||
4555 | // This is especially useful for unrolled loops. | |||
4556 | ||||
4557 | for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { | |||
4558 | LSRUse &LU = Uses[LUIdx]; | |||
4559 | for (const Formula &F : LU.Formulae) { | |||
4560 | if (F.BaseOffset == 0 || (F.Scale != 0 && F.Scale != 1)) | |||
4561 | continue; | |||
4562 | ||||
4563 | LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU); | |||
4564 | if (!LUThatHas) | |||
4565 | continue; | |||
4566 | ||||
4567 | if (!reconcileNewOffset(*LUThatHas, F.BaseOffset, /*HasBaseReg=*/ false, | |||
4568 | LU.Kind, LU.AccessTy)) | |||
4569 | continue; | |||
4570 | ||||
4571 | LLVM_DEBUG(dbgs() << "  Deleting use "; LU.print(dbgs()); dbgs() << '\n'); | |||
4572 | ||||
4573 | LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop; | |||
4574 | ||||
4575 | // Transfer the fixups of LU to LUThatHas. | |||
4576 | for (LSRFixup &Fixup : LU.Fixups) { | |||
4577 | Fixup.Offset += F.BaseOffset; | |||
4578 | LUThatHas->pushFixup(Fixup); | |||
4579 | LLVM_DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n'); | |||
4580 | } | |||
4581 | ||||
4582 | // Delete formulae from the new use which are no longer legal. | |||
4583 | bool Any = false; | |||
4584 | for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) { | |||
4585 | Formula &F = LUThatHas->Formulae[i]; | |||
4586 | if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset, | |||
4587 | LUThatHas->Kind, LUThatHas->AccessTy, F)) { | |||
4588 | LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n'); | |||
4589 | LUThatHas->DeleteFormula(F); | |||
4590 | --i; | |||
4591 | --e; | |||
4592 | Any = true; | |||
4593 | } | |||
4594 | } | |||
4595 | ||||
4596 | if (Any) | |||
4597 | LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses); | |||
4598 | ||||
4599 | // Delete the old use. | |||
4600 | DeleteUse(LU, LUIdx); | |||
4601 | --LUIdx; | |||
4602 | --NumUses; | |||
4603 | break; | |||
4604 | } | |||
4605 | } | |||
4606 | ||||
4607 | LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); | |||
4608 | } | |||
4609 | ||||
4610 | /// Call FilterOutUndesirableDedicatedRegisters again, if necessary, now that | |||
4611 | /// we've done more filtering, as it may be able to find more formulae to | |||
4612 | /// eliminate. | |||
4613 | void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){ | |||
4614 | if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { | |||
4615 | LLVM_DEBUG(dbgs() << "The search space is too complex.\n"); | |||
4616 | ||||
4617 | LLVM_DEBUG(dbgs() << "Narrowing the search space by re-filtering out " | |||
4618 | "undesirable dedicated registers.\n"); | |||
4619 | ||||
4620 | FilterOutUndesirableDedicatedRegisters(); | |||
4621 | ||||
4622 | LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); | |||
4623 | } | |||
4624 | } | |||
4625 | ||||
4626 | /// If an LSRUse has multiple formulae with the same ScaledReg and Scale, | |||
4627 | /// pick the best one and delete the others. | |||
4628 | /// This narrowing heuristic keeps as many formulae with different | |||
4629 | /// Scale and ScaledReg pairs as possible while narrowing the search space. | |||
4630 | /// The benefit is that it is more likely to find a better solution | |||
4631 | /// from a formulae set with more Scale and ScaledReg variations than | |||
4632 | /// from a formulae set with the same Scale and ScaledReg. The winner-picking | |||
4633 | /// reg heuristic will often keep the formulae with the same Scale and | |||
4634 | /// ScaledReg and filter out the others, and we want to avoid that if possible. | |||
4635 | void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() { | |||
4636 | if (EstimateSearchSpaceComplexity() < ComplexityLimit) | |||
4637 | return; | |||
4638 | ||||
4639 | LLVM_DEBUG( | |||
4640 | dbgs() << "The search space is too complex.\n" | |||
4641 | "Narrowing the search space by choosing the best Formula " | |||
4642 | "from the Formulae with the same Scale and ScaledReg.\n"); | |||
4643 | ||||
4644 | // Map the "Scale * ScaledReg" pair to the best formula of current LSRUse. | |||
4645 | using BestFormulaeTy = DenseMap<std::pair<const SCEV *, int64_t>, size_t>; | |||
4646 | ||||
4647 | BestFormulaeTy BestFormulae; | |||
4648 | #ifndef NDEBUG | |||
4649 | bool ChangedFormulae = false; | |||
4650 | #endif | |||
4651 | DenseSet<const SCEV *> VisitedRegs; | |||
4652 | SmallPtrSet<const SCEV *, 16> Regs; | |||
4653 | ||||
4654 | for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { | |||
4655 | LSRUse &LU = Uses[LUIdx]; | |||
4656 | LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); | |||
4657 | dbgs() << '\n'); | |||
4658 | ||||
4659 | // Return true if Formula FA is better than Formula FB. | |||
4660 | auto IsBetterThan = [&](Formula &FA, Formula &FB) { | |||
4661 | // First we will try to choose the Formula with fewer new registers. | |||
4662 | // For a register used by current Formula, the more the register is | |||
4663 | // shared among LSRUses, the less we increase the register number | |||
4664 | // counter of the formula. | |||
4665 | size_t FARegNum = 0; | |||
4666 | for (const SCEV *Reg : FA.BaseRegs) { | |||
4667 | const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg); | |||
4668 | FARegNum += (NumUses - UsedByIndices.count() + 1); | |||
4669 | } | |||
4670 | size_t FBRegNum = 0; | |||
4671 | for (const SCEV *Reg : FB.BaseRegs) { | |||
4672 | const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg); | |||
4673 | FBRegNum += (NumUses - UsedByIndices.count() + 1); | |||
4674 | } | |||
4675 | if (FARegNum != FBRegNum) | |||
4676 | return FARegNum < FBRegNum; | |||
4677 | ||||
4678 | // If the new register numbers are the same, choose the Formula with | |||
4679 | // less Cost. | |||
4680 | Cost CostFA(L, SE, TTI, AMK); | |||
4681 | Cost CostFB(L, SE, TTI, AMK); | |||
4682 | Regs.clear(); | |||
4683 | CostFA.RateFormula(FA, Regs, VisitedRegs, LU); | |||
4684 | Regs.clear(); | |||
4685 | CostFB.RateFormula(FB, Regs, VisitedRegs, LU); | |||
4686 | return CostFA.isLess(CostFB); | |||
4687 | }; | |||
4688 | ||||
4689 | bool Any = false; | |||
4690 | for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms; | |||
4691 | ++FIdx) { | |||
4692 | Formula &F = LU.Formulae[FIdx]; | |||
4693 | if (!F.ScaledReg) | |||
4694 | continue; | |||
4695 | auto P = BestFormulae.insert({{F.ScaledReg, F.Scale}, FIdx}); | |||
4696 | if (P.second) | |||
4697 | continue; | |||
4698 | ||||
4699 | Formula &Best = LU.Formulae[P.first->second]; | |||
4700 | if (IsBetterThan(F, Best)) | |||
4701 | std::swap(F, Best); | |||
4702 | LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs()); | |||
4703 | dbgs() << "\n" | |||
4704 | " in favor of formula "; | |||
4705 | Best.print(dbgs()); dbgs() << '\n'); | |||
4706 | #ifndef NDEBUG | |||
4707 | ChangedFormulae = true; | |||
4708 | #endif | |||
4709 | LU.DeleteFormula(F); | |||
4710 | --FIdx; | |||
4711 | --NumForms; | |||
4712 | Any = true; | |||
4713 | } | |||
4714 | if (Any) | |||
4715 | LU.RecomputeRegs(LUIdx, RegUses); | |||
4716 | ||||
4717 | // Reset this to prepare for the next use. | |||
4718 | BestFormulae.clear(); | |||
4719 | } | |||
4720 | ||||
4721 | LLVM_DEBUG(if (ChangedFormulae) { | |||
4722 | dbgs() << "\n" | |||
4723 | "After filtering out undesirable candidates:\n"; | |||
4724 | print_uses(dbgs()); | |||
4725 | }); | |||
4726 | } | |||
4727 | ||||
4728 | /// If we are over the complexity limit, filter out any post-inc preferring | |||
4729 | /// variables to only post-inc values. | |||
4730 | void LSRInstance::NarrowSearchSpaceByFilterPostInc() { | |||
4731 | if (AMK != TTI::AMK_PostIndexed) | |||
4732 | return; | |||
4733 | if (EstimateSearchSpaceComplexity() < ComplexityLimit) | |||
4734 | return; | |||
4735 | ||||
4736 | LLVM_DEBUG(dbgs() << "The search space is too complex.\n" | |||
4737 | "Narrowing the search space by choosing the lowest " | |||
4738 | "register Formula for PostInc Uses.\n"); | |||
4739 | ||||
4740 | for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { | |||
4741 | LSRUse &LU = Uses[LUIdx]; | |||
4742 | ||||
4743 | if (LU.Kind != LSRUse::Address) | |||
4744 | continue; | |||
4745 | if (!TTI.isIndexedLoadLegal(TTI.MIM_PostInc, LU.AccessTy.getType()) && | |||
4746 | !TTI.isIndexedStoreLegal(TTI.MIM_PostInc, LU.AccessTy.getType())) | |||
4747 | continue; | |||
4748 | ||||
4749 | size_t MinRegs = std::numeric_limits<size_t>::max(); | |||
4750 | for (const Formula &F : LU.Formulae) | |||
4751 | MinRegs = std::min(F.getNumRegs(), MinRegs); | |||
4752 | ||||
4753 | bool Any = false; | |||
4754 | for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms; | |||
4755 | ++FIdx) { | |||
4756 | Formula &F = LU.Formulae[FIdx]; | |||
4757 | if (F.getNumRegs() > MinRegs) { | |||
4758 | LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs()); | |||
4759 | dbgs() << "\n"); | |||
4760 | LU.DeleteFormula(F); | |||
4761 | --FIdx; | |||
4762 | --NumForms; | |||
4763 | Any = true; | |||
4764 | } | |||
4765 | } | |||
4766 | if (Any) | |||
4767 | LU.RecomputeRegs(LUIdx, RegUses); | |||
4768 | ||||
4769 | if (EstimateSearchSpaceComplexity() < ComplexityLimit) | |||
4770 | break; | |||
4771 | } | |||
4772 | ||||
4773 | LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); | |||
4774 | } | |||
4775 | ||||
4776 | /// This function deletes formulas with a high expected number of registers. | |||
4777 | /// Assuming we don't know the value of each formula (having already deleted | |||
4778 | /// all inefficient ones), generate the probability of not being selected for | |||
4779 | /// each register. | |||
4780 | /// For example, | |||
4781 | /// Use1: | |||
4782 | /// reg(a) + reg({0,+,1}) | |||
4783 | /// reg(a) + reg({-1,+,1}) + 1 | |||
4784 | /// reg({a,+,1}) | |||
4785 | /// Use2: | |||
4786 | /// reg(b) + reg({0,+,1}) | |||
4787 | /// reg(b) + reg({-1,+,1}) + 1 | |||
4788 | /// reg({b,+,1}) | |||
4789 | /// Use3: | |||
4790 | /// reg(c) + reg(b) + reg({0,+,1}) | |||
4791 | /// reg(c) + reg({b,+,1}) | |||
4792 | /// | |||
4793 | /// Probability of not selecting | |||
4794 | /// Use1 Use2 Use3 | |||
4795 | /// reg(a) (1/3) * 1 * 1 | |||
4796 | /// reg(b) 1 * (1/3) * (1/2) | |||
4797 | /// reg({0,+,1}) (2/3) * (2/3) * (1/2) | |||
4798 | /// reg({-1,+,1}) (2/3) * (2/3) * 1 | |||
4799 | /// reg({a,+,1}) (2/3) * 1 * 1 | |||
4800 | /// reg({b,+,1}) 1 * (2/3) * (2/3) | |||
4801 | /// reg(c) 1 * 1 * 0 | |||
4802 | /// | |||
4803 | /// Now compute the expected register count for each formula. Note that for | |||
4804 | /// each use we exclude the probability of not being selected for that use. | |||
4805 | /// For example, for Use1 the probability for reg(a) would be just 1 * 1 | |||
4806 | /// (excluding the probability 1/3 of not being selected for Use1). | |||
4807 | /// Use1: | |||
4808 | /// reg(a) + reg({0,+,1}) 1 + 1/3 -- to be deleted | |||
4809 | /// reg(a) + reg({-1,+,1}) + 1 1 + 4/9 -- to be deleted | |||
4810 | /// reg({a,+,1}) 1 | |||
4811 | /// Use2: | |||
4812 | /// reg(b) + reg({0,+,1}) 1/2 + 1/3 -- to be deleted | |||
4813 | /// reg(b) + reg({-1,+,1}) + 1 1/2 + 2/3 -- to be deleted | |||
4814 | /// reg({b,+,1}) 2/3 | |||
4815 | /// Use3: | |||
4816 | /// reg(c) + reg(b) + reg({0,+,1}) 1 + 1/3 + 4/9 -- to be deleted | |||
4817 | /// reg(c) + reg({b,+,1}) 1 + 2/3 | |||
4818 | void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() { | |||
4819 | if (EstimateSearchSpaceComplexity() < ComplexityLimit) | |||
4820 | return; | |||
4821 | // Ok, we have too many formulae on our hands to conveniently handle. | |||
4822 | // Use a rough heuristic to thin out the list. | |||
4823 | ||||
4824 | // Set of Regs which will be 100% used in the final solution. | |||
4825 | // Used in each formula of a solution (in example above this is reg(c)). | |||
4826 | // We can skip them in calculations. | |||
4827 | SmallPtrSet<const SCEV *, 4> UniqRegs; | |||
4828 | LLVM_DEBUG(dbgs() << "The search space is too complex.\n"); | |||
4829 | ||||
4830 | // Map each register to its probability of not being selected. | |||
4831 | DenseMap <const SCEV *, float> RegNumMap; | |||
4832 | for (const SCEV *Reg : RegUses) { | |||
4833 | if (UniqRegs.count(Reg)) | |||
4834 | continue; | |||
4835 | float PNotSel = 1; | |||
4836 | for (const LSRUse &LU : Uses) { | |||
4837 | if (!LU.Regs.count(Reg)) | |||
4838 | continue; | |||
4839 | float P = LU.getNotSelectedProbability(Reg); | |||
4840 | if (P != 0.0) | |||
4841 | PNotSel *= P; | |||
4842 | else | |||
4843 | UniqRegs.insert(Reg); | |||
4844 | } | |||
4845 | RegNumMap.insert(std::make_pair(Reg, PNotSel)); | |||
4846 | } | |||
4847 | ||||
4848 | LLVM_DEBUG( | |||
4849 | dbgs() << "Narrowing the search space by deleting costly formulas\n"); | |||
4850 | ||||
4851 | // Delete formulas where registers number expectation is high. | |||
4852 | for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { | |||
4853 | LSRUse &LU = Uses[LUIdx]; | |||
4854 | // If there is nothing to delete, continue. | |||
4855 | if (LU.Formulae.size() < 2) | |||
4856 | continue; | |||
4857 | // This is a temporary solution to test performance. Float should be | |||
4858 | // replaced with a rounding-independent type (based on integers) to avoid | |||
4859 | // different results for different target builds. | |||
4860 | float FMinRegNum = LU.Formulae[0].getNumRegs(); | |||
4861 | float FMinARegNum = LU.Formulae[0].getNumRegs(); | |||
4862 | size_t MinIdx = 0; | |||
4863 | for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { | |||
4864 | Formula &F = LU.Formulae[i]; | |||
4865 | float FRegNum = 0; | |||
4866 | float FARegNum = 0; | |||
4867 | for (const SCEV *BaseReg : F.BaseRegs) { | |||
4868 | if (UniqRegs.count(BaseReg)) | |||
4869 | continue; | |||
4870 | FRegNum += RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg); | |||
4871 | if (isa<SCEVAddRecExpr>(BaseReg)) | |||
4872 | FARegNum += | |||
4873 | RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg); | |||
4874 | } | |||
4875 | if (const SCEV *ScaledReg = F.ScaledReg) { | |||
4876 | if (!UniqRegs.count(ScaledReg)) { | |||
4877 | FRegNum += | |||
4878 | RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg); | |||
4879 | if (isa<SCEVAddRecExpr>(ScaledReg)) | |||
4880 | FARegNum += | |||
4881 | RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg); | |||
4882 | } | |||
4883 | } | |||
4884 | if (FMinRegNum > FRegNum || | |||
4885 | (FMinRegNum == FRegNum && FMinARegNum > FARegNum)) { | |||
4886 | FMinRegNum = FRegNum; | |||
4887 | FMinARegNum = FARegNum; | |||
4888 | MinIdx = i; | |||
4889 | } | |||
4890 | } | |||
4891 | LLVM_DEBUG(dbgs() << " The formula "; LU.Formulae[MinIdx].print(dbgs()); | |||
4892 | dbgs() << " with min reg num " << FMinRegNum << '\n'); | |||
4893 | if (MinIdx != 0) | |||
4894 | std::swap(LU.Formulae[MinIdx], LU.Formulae[0]); | |||
4895 | while (LU.Formulae.size() != 1) { | |||
4896 | LLVM_DEBUG(dbgs() << " Deleting "; LU.Formulae.back().print(dbgs()); | |||
4897 | dbgs() << '\n'); | |||
4898 | LU.Formulae.pop_back(); | |||
4899 | } | |||
4900 | LU.RecomputeRegs(LUIdx, RegUses); | |||
4901 | assert(LU.Formulae.size() == 1 && "Should be exactly 1 min regs formula"); | |||
4902 | Formula &F = LU.Formulae[0]; | |||
4903 | LLVM_DEBUG(dbgs() << " Leaving only "; F.print(dbgs()); dbgs() << '\n'); | |||
4904 | // When we choose the formula, the regs become unique. | |||
4905 | UniqRegs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); | |||
4906 | if (F.ScaledReg) | |||
4907 | UniqRegs.insert(F.ScaledReg); | |||
4908 | } | |||
4909 | LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); | |||
4910 | } | |||
4911 | ||||
4912 | /// Pick a register which seems likely to be profitable, and then in any use | |||
4913 | /// which has any reference to that register, delete all formulae which do not | |||
4914 | /// reference that register. | |||
4915 | void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() { | |||
4916 | // With all other options exhausted, loop until the system is simple | |||
4917 | // enough to handle. | |||
4918 | SmallPtrSet<const SCEV *, 4> Taken; | |||
4919 | while (EstimateSearchSpaceComplexity() >= ComplexityLimit) { | |||
4920 | // Ok, we have too many formulae on our hands to conveniently handle. | |||
4921 | // Use a rough heuristic to thin out the list. | |||
4922 | LLVM_DEBUG(dbgs() << "The search space is too complex.\n"); | |||
4923 | ||||
4924 | // Pick the register which is used by the most LSRUses, which is likely | |||
4925 | // to be a good reuse register candidate. | |||
4926 | const SCEV *Best = nullptr; | |||
4927 | unsigned BestNum = 0; | |||
4928 | for (const SCEV *Reg : RegUses) { | |||
4929 | if (Taken.count(Reg)) | |||
4930 | continue; | |||
4931 | if (!Best) { | |||
4932 | Best = Reg; | |||
4933 | BestNum = RegUses.getUsedByIndices(Reg).count(); | |||
4934 | } else { | |||
4935 | unsigned Count = RegUses.getUsedByIndices(Reg).count(); | |||
4936 | if (Count > BestNum) { | |||
4937 | Best = Reg; | |||
4938 | BestNum = Count; | |||
4939 | } | |||
4940 | } | |||
4941 | } | |||
4942 | assert(Best && "Failed to find best LSRUse candidate"); | |||
4943 | ||||
4944 | LLVM_DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best | |||
4945 | << " will yield profitable reuse.\n"); | |||
4946 | Taken.insert(Best); | |||
4947 | ||||
4948 | // In any use with formulae that reference this register, delete formulae | |||
4949 | // which don't reference it. | |||
4950 | for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { | |||
4951 | LSRUse &LU = Uses[LUIdx]; | |||
4952 | if (!LU.Regs.count(Best)) continue; | |||
4953 | ||||
4954 | bool Any = false; | |||
4955 | for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { | |||
4956 | Formula &F = LU.Formulae[i]; | |||
4957 | if (!F.referencesReg(Best)) { | |||
4958 | LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); | |||
4959 | LU.DeleteFormula(F); | |||
4960 | --e; | |||
4961 | --i; | |||
4962 | Any = true; | |||
4963 | assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?"); | |||
4964 | continue; | |||
4965 | } | |||
4966 | } | |||
4967 | ||||
4968 | if (Any) | |||
4969 | LU.RecomputeRegs(LUIdx, RegUses); | |||
4970 | } | |||
4971 | ||||
4972 | LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); | |||
4973 | } | |||
4974 | } | |||
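// Illustrative sketch (not from the original source, arbitrary numbers): if
// reg({0,+,1}) is referenced by, say, 5 of 6 uses while every other register
// is referenced by at most 2, the loop above commits to reg({0,+,1}) first;
// each of those 5 uses then drops every formula that does not mention it,
// typically shrinking the search space substantially. The outer while-loop
// repeats with the next most shared register until
// EstimateSearchSpaceComplexity() falls below ComplexityLimit.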
4975 | ||||
4976 | /// If there are an extraordinary number of formulae to choose from, use some | |||
4977 | /// rough heuristics to prune down the number of formulae. This keeps the main | |||
4978 | /// solver from taking an extraordinary amount of time in some worst-case | |||
4979 | /// scenarios. | |||
4980 | void LSRInstance::NarrowSearchSpaceUsingHeuristics() { | |||
4981 | NarrowSearchSpaceByDetectingSupersets(); | |||
4982 | NarrowSearchSpaceByCollapsingUnrolledCode(); | |||
4983 | NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(); | |||
4984 | if (FilterSameScaledReg) | |||
4985 | NarrowSearchSpaceByFilterFormulaWithSameScaledReg(); | |||
4986 | NarrowSearchSpaceByFilterPostInc(); | |||
4987 | if (LSRExpNarrow) | |||
4988 | NarrowSearchSpaceByDeletingCostlyFormulas(); | |||
4989 | else | |||
4990 | NarrowSearchSpaceByPickingWinnerRegs(); | |||
4991 | } | |||
4992 | ||||
4993 | /// This is the recursive solver. | |||
4994 | void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution, | |||
4995 | Cost &SolutionCost, | |||
4996 | SmallVectorImpl<const Formula *> &Workspace, | |||
4997 | const Cost &CurCost, | |||
4998 | const SmallPtrSet<const SCEV *, 16> &CurRegs, | |||
4999 | DenseSet<const SCEV *> &VisitedRegs) const { | |||
5000 | // Some ideas: | |||
5001 | // - prune more: | |||
5002 | // - use more aggressive filtering | |||
5003 | // - sort the formula so that the most profitable solutions are found first | |||
5004 | // - sort the uses too | |||
5005 | // - search faster: | |||
5006 | // - don't compute a cost, and then compare. compare while computing a cost | |||
5007 | // and bail early. | |||
5008 | // - track register sets with SmallBitVector | |||
5009 | ||||
5010 | const LSRUse &LU = Uses[Workspace.size()]; | |||
5011 | ||||
5012 | // If this use references any register that's already a part of the | |||
5013 | // in-progress solution, consider it a requirement that a formula must | |||
5014 | // reference that register in order to be considered. This prunes out | |||
5015 | // unprofitable searching. | |||
5016 | SmallSetVector<const SCEV *, 4> ReqRegs; | |||
5017 | for (const SCEV *S : CurRegs) | |||
5018 | if (LU.Regs.count(S)) | |||
5019 | ReqRegs.insert(S); | |||
5020 | ||||
5021 | SmallPtrSet<const SCEV *, 16> NewRegs; | |||
5022 | Cost NewCost(L, SE, TTI, AMK); | |||
5023 | for (const Formula &F : LU.Formulae) { | |||
5024 | // Ignore formulae which may not be ideal in terms of register reuse of | |||
5025 | // ReqRegs. The formula should use all required registers before | |||
5026 | // introducing new ones. | |||
5027 | // This can sometimes (notably when trying to favour postinc) lead to | |||
5028 | // sub-optimal decisions. In those cases it is best left to the cost | |||
5029 | // modelling to get right. | |||
5030 | if (AMK != TTI::AMK_PostIndexed || LU.Kind != LSRUse::Address) { | |||
5031 | int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size()); | |||
5032 | for (const SCEV *Reg : ReqRegs) { | |||
5033 | if ((F.ScaledReg && F.ScaledReg == Reg) || | |||
5034 | is_contained(F.BaseRegs, Reg)) { | |||
5035 | --NumReqRegsToFind; | |||
5036 | if (NumReqRegsToFind == 0) | |||
5037 | break; | |||
5038 | } | |||
5039 | } | |||
5040 | if (NumReqRegsToFind != 0) { | |||
5041 | // If none of the formulae satisfied the required registers, then we could | |||
5042 | // clear ReqRegs and try again. Currently, we simply give up in this case. | |||
5043 | continue; | |||
5044 | } | |||
5045 | } | |||
5046 | ||||
5047 | // Evaluate the cost of the current formula. If it's already worse than | |||
5048 | // the current best, prune the search at that point. | |||
5049 | NewCost = CurCost; | |||
5050 | NewRegs = CurRegs; | |||
5051 | NewCost.RateFormula(F, NewRegs, VisitedRegs, LU); | |||
5052 | if (NewCost.isLess(SolutionCost)) { | |||
5053 | Workspace.push_back(&F); | |||
5054 | if (Workspace.size() != Uses.size()) { | |||
5055 | SolveRecurse(Solution, SolutionCost, Workspace, NewCost, | |||
5056 | NewRegs, VisitedRegs); | |||
5057 | if (F.getNumRegs() == 1 && Workspace.size() == 1) | |||
5058 | VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]); | |||
5059 | } else { | |||
5060 | LLVM_DEBUG(dbgs() << "New best at "; NewCost.print(dbgs()); | |||
5061 | dbgs() << ".\nRegs:\n"; | |||
5062 | for (const SCEV *S : NewRegs) dbgs() | |||
5063 | << "- " << *S << "\n"; | |||
5064 | dbgs() << '\n'); | |||
5065 | ||||
5066 | SolutionCost = NewCost; | |||
5067 | Solution = Workspace; | |||
5068 | } | |||
5069 | Workspace.pop_back(); | |||
5070 | } | |||
5071 | } | |||
5072 | } | |||
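// Illustrative note (not from the original source): SolveRecurse is a
// depth-first branch-and-bound search. Uses are fixed in order; at each depth
// the partial cost of the choices made so far is compared against the best
// complete solution found so far (SolutionCost), and the subtree is pruned as
// soon as the partial cost is no longer strictly less. For example, with
// 3 uses of 4 formulae each there are at most 4^3 = 64 leaves, but a cheap
// solution found early typically prunes most of the later branches.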
5073 | ||||
5074 | /// Choose one formula from each use. Return the results in the given Solution | |||
5075 | /// vector. | |||
5076 | void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const { | |||
5077 | SmallVector<const Formula *, 8> Workspace; | |||
5078 | Cost SolutionCost(L, SE, TTI, AMK); | |||
5079 | SolutionCost.Lose(); | |||
5080 | Cost CurCost(L, SE, TTI, AMK); | |||
5081 | SmallPtrSet<const SCEV *, 16> CurRegs; | |||
5082 | DenseSet<const SCEV *> VisitedRegs; | |||
5083 | Workspace.reserve(Uses.size()); | |||
5084 | ||||
5085 | // SolveRecurse does all the work. | |||
5086 | SolveRecurse(Solution, SolutionCost, Workspace, CurCost, | |||
5087 | CurRegs, VisitedRegs); | |||
5088 | if (Solution.empty()) { | |||
5089 | LLVM_DEBUG(dbgs() << "\nNo Satisfactory Solution\n"); | |||
5090 | return; | |||
5091 | } | |||
5092 | ||||
5093 | // Ok, we've now made all our decisions. | |||
5094 | LLVM_DEBUG(dbgs() << "\n" | |||
5095 | "The chosen solution requires "; | |||
5096 | SolutionCost.print(dbgs()); dbgs() << ":\n"; | |||
5097 | for (size_t i = 0, e = Uses.size(); i != e; ++i) { | |||
5098 | dbgs() << " "; | |||
5099 | Uses[i].print(dbgs()); | |||
5100 | dbgs() << "\n" | |||
5101 | " "; | |||
5102 | Solution[i]->print(dbgs()); | |||
5103 | dbgs() << '\n'; | |||
5104 | }); | |||
5105 | ||||
5106 | assert(Solution.size() == Uses.size() && "Malformed solution!"); | |||
5107 | } | |||
5108 | ||||
5109 | /// Helper for AdjustInsertPositionForExpand. Climb up the dominator tree as far as | |||
5110 | /// we can go while still being dominated by the input positions. This helps | |||
5111 | /// canonicalize the insert position, which encourages sharing. | |||
5112 | BasicBlock::iterator | |||
5113 | LSRInstance::HoistInsertPosition(BasicBlock::iterator IP, | |||
5114 | const SmallVectorImpl<Instruction *> &Inputs) | |||
5115 | const { | |||
5116 | Instruction *Tentative = &*IP; | |||
5117 | while (true) { | |||
5118 | bool AllDominate = true; | |||
5119 | Instruction *BetterPos = nullptr; | |||
5120 | // Don't bother attempting to insert before a catchswitch; its basic block | |||
5121 | // cannot have other non-PHI instructions. | |||
5122 | if (isa<CatchSwitchInst>(Tentative)) | |||
5123 | return IP; | |||
5124 | ||||
5125 | for (Instruction *Inst : Inputs) { | |||
5126 | if (Inst == Tentative || !DT.dominates(Inst, Tentative)) { | |||
5127 | AllDominate = false; | |||
5128 | break; | |||
5129 | } | |||
5130 | // Attempt to find an insert position in the middle of the block, | |||
5131 | // instead of at the end, so that it can be used for other expansions. | |||
5132 | if (Tentative->getParent() == Inst->getParent() && | |||
5133 | (!BetterPos || !DT.dominates(Inst, BetterPos))) | |||
5134 | BetterPos = &*std::next(BasicBlock::iterator(Inst)); | |||
5135 | } | |||
5136 | if (!AllDominate) | |||
5137 | break; | |||
5138 | if (BetterPos) | |||
5139 | IP = BetterPos->getIterator(); | |||
5140 | else | |||
5141 | IP = Tentative->getIterator(); | |||
5142 | ||||
5143 | const Loop *IPLoop = LI.getLoopFor(IP->getParent()); | |||
5144 | unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0; | |||
5145 | ||||
5146 | BasicBlock *IDom; | |||
5147 | for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) { | |||
5148 | if (!Rung) return IP; | |||
5149 | Rung = Rung->getIDom(); | |||
5150 | if (!Rung) return IP; | |||
5151 | IDom = Rung->getBlock(); | |||
5152 | ||||
5153 | // Don't climb into a loop though. | |||
5154 | const Loop *IDomLoop = LI.getLoopFor(IDom); | |||
5155 | unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0; | |||
5156 | if (IDomDepth <= IPLoopDepth && | |||
5157 | (IDomDepth != IPLoopDepth || IDomLoop == IPLoop)) | |||
5158 | break; | |||
5159 | } | |||
5160 | ||||
5161 | Tentative = IDom->getTerminator(); | |||
5162 | } | |||
5163 | ||||
5164 | return IP; | |||
5165 | } | |||
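// Illustrative sketch (not from the original source): given a straight-line
// dominance chain entry -> a -> b -> c, with all expansion inputs defined in
// 'a' and the lowest legal position in 'c', the loop above walks IP from 'c'
// up the idom chain ('b', then 'a'), stopping before it would climb past an
// input's definition or into a deeper loop. Expansions for different fixups
// therefore tend to land at the same hoisted position, which lets
// SCEVExpander reuse the emitted instructions.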
5166 | ||||
5167 | /// Determine an input position which will be dominated by the operands and | |||
5168 | /// which will dominate the result. | |||
5169 | BasicBlock::iterator | |||
5170 | LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP, | |||
5171 | const LSRFixup &LF, | |||
5172 | const LSRUse &LU, | |||
5173 | SCEVExpander &Rewriter) const { | |||
5174 | // Collect some instructions which must be dominated by the | |||
5175 | // expanding replacement. These must be dominated by any operands that | |||
5176 | // will be required in the expansion. | |||
5177 | SmallVector<Instruction *, 4> Inputs; | |||
5178 | if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace)) | |||
5179 | Inputs.push_back(I); | |||
5180 | if (LU.Kind == LSRUse::ICmpZero) | |||
5181 | if (Instruction *I = | |||
5182 | dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1))) | |||
5183 | Inputs.push_back(I); | |||
5184 | if (LF.PostIncLoops.count(L)) { | |||
5185 | if (LF.isUseFullyOutsideLoop(L)) | |||
5186 | Inputs.push_back(L->getLoopLatch()->getTerminator()); | |||
5187 | else | |||
5188 | Inputs.push_back(IVIncInsertPos); | |||
5189 | } | |||
5190 | // The expansion must also be dominated by the increment positions of any | |||
5191 | // loops for which it is using post-inc mode. | |||
5192 | for (const Loop *PIL : LF.PostIncLoops) { | |||
5193 | if (PIL == L) continue; | |||
5194 | ||||
5195 | // Be dominated by the loop exit. | |||
5196 | SmallVector<BasicBlock *, 4> ExitingBlocks; | |||
5197 | PIL->getExitingBlocks(ExitingBlocks); | |||
5198 | if (!ExitingBlocks.empty()) { | |||
5199 | BasicBlock *BB = ExitingBlocks[0]; | |||
5200 | for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i) | |||
5201 | BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]); | |||
5202 | Inputs.push_back(BB->getTerminator()); | |||
5203 | } | |||
5204 | } | |||
5205 | ||||
5206 | assert(!isa<PHINode>(LowestIP) && !LowestIP->isEHPad() | |||
5207 | && !isa<DbgInfoIntrinsic>(LowestIP) && | |||
5208 | "Insertion point must be a normal instruction"); | |||
5209 | ||||
5210 | // Then, climb up the immediate dominator tree as far as we can go while | |||
5211 | // still being dominated by the input positions. | |||
5212 | BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs); | |||
5213 | ||||
5214 | // Don't insert instructions before PHI nodes. | |||
5215 | while (isa<PHINode>(IP)) ++IP; | |||
5216 | ||||
5217 | // Ignore landingpad instructions. | |||
5218 | while (IP->isEHPad()) ++IP; | |||
5219 | ||||
5220 | // Ignore debug intrinsics. | |||
5221 | while (isa<DbgInfoIntrinsic>(IP)) ++IP; | |||
5222 | ||||
5223 | // Set IP below instructions recently inserted by SCEVExpander. This keeps the | |||
5224 | // IP consistent across expansions and allows the previously inserted | |||
5225 | // instructions to be reused by subsequent expansion. | |||
5226 | while (Rewriter.isInsertedInstruction(&*IP) && IP != LowestIP) | |||
5227 | ++IP; | |||
5228 | ||||
5229 | return IP; | |||
5230 | } | |||
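// Illustrative note (not from the original source): after hoisting, IP is
// nudged past any leading PHIs, EH pads and debug intrinsics, because no real
// instruction may be inserted before them, and then past instructions that
// SCEVExpander itself just emitted, so consecutive expansions share a stable
// insertion point and can reuse each other's values.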
5231 | ||||
5232 | /// Emit instructions for the leading candidate expression for this LSRUse (this | |||
5233 | /// is called "expanding"). | |||
5234 | Value *LSRInstance::Expand(const LSRUse &LU, const LSRFixup &LF, | |||
5235 | const Formula &F, BasicBlock::iterator IP, | |||
5236 | SCEVExpander &Rewriter, | |||
5237 | SmallVectorImpl<WeakTrackingVH> &DeadInsts) const { | |||
5238 | if (LU.RigidFormula) | |||
5239 | return LF.OperandValToReplace; | |||
5240 | ||||
5241 | // Determine an input position which will be dominated by the operands and | |||
5242 | // which will dominate the result. | |||
5243 | IP = AdjustInsertPositionForExpand(IP, LF, LU, Rewriter); | |||
5244 | Rewriter.setInsertPoint(&*IP); | |||
5245 | ||||
5246 | // Inform the Rewriter if we have a post-increment use, so that it can | |||
5247 | // perform an advantageous expansion. | |||
5248 | Rewriter.setPostInc(LF.PostIncLoops); | |||
5249 | ||||
5250 | // This is the type that the user actually needs. | |||
5251 | Type *OpTy = LF.OperandValToReplace->getType(); | |||
5252 | // This will be the type that we'll initially expand to. | |||
5253 | Type *Ty = F.getType(); | |||
5254 | if (!Ty) | |||
5255 | // No type known; just expand directly to the ultimate type. | |||
5256 | Ty = OpTy; | |||
5257 | else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy)) | |||
5258 | // Expand directly to the ultimate type if it's the right size. | |||
5259 | Ty = OpTy; | |||
5260 | // This is the type to do integer arithmetic in. | |||
5261 | Type *IntTy = SE.getEffectiveSCEVType(Ty); | |||
5262 | ||||
5263 | // Build up a list of operands to add together to form the full base. | |||
5264 | SmallVector<const SCEV *, 8> Ops; | |||
5265 | ||||
5266 | // Expand the BaseRegs portion. | |||
5267 | for (const SCEV *Reg : F.BaseRegs) { | |||
5268 | assert(!Reg->isZero() && "Zero allocated in a base register!"); | |||
5269 | ||||
5270 | // If we're expanding for a post-inc user, make the post-inc adjustment. | |||
5271 | Reg = denormalizeForPostIncUse(Reg, LF.PostIncLoops, SE); | |||
5272 | Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr))); | |||
5273 | } | |||
5274 | ||||
5275 | // Expand the ScaledReg portion. | |||
5276 | Value *ICmpScaledV = nullptr; | |||
5277 | if (F.Scale != 0) { | |||
5278 | const SCEV *ScaledS = F.ScaledReg; | |||
5279 | ||||
5280 | // If we're expanding for a post-inc user, make the post-inc adjustment. | |||
5281 | PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops); | |||
5282 | ScaledS = denormalizeForPostIncUse(ScaledS, Loops, SE); | |||
5283 | ||||
5284 | if (LU.Kind == LSRUse::ICmpZero) { | |||
5285 | // Expand ScaleReg as if it was part of the base regs. | |||
5286 | if (F.Scale == 1) | |||
5287 | Ops.push_back( | |||
5288 | SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr))); | |||
5289 | else { | |||
5290 | // An interesting way of "folding" with an icmp is to use a negated | |||
5291 | // scale, which we'll implement by inserting it into the other operand | |||
5292 | // of the icmp. | |||
5293 | assert(F.Scale == -1 && | |||
5294 | "The only scale supported by ICmpZero uses is -1!"); | |||
5295 | ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr); | |||
5296 | } | |||
5297 | } else { | |||
5298 | // Otherwise just expand the scaled register and an explicit scale, | |||
5299 | // which is expected to be matched as part of the address. | |||
5300 | ||||
5301 | // Flush the operand list to suppress SCEVExpander hoisting address modes, | |||
5302 | // unless the addressing mode will not be folded. | |||
5303 | if (!Ops.empty() && LU.Kind == LSRUse::Address && | |||
5304 | isAMCompletelyFolded(TTI, LU, F)) { | |||
5305 | Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), nullptr); | |||
5306 | Ops.clear(); | |||
5307 | Ops.push_back(SE.getUnknown(FullV)); | |||
5308 | } | |||
5309 | ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr)); | |||
5310 | if (F.Scale != 1) | |||
5311 | ScaledS = | |||
5312 | SE.getMulExpr(ScaledS, SE.getConstant(ScaledS->getType(), F.Scale)); | |||
5313 | Ops.push_back(ScaledS); | |||
5314 | } | |||
5315 | } | |||
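// Illustrative note (not from the original source): for an Address use the
// explicit multiply produced above is expected to fold into a scaled-index
// addressing mode during instruction selection, e.g. on x86-64 a formula
// reg(%base) + 4*reg({0,+,1}) can become a single
//   mov eax, dword ptr [rdi + 4*rcx]
// style operand rather than a separate shift or multiply.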
5316 | ||||
5317 | // Expand the GV portion. | |||
5318 | if (F.BaseGV) { | |||
5319 | // Flush the operand list to suppress SCEVExpander hoisting. | |||
5320 | if (!Ops.empty()) { | |||
5321 | Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), IntTy); | |||
5322 | Ops.clear(); | |||
5323 | Ops.push_back(SE.getUnknown(FullV)); | |||
5324 | } | |||
5325 | Ops.push_back(SE.getUnknown(F.BaseGV)); | |||
5326 | } | |||
5327 | ||||
5328 | // Flush the operand list to suppress SCEVExpander hoisting of both folded and | |||
5329 | // unfolded offsets. LSR assumes they both live next to their uses. | |||
5330 | if (!Ops.empty()) { | |||
5331 | Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty); | |||
5332 | Ops.clear(); | |||
5333 | Ops.push_back(SE.getUnknown(FullV)); | |||
5334 | } | |||
5335 | ||||
5336 | // Expand the immediate portion. | |||
5337 | int64_t Offset = (uint64_t)F.BaseOffset + LF.Offset; | |||
5338 | if (Offset != 0) { | |||
5339 | if (LU.Kind == LSRUse::ICmpZero) { | |||
5340 | // The other interesting way of "folding" with an ICmpZero is to use a | |||
5341 | // negated immediate. | |||
5342 | if (!ICmpScaledV) | |||
5343 | ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset); | |||
5344 | else { | |||
5345 | Ops.push_back(SE.getUnknown(ICmpScaledV)); | |||
5346 | ICmpScaledV = ConstantInt::get(IntTy, Offset); | |||
5347 | } | |||
5348 | } else { | |||
5349 | // Just add the immediate values. These again are expected to be matched | |||
5350 | // as part of the address. | |||
5351 | Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset))); | |||
5352 | } | |||
5353 | } | |||
5354 | ||||
5355 | // Expand the unfolded offset portion. | |||
5356 | int64_t UnfoldedOffset = F.UnfoldedOffset; | |||
5357 | if (UnfoldedOffset != 0) { | |||
5358 | // Just add the immediate values. | |||
5359 | Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, | |||
5360 | UnfoldedOffset))); | |||
5361 | } | |||
5362 | ||||
5363 | // Emit instructions summing all the operands. | |||
5364 | const SCEV *FullS = Ops.empty() ? | |||
5365 | SE.getConstant(IntTy, 0) : | |||
5366 | SE.getAddExpr(Ops); | |||
5367 | Value *FullV = Rewriter.expandCodeFor(FullS, Ty); | |||
5368 | ||||
5369 | // We're done expanding now, so reset the rewriter. | |||
5370 | Rewriter.clearPostInc(); | |||
5371 | ||||
5372 | // An ICmpZero Formula represents an ICmp which we're handling as a | |||
5373 | // comparison against zero. Now that we've expanded an expression for that | |||
5374 | // form, update the ICmp's other operand. | |||
5375 | if (LU.Kind == LSRUse::ICmpZero) { | |||
5376 | ICmpInst *CI = cast<ICmpInst>(LF.UserInst); | |||
5377 | if (auto *OperandIsInstr = dyn_cast<Instruction>(CI->getOperand(1))) | |||
5378 | DeadInsts.emplace_back(OperandIsInstr); | |||
5379 | assert(!F.BaseGV && "ICmp does not support folding a global value and " | |||
5380 | "a scale at the same time!"); | |||
5381 | if (F.Scale == -1) { | |||
5382 | if (ICmpScaledV->getType() != OpTy) { | |||
5383 | Instruction *Cast = | |||
5384 | CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false, | |||
5385 | OpTy, false), | |||
5386 | ICmpScaledV, OpTy, "tmp", CI); | |||
5387 | ICmpScaledV = Cast; | |||
5388 | } | |||
5389 | CI->setOperand(1, ICmpScaledV); | |||
5390 | } else { | |||
5391 | // A scale of 1 means that the scale has been expanded as part of the | |||
5392 | // base regs. | |||
5393 | assert((F.Scale == 0 || F.Scale == 1) && | |||
5394 | "ICmp does not support folding a global value and " | |||
5395 | "a scale at the same time!"); | |||
5396 | Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy), | |||
5397 | -(uint64_t)Offset); | |||
5398 | if (C->getType() != OpTy) | |||
5399 | C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, | |||
5400 | OpTy, false), | |||
5401 | C, OpTy); | |||
5402 | ||||
5403 | CI->setOperand(1, C); | |||
5404 | } | |||
5405 | } | |||
5406 | ||||
5407 | return FullV; | |||
5408 | } | |||
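// Illustrative sketch (not from the original source) of the ICmpZero handling
// above: a compare modelled as "{0,+,1} + (-1 * %n) == 0" is expanded with
// Scale == -1 by emitting the induction expression for operand 0 and placing
// the expanded %n (ICmpScaledV) directly into operand 1, so the rewritten
// compare is simply "icmp eq %iv, %n" instead of materializing the negated
// product.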
5409 | ||||
5410 | /// Helper for Rewrite. PHI nodes are special because the use of their operands | |||
5411 | /// effectively happens in their predecessor blocks, so the expression may need | |||
5412 | /// to be expanded in multiple places. | |||
5413 | void LSRInstance::RewriteForPHI( | |||
5414 | PHINode *PN, const LSRUse &LU, const LSRFixup &LF, const Formula &F, | |||
5415 | SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts) const { | |||
5416 | DenseMap<BasicBlock *, Value *> Inserted; | |||
5417 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) | |||
5418 | if (PN->getIncomingValue(i) == LF.OperandValToReplace) { | |||
5419 | bool needUpdateFixups = false; | |||
5420 | BasicBlock *BB = PN->getIncomingBlock(i); | |||
5421 | ||||
5422 | // If this is a critical edge, split the edge so that we do not insert | |||
5423 | // the code on all predecessor/successor paths. We do this unless this | |||
5424 | // is the canonical backedge for this loop, which complicates post-inc | |||
5425 | // users. | |||
5426 | if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 && | |||
5427 | !isa<IndirectBrInst>(BB->getTerminator()) && | |||
5428 | !isa<CatchSwitchInst>(BB->getTerminator())) { | |||
5429 | BasicBlock *Parent = PN->getParent(); | |||
5430 | Loop *PNLoop = LI.getLoopFor(Parent); | |||
5431 | if (!PNLoop || Parent != PNLoop->getHeader()) { | |||
5432 | // Split the critical edge. | |||
5433 | BasicBlock *NewBB = nullptr; | |||
5434 | if (!Parent->isLandingPad()) { | |||
5435 | NewBB = | |||
5436 | SplitCriticalEdge(BB, Parent, | |||
5437 | CriticalEdgeSplittingOptions(&DT, &LI, MSSAU) | |||
5438 | .setMergeIdenticalEdges() | |||
5439 | .setKeepOneInputPHIs()); | |||
5440 | } else { | |||
5441 | SmallVector<BasicBlock*, 2> NewBBs; | |||
5442 | SplitLandingPadPredecessors(Parent, BB, "", "", NewBBs, &DT, &LI); | |||
5443 | NewBB = NewBBs[0]; | |||
5444 | } | |||
5445 | // If NewBB==NULL, then SplitCriticalEdge refused to split because all | |||
5446 | // phi predecessors are identical. The simple thing to do is skip | |||
5447 | // splitting in this case rather than complicate the API. | |||
5448 | if (NewBB) { | |||
5449 | // If PN is outside of the loop and BB is in the loop, we want to | |||
5450 | // move the block to be immediately before the PHI block, not | |||
5451 | // immediately after BB. | |||
5452 | if (L->contains(BB) && !L->contains(PN)) | |||
5453 | NewBB->moveBefore(PN->getParent()); | |||
5454 | ||||
5455 | // Splitting the edge can reduce the number of PHI entries we have. | |||
5456 | e = PN->getNumIncomingValues(); | |||
5457 | BB = NewBB; | |||
5458 | i = PN->getBasicBlockIndex(BB); | |||
5459 | ||||
5460 | needUpdateFixups = true; | |||
5461 | } | |||
5462 | } | |||
5463 | } | |||
5464 | ||||
5465 | std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair = | |||
5466 | Inserted.insert(std::make_pair(BB, static_cast<Value *>(nullptr))); | |||
5467 | if (!Pair.second) | |||
5468 | PN->setIncomingValue(i, Pair.first->second); | |||
5469 | else { | |||
5470 | Value *FullV = Expand(LU, LF, F, BB->getTerminator()->getIterator(), | |||
5471 | Rewriter, DeadInsts); | |||
5472 | ||||
5473 | // If this is reuse-by-noop-cast, insert the noop cast. | |||
5474 | Type *OpTy = LF.OperandValToReplace->getType(); | |||
5475 | if (FullV->getType() != OpTy) | |||
5476 | FullV = | |||
5477 | CastInst::Create(CastInst::getCastOpcode(FullV, false, | |||
5478 | OpTy, false), | |||
5479 | FullV, LF.OperandValToReplace->getType(), | |||
5480 | "tmp", BB->getTerminator()); | |||
5481 | ||||
5482 | PN->setIncomingValue(i, FullV); | |||
5483 | Pair.first->second = FullV; | |||
5484 | } | |||
5485 | ||||
5486 | // If LSR split a critical edge and the phi node has other pending | |||
5487 | // fixup operands, we need to update those pending fixups. Otherwise | |||
5488 | // formulae will not be implemented completely and some instructions | |||
5489 | // will not be eliminated. | |||
5490 | if (needUpdateFixups) { | |||
5491 | for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) | |||
5492 | for (LSRFixup &Fixup : Uses[LUIdx].Fixups) | |||
5493 | // If a fixup is supposed to rewrite some operand in the phi | |||
5494 | // that was just updated, that operand may already have been moved | |||
5495 | // to another phi node. Such a fixup requires an update. | |||
5496 | if (Fixup.UserInst == PN) { | |||
5497 | // Check if the operand we try to replace still exists in the | |||
5498 | // original phi. | |||
5499 | bool foundInOriginalPHI = false; | |||
5500 | for (const auto &val : PN->incoming_values()) | |||
5501 | if (val == Fixup.OperandValToReplace) { | |||
5502 | foundInOriginalPHI = true; | |||
5503 | break; | |||
5504 | } | |||
5505 | ||||
5506 | // If the fixup operand was found in the original PHI - nothing to do. | |||
5507 | if (foundInOriginalPHI) | |||
5508 | continue; | |||
5509 | ||||
5510 | // Otherwise it might have been moved to another PHI and requires an update. | |||
5511 | // If the fixup operand is not found in any of the incoming blocks, that | |||
5512 | // means we have already rewritten it - nothing to do. | |||
5513 | for (const auto &Block : PN->blocks()) | |||
5514 | for (BasicBlock::iterator I = Block->begin(); isa<PHINode>(I); | |||
5515 | ++I) { | |||
5516 | PHINode *NewPN = cast<PHINode>(I); | |||
5517 | for (const auto &val : NewPN->incoming_values()) | |||
5518 | if (val == Fixup.OperandValToReplace) | |||
5519 | Fixup.UserInst = NewPN; | |||
5520 | } | |||
5521 | } | |||
5522 | } | |||
5523 | } | |||
5524 | } | |||
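// Illustrative note (not from the original source): for a phi such as
//   %p = phi [ %v, %then ], [ %v, %else ]
// where %then has several successors, the expansion cannot simply be placed
// at the end of %then without executing it on paths that never reach %p.
// Splitting the critical edge gives a dedicated block in which Expand can
// emit code, and the Inserted map above ensures that two incoming edges from
// the same block share a single expansion.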
5525 | ||||
5526 | /// Emit instructions for the leading candidate expression for this LSRUse (this | |||
5527 | /// is called "expanding"), and update the UserInst to reference the newly | |||
5528 | /// expanded value. | |||
5529 | void LSRInstance::Rewrite(const LSRUse &LU, const LSRFixup &LF, | |||
5530 | const Formula &F, SCEVExpander &Rewriter, | |||
5531 | SmallVectorImpl<WeakTrackingVH> &DeadInsts) const { | |||
5532 | // First, find an insertion point that dominates UserInst. For PHI nodes, | |||
5533 | // find the nearest block which dominates all the relevant uses. | |||
5534 | if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) { | |||
5535 | RewriteForPHI(PN, LU, LF, F, Rewriter, DeadInsts); | |||
5536 | } else { | |||
5537 | Value *FullV = | |||
5538 | Expand(LU, LF, F, LF.UserInst->getIterator(), Rewriter, DeadInsts); | |||
5539 | ||||
5540 | // If this is reuse-by-noop-cast, insert the noop cast. | |||
5541 | Type *OpTy = LF.OperandValToReplace->getType(); | |||
5542 | if (FullV->getType() != OpTy) { | |||
5543 | Instruction *Cast = | |||
5544 | CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false), | |||
5545 | FullV, OpTy, "tmp", LF.UserInst); | |||
5546 | FullV = Cast; | |||
5547 | } | |||
5548 | ||||
5549 | // Update the user. ICmpZero is handled specially here (for now) because | |||
5550 | // Expand may have updated one of the operands of the icmp already, and | |||
5551 | // its new value may happen to be equal to LF.OperandValToReplace, in | |||
5552 | // which case doing replaceUsesOfWith leads to replacing both operands | |||
5553 | // with the same value. TODO: Reorganize this. | |||
5554 | if (LU.Kind == LSRUse::ICmpZero) | |||
5555 | LF.UserInst->setOperand(0, FullV); | |||
5556 | else | |||
5557 | LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV); | |||
5558 | } | |||
5559 | ||||
5560 | if (auto *OperandIsInstr = dyn_cast<Instruction>(LF.OperandValToReplace)) | |||
5561 | DeadInsts.emplace_back(OperandIsInstr); | |||
5562 | } | |||
5563 | ||||
5564 | /// Rewrite all the fixup locations with new values, following the chosen | |||
5565 | /// solution. | |||
5566 | void LSRInstance::ImplementSolution( | |||
5567 | const SmallVectorImpl<const Formula *> &Solution) { | |||
5568 | // Keep track of instructions we may have made dead, so that | |||
5569 | // we can remove them after we are done working. | |||
5570 | SmallVector<WeakTrackingVH, 16> DeadInsts; | |||
5571 | ||||
5572 | SCEVExpander Rewriter(SE, L->getHeader()->getModule()->getDataLayout(), "lsr", | |||
5573 | false); | |||
5574 | #ifndef NDEBUG | |||
5575 | Rewriter.setDebugType(DEBUG_TYPE); | |||
5576 | #endif | |||
5577 | Rewriter.disableCanonicalMode(); | |||
5578 | Rewriter.enableLSRMode(); | |||
5579 | Rewriter.setIVIncInsertPos(L, IVIncInsertPos); | |||
5580 | ||||
5581 | // Mark phi nodes that terminate chains so the expander tries to reuse them. | |||
5582 | for (const IVChain &Chain : IVChainVec) { | |||
5583 | if (PHINode *PN = dyn_cast<PHINode>(Chain.tailUserInst())) | |||
5584 | Rewriter.setChainedPhi(PN); | |||
5585 | } | |||
5586 | ||||
5587 | // Expand the new value definitions and update the users. | |||
5588 | for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) | |||
5589 | for (const LSRFixup &Fixup : Uses[LUIdx].Fixups) { | |||
5590 | Rewrite(Uses[LUIdx], Fixup, *Solution[LUIdx], Rewriter, DeadInsts); | |||
5591 | Changed = true; | |||
5592 | } | |||
5593 | ||||
5594 | for (const IVChain &Chain : IVChainVec) { | |||
5595 | GenerateIVChain(Chain, Rewriter, DeadInsts); | |||
5596 | Changed = true; | |||
5597 | } | |||
5598 | ||||
5599 | for (const WeakVH &IV : Rewriter.getInsertedIVs()) | |||
5600 | if (IV && isa<Instruction>(&*IV) && cast<Instruction>(&*IV)->getParent()) | |||
5601 | ScalarEvolutionIVs.push_back(IV); | |||
5602 | ||||
5603 | // Clean up after ourselves. This must be done before deleting any | |||
5604 | // instructions. | |||
5605 | Rewriter.clear(); | |||
5606 | ||||
5607 | Changed |= RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts, | |||
5608 | &TLI, MSSAU); | |||
5609 | ||||
5610 | // In our cost analysis above, we assume that each addrec consumes exactly | |||
5611 | // one register, and arrange to have increments inserted just before the | |||
5612 | // latch to maximize the chance this is true. However, if we reused | |||
5613 | // existing IVs, we now need to move the increments to match our | |||
5614 | // expectations. Otherwise, our cost modeling results in us having | |||
5615 | // chosen a non-optimal result for the actual schedule. (And yes, this | |||
5616 | // scheduling decision does impact later codegen.) | |||
5617 | for (PHINode &PN : L->getHeader()->phis()) { | |||
5618 | BinaryOperator *BO = nullptr; | |||
5619 | Value *Start = nullptr, *Step = nullptr; | |||
5620 | if (!matchSimpleRecurrence(&PN, BO, Start, Step)) | |||
5621 | continue; | |||
5622 | ||||
5623 | switch (BO->getOpcode()) { | |||
5624 | case Instruction::Sub: | |||
5625 | if (BO->getOperand(0) != &PN) | |||
5626 | // sub is non-commutative - match handling elsewhere in LSR | |||
5627 | continue; | |||
5628 | break; | |||
5629 | case Instruction::Add: | |||
5630 | break; | |||
5631 | default: | |||
5632 | continue; | |||
5633 | }; | |||
5634 | ||||
5635 | if (!isa<Constant>(Step)) | |||
5636 | // If not a constant step, might increase register pressure | |||
5637 | // (We assume constants have been canonicalized to RHS) | |||
5638 | continue; | |||
5639 | ||||
5640 | if (BO->getParent() == IVIncInsertPos->getParent()) | |||
5641 | // Only bother moving across blocks. Isel can handle block local case. | |||
5642 | continue; | |||
5643 | ||||
5644 | // Can we legally schedule inc at the desired point? | |||
5645 | if (!llvm::all_of(BO->uses(), | |||
5646 | [&](Use &U) {return DT.dominates(IVIncInsertPos, U);})) | |||
5647 | continue; | |||
5648 | BO->moveBefore(IVIncInsertPos); | |||
5649 | Changed = true; | |||
5650 | } | |||
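// Illustrative sketch (not from the original source): if a reused IV's
// increment "%iv.next = add %iv, 4" ended up early in the loop body while
// LSR's cost model assumed it sits just before the latch, the loop above
// moves it to IVIncInsertPos (provided all its users are still dominated from
// there), so the schedule seen by codegen matches the one that was costed.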
5651 | ||||
5652 | ||||
5653 | } | |||
5654 | ||||
5655 | LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, | |||
5656 | DominatorTree &DT, LoopInfo &LI, | |||
5657 | const TargetTransformInfo &TTI, AssumptionCache &AC, | |||
5658 | TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU) | |||
5659 | : IU(IU), SE(SE), DT(DT), LI(LI), AC(AC), TLI(TLI), TTI(TTI), L(L), | |||
5660 | MSSAU(MSSAU), AMK(PreferredAddresingMode.getNumOccurrences() > 0 ? | |||
5661 | PreferredAddresingMode : TTI.getPreferredAddressingMode(L, &SE)) { | |||
5662 | // If LoopSimplify form is not available, stay out of trouble. | |||
5663 | if (!L->isLoopSimplifyForm()) | |||
5664 | return; | |||
5665 | ||||
5666 | // If there's no interesting work to be done, bail early. | |||
5667 | if (IU.empty()) return; | |||
5668 | ||||
5669 | // If there's too much analysis to be done, bail early. We won't be able to | |||
5670 | // model the problem anyway. | |||
5671 | unsigned NumUsers = 0; | |||
5672 | for (const IVStrideUse &U : IU) { | |||
5673 | if (++NumUsers > MaxIVUsers) { | |||
5674 | (void)U; | |||
5675 | LLVM_DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U | |||
5676 | << "\n"); | |||
5677 | return; | |||
5678 | } | |||
5679 | // Bail out if we have a PHI on an EHPad that gets a value from a | |||
5680 | // CatchSwitchInst. Because the CatchSwitchInst cannot be split, there is | |||
5681 | // no good place to stick any instructions. | |||
5682 | if (auto *PN = dyn_cast<PHINode>(U.getUser())) { | |||
5683 | auto *FirstNonPHI = PN->getParent()->getFirstNonPHI(); | |||
5684 | if (isa<FuncletPadInst>(FirstNonPHI) || | |||
5685 | isa<CatchSwitchInst>(FirstNonPHI)) | |||
5686 | for (BasicBlock *PredBB : PN->blocks()) | |||
5687 | if (isa<CatchSwitchInst>(PredBB->getFirstNonPHI())) | |||
5688 | return; | |||
5689 | } | |||
5690 | } | |||
5691 | ||||
5692 | #ifndef NDEBUG | |||
5693 | // All dominating loops must have preheaders, or SCEVExpander may not be able | |||
5694 | // to materialize an AddRecExpr whose Start is an outer AddRecExpr. | |||
5695 | // | |||
5696 | // IVUsers analysis should only create users that are dominated by simple loop | |||
5697 | // headers. Since this loop should dominate all of its users, its user list | |||
5698 | // should be empty if this loop itself is not within a simple loop nest. | |||
5699 | for (DomTreeNode *Rung = DT.getNode(L->getLoopPreheader()); | |||
5700 | Rung; Rung = Rung->getIDom()) { | |||
5701 | BasicBlock *BB = Rung->getBlock(); | |||
5702 | const Loop *DomLoop = LI.getLoopFor(BB); | |||
5703 | if (DomLoop && DomLoop->getHeader() == BB) { | |||
5704 | assert(DomLoop->getLoopPreheader() && "LSR needs a simplified loop nest"); | |||
5705 | } | |||
5706 | } | |||
5707 | #endif // DEBUG | |||
5708 | ||||
5709 | LLVM_DEBUG(dbgs() << "\nLSR on loop "; | |||
5710 | L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false); | |||
5711 | dbgs() << ":\n"); | |||
5712 | ||||
5713 | // First, perform some low-level loop optimizations. | |||
5714 | OptimizeShadowIV(); | |||
5715 | OptimizeLoopTermCond(); | |||
5716 | ||||
5717 | // If loop preparation eliminates all interesting IV users, bail. | |||
5718 | if (IU.empty()) return; | |||
5719 | ||||
5720 | // Skip nested loops until we can model them better with formulae. | |||
5721 | if (!L->isInnermost()) { | |||
5722 | LLVM_DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n"); | |||
5723 | return; | |||
5724 | } | |||
5725 | ||||
5726 | // Start collecting data and preparing for the solver. | |||
5727 | // If the number of registers is not the major cost, we cannot benefit from | |||
5728 | // the current profitable chain optimization, which is based on the number | |||
5729 | // of registers. | |||
5730 | // FIXME: add profitable chain optimization for other kinds of major cost, | |||
5731 | // for example the number of instructions. | |||
5732 | if (TTI.isNumRegsMajorCostOfLSR() || StressIVChain) | |||
5733 | CollectChains(); | |||
5734 | CollectInterestingTypesAndFactors(); | |||
5735 | CollectFixupsAndInitialFormulae(); | |||
5736 | CollectLoopInvariantFixupsAndFormulae(); | |||
5737 | ||||
5738 | if (Uses.empty()) | |||
5739 | return; | |||
5740 | ||||
5741 | LLVM_DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n"; | |||
5742 | print_uses(dbgs())); | |||
5743 | ||||
5744 | // Now use the reuse data to generate a bunch of interesting ways | |||
5745 | // to formulate the values needed for the uses. | |||
5746 | GenerateAllReuseFormulae(); | |||
5747 | ||||
5748 | FilterOutUndesirableDedicatedRegisters(); | |||
5749 | NarrowSearchSpaceUsingHeuristics(); | |||
5750 | ||||
5751 | SmallVector<const Formula *, 8> Solution; | |||
5752 | Solve(Solution); | |||
5753 | ||||
5754 | // Release memory that is no longer needed. | |||
5755 | Factors.clear(); | |||
5756 | Types.clear(); | |||
5757 | RegUses.clear(); | |||
5758 | ||||
5759 | if (Solution.empty()) | |||
5760 | return; | |||
5761 | ||||
5762 | #ifndef NDEBUG | |||
5763 | // Formulae should be legal. | |||
5764 | for (const LSRUse &LU : Uses) { | |||
5765 | for (const Formula &F : LU.Formulae) | |||
5766 | assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, | |||
5767 | F) && "Illegal formula generated!"); | |||
5768 | }; | |||
5769 | #endif | |||
5770 | ||||
5771 | // Now that we've decided what we want, make it so. | |||
5772 | ImplementSolution(Solution); | |||
5773 | } | |||
5774 | ||||
5775 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
5776 | void LSRInstance::print_factors_and_types(raw_ostream &OS) const { | |||
5777 | if (Factors.empty() && Types.empty()) return; | |||
5778 | ||||
5779 | OS << "LSR has identified the following interesting factors and types: "; | |||
5780 | bool First = true; | |||
5781 | ||||
5782 | for (int64_t Factor : Factors) { | |||
5783 | if (!First) OS << ", "; | |||
5784 | First = false; | |||
5785 | OS << '*' << Factor; | |||
5786 | } | |||
5787 | ||||
5788 | for (Type *Ty : Types) { | |||
5789 | if (!First) OS << ", "; | |||
5790 | First = false; | |||
5791 | OS << '(' << *Ty << ')'; | |||
5792 | } | |||
5793 | OS << '\n'; | |||
5794 | } | |||
5795 | ||||
5796 | void LSRInstance::print_fixups(raw_ostream &OS) const { | |||
5797 | OS << "LSR is examining the following fixup sites:\n"; | |||
5798 | for (const LSRUse &LU : Uses) | |||
5799 | for (const LSRFixup &LF : LU.Fixups) { | |||
5800 | dbgs() << " "; | |||
5801 | LF.print(OS); | |||
5802 | OS << '\n'; | |||
5803 | } | |||
5804 | } | |||
5805 | ||||
5806 | void LSRInstance::print_uses(raw_ostream &OS) const { | |||
5807 | OS << "LSR is examining the following uses:\n"; | |||
5808 | for (const LSRUse &LU : Uses) { | |||
5809 | dbgs() << " "; | |||
5810 | LU.print(OS); | |||
5811 | OS << '\n'; | |||
5812 | for (const Formula &F : LU.Formulae) { | |||
5813 | OS << " "; | |||
5814 | F.print(OS); | |||
5815 | OS << '\n'; | |||
5816 | } | |||
5817 | } | |||
5818 | } | |||
5819 | ||||
5820 | void LSRInstance::print(raw_ostream &OS) const { | |||
5821 | print_factors_and_types(OS); | |||
5822 | print_fixups(OS); | |||
5823 | print_uses(OS); | |||
5824 | } | |||
5825 | ||||
5826 | LLVM_DUMP_METHOD void LSRInstance::dump() const { | |||
5827 | print(errs()); errs() << '\n'; | |||
5828 | } | |||
5829 | #endif | |||
5830 | ||||
5831 | namespace { | |||
5832 | ||||
5833 | class LoopStrengthReduce : public LoopPass { | |||
5834 | public: | |||
5835 | static char ID; // Pass ID, replacement for typeid | |||
5836 | ||||
5837 | LoopStrengthReduce(); | |||
5838 | ||||
5839 | private: | |||
5840 | bool runOnLoop(Loop *L, LPPassManager &LPM) override; | |||
5841 | void getAnalysisUsage(AnalysisUsage &AU) const override; | |||
5842 | }; | |||
5843 | ||||
5844 | } // end anonymous namespace | |||
5845 | ||||
5846 | LoopStrengthReduce::LoopStrengthReduce() : LoopPass(ID) { | |||
5847 | initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry()); | |||
5848 | } | |||
5849 | ||||
5850 | void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const { | |||
5851 | // We split critical edges, so we change the CFG. However, we do update | |||
5852 | // many analyses if they are around. | |||
5853 | AU.addPreservedID(LoopSimplifyID); | |||
5854 | ||||
5855 | AU.addRequired<LoopInfoWrapperPass>(); | |||
5856 | AU.addPreserved<LoopInfoWrapperPass>(); | |||
5857 | AU.addRequiredID(LoopSimplifyID); | |||
5858 | AU.addRequired<DominatorTreeWrapperPass>(); | |||
5859 | AU.addPreserved<DominatorTreeWrapperPass>(); | |||
5860 | AU.addRequired<ScalarEvolutionWrapperPass>(); | |||
5861 | AU.addPreserved<ScalarEvolutionWrapperPass>(); | |||
5862 | AU.addRequired<AssumptionCacheTracker>(); | |||
5863 | AU.addRequired<TargetLibraryInfoWrapperPass>(); | |||
5864 | // Requiring LoopSimplify a second time here prevents IVUsers from running | |||
5865 | // twice, since LoopSimplify was invalidated by running ScalarEvolution. | |||
5866 | AU.addRequiredID(LoopSimplifyID); | |||
5867 | AU.addRequired<IVUsersWrapperPass>(); | |||
5868 | AU.addPreserved<IVUsersWrapperPass>(); | |||
5869 | AU.addRequired<TargetTransformInfoWrapperPass>(); | |||
5870 | AU.addPreserved<MemorySSAWrapperPass>(); | |||
5871 | } | |||
5872 | ||||
5873 | struct SCEVDbgValueBuilder { | |||
5874 | SCEVDbgValueBuilder() = default; | |||
5875 | SCEVDbgValueBuilder(const SCEVDbgValueBuilder &Base) { | |||
5876 | Values = Base.Values; | |||
5877 | Expr = Base.Expr; | |||
5878 | } | |||
5879 | ||||
5880 | /// The DIExpression as we translate the SCEV. | |||
5881 | SmallVector<uint64_t, 6> Expr; | |||
5882 | /// The location ops of the DIExpression. | |||
5883 | SmallVector<llvm::ValueAsMetadata *, 2> Values; | |||
5884 | ||||
5885 | void pushOperator(uint64_t Op) { Expr.push_back(Op); } | |||
5886 | void pushUInt(uint64_t Operand) { Expr.push_back(Operand); } | |||
5887 | ||||
5888 | /// Add a DW_OP_LLVM_arg to the expression, followed by the index of the value | |||
5889 | /// in the set of values referenced by the expression. | |||
5890 | void pushValue(llvm::Value *V) { | |||
5891 | Expr.push_back(llvm::dwarf::DW_OP_LLVM_arg); | |||
5892 | auto *It = | |||
5893 | std::find(Values.begin(), Values.end(), llvm::ValueAsMetadata::get(V)); | |||
5894 | unsigned ArgIndex = 0; | |||
5895 | if (It != Values.end()) { | |||
5896 | ArgIndex = std::distance(Values.begin(), It); | |||
5897 | } else { | |||
5898 | ArgIndex = Values.size(); | |||
5899 | Values.push_back(llvm::ValueAsMetadata::get(V)); | |||
5900 | } | |||
5901 | Expr.push_back(ArgIndex); | |||
5902 | } | |||
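// Illustrative note (not from the original source): pushValue() deduplicates
// location operands, so translating an expression that uses %a twice and %b
// once might produce Values = {%a, %b} and an Expr fragment such as
//   DW_OP_LLVM_arg 0, DW_OP_LLVM_arg 1, DW_OP_plus, DW_OP_LLVM_arg 0, ...
// where each DW_OP_LLVM_arg index refers back into Values.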
5903 | ||||
5904 | void pushValue(const SCEVUnknown *U) { | |||
5905 | llvm::Value *V = cast<SCEVUnknown>(U)->getValue(); | |||
5906 | pushValue(V); | |||
5907 | } | |||
5908 | ||||
5909 | bool pushConst(const SCEVConstant *C) { | |||
5910 | if (C->getAPInt().getMinSignedBits() > 64) | |||
5911 | return false; | |||
5912 | Expr.push_back(llvm::dwarf::DW_OP_consts); | |||
5913 | Expr.push_back(C->getAPInt().getSExtValue()); | |||
5914 | return true; | |||