File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Analysis/ScalarEvolution.cpp
Warning: line 10451, column 35: Called C++ object pointer is null
1 | //===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===// | ||||
2 | // | ||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||
6 | // | ||||
7 | //===----------------------------------------------------------------------===// | ||||
8 | // | ||||
9 | // This file contains the implementation of the scalar evolution analysis | ||||
10 | // engine, which is used primarily to analyze expressions involving induction | ||||
11 | // variables in loops. | ||||
12 | // | ||||
13 | // There are several aspects to this library. First is the representation of | ||||
14 | // scalar expressions, which are represented as subclasses of the SCEV class. | ||||
15 | // These classes are used to represent certain types of subexpressions that we | ||||
16 | // can handle. We only create one SCEV of a particular shape, so | ||||
17 | // pointer-comparisons for equality are legal. | ||||
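| // For example, two calls to getAddExpr with the same operands return the | ||||
| // same SCEV pointer, so clients can test structural equality with a | ||||
| // simple S1 == S2 comparison. | ||||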
18 | // | ||||
19 | // One important aspect of the SCEV objects is that they are never cyclic, even | ||||
20 | // if there is a cycle in the dataflow for an expression (ie, a PHI node). If | ||||
21 | // the PHI node is one of the idioms that we can represent (e.g., a polynomial | ||||
22 | // recurrence) then we represent it directly as a recurrence node, otherwise we | ||||
23 | // represent it as a SCEVUnknown node. | ||||
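| // For example, the induction variable of "for (i = 0; i != n; ++i)" is | ||||
| // the polynomial recurrence {0,+,1}<%loop>: it starts at 0 and adds 1 | ||||
| // each time the backedge is taken, even though the underlying PHI node | ||||
| // forms a cycle in the dataflow. | ||||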
24 | // | ||||
25 | // In addition to being able to represent expressions of various types, we also | ||||
26 | // have folders that are used to build the *canonical* representation for a | ||||
27 | // particular expression. These folders are capable of using a variety of | ||||
28 | // rewrite rules to simplify the expressions. | ||||
29 | // | ||||
30 | // Once the folders are defined, we can implement the more interesting | ||||
31 | // higher-level code, such as the code that recognizes PHI nodes of various | ||||
32 | // types, computes the execution count of a loop, etc. | ||||
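| // For example, given the exit test "i != n" for the recurrence {0,+,1} | ||||
| // above, the backedge-taken count of the loop is computed to be n. | ||||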
33 | // | ||||
34 | // TODO: We should use these routines and value representations to implement | ||||
35 | // dependence analysis! | ||||
36 | // | ||||
37 | //===----------------------------------------------------------------------===// | ||||
38 | // | ||||
39 | // There are several good references for the techniques used in this analysis. | ||||
40 | // | ||||
41 | // Chains of recurrences -- a method to expedite the evaluation | ||||
42 | // of closed-form functions | ||||
43 | // Olaf Bachmann, Paul S. Wang, Eugene V. Zima | ||||
44 | // | ||||
45 | // On computational properties of chains of recurrences | ||||
46 | // Eugene V. Zima | ||||
47 | // | ||||
48 | // Symbolic Evaluation of Chains of Recurrences for Loop Optimization | ||||
49 | // Robert A. van Engelen | ||||
50 | // | ||||
51 | // Efficient Symbolic Analysis for Optimizing Compilers | ||||
52 | // Robert A. van Engelen | ||||
53 | // | ||||
54 | // Using the chains of recurrences algebra for data dependence testing and | ||||
55 | // induction variable substitution | ||||
56 | // MS Thesis, Johnie Birch | ||||
57 | // | ||||
58 | //===----------------------------------------------------------------------===// | ||||
59 | |||||
60 | #include "llvm/Analysis/ScalarEvolution.h" | ||||
61 | #include "llvm/ADT/APInt.h" | ||||
62 | #include "llvm/ADT/ArrayRef.h" | ||||
63 | #include "llvm/ADT/DenseMap.h" | ||||
64 | #include "llvm/ADT/DepthFirstIterator.h" | ||||
65 | #include "llvm/ADT/EquivalenceClasses.h" | ||||
66 | #include "llvm/ADT/FoldingSet.h" | ||||
67 | #include "llvm/ADT/None.h" | ||||
68 | #include "llvm/ADT/Optional.h" | ||||
69 | #include "llvm/ADT/STLExtras.h" | ||||
70 | #include "llvm/ADT/ScopeExit.h" | ||||
71 | #include "llvm/ADT/Sequence.h" | ||||
72 | #include "llvm/ADT/SetVector.h" | ||||
73 | #include "llvm/ADT/SmallPtrSet.h" | ||||
74 | #include "llvm/ADT/SmallSet.h" | ||||
75 | #include "llvm/ADT/SmallVector.h" | ||||
76 | #include "llvm/ADT/Statistic.h" | ||||
77 | #include "llvm/ADT/StringRef.h" | ||||
78 | #include "llvm/Analysis/AssumptionCache.h" | ||||
79 | #include "llvm/Analysis/ConstantFolding.h" | ||||
80 | #include "llvm/Analysis/InstructionSimplify.h" | ||||
81 | #include "llvm/Analysis/LoopInfo.h" | ||||
82 | #include "llvm/Analysis/ScalarEvolutionDivision.h" | ||||
83 | #include "llvm/Analysis/ScalarEvolutionExpressions.h" | ||||
84 | #include "llvm/Analysis/TargetLibraryInfo.h" | ||||
85 | #include "llvm/Analysis/ValueTracking.h" | ||||
86 | #include "llvm/Config/llvm-config.h" | ||||
87 | #include "llvm/IR/Argument.h" | ||||
88 | #include "llvm/IR/BasicBlock.h" | ||||
89 | #include "llvm/IR/CFG.h" | ||||
90 | #include "llvm/IR/Constant.h" | ||||
91 | #include "llvm/IR/ConstantRange.h" | ||||
92 | #include "llvm/IR/Constants.h" | ||||
93 | #include "llvm/IR/DataLayout.h" | ||||
94 | #include "llvm/IR/DerivedTypes.h" | ||||
95 | #include "llvm/IR/Dominators.h" | ||||
96 | #include "llvm/IR/Function.h" | ||||
97 | #include "llvm/IR/GlobalAlias.h" | ||||
98 | #include "llvm/IR/GlobalValue.h" | ||||
99 | #include "llvm/IR/GlobalVariable.h" | ||||
100 | #include "llvm/IR/InstIterator.h" | ||||
101 | #include "llvm/IR/InstrTypes.h" | ||||
102 | #include "llvm/IR/Instruction.h" | ||||
103 | #include "llvm/IR/Instructions.h" | ||||
104 | #include "llvm/IR/IntrinsicInst.h" | ||||
105 | #include "llvm/IR/Intrinsics.h" | ||||
106 | #include "llvm/IR/LLVMContext.h" | ||||
107 | #include "llvm/IR/Metadata.h" | ||||
108 | #include "llvm/IR/Operator.h" | ||||
109 | #include "llvm/IR/PatternMatch.h" | ||||
110 | #include "llvm/IR/Type.h" | ||||
111 | #include "llvm/IR/Use.h" | ||||
112 | #include "llvm/IR/User.h" | ||||
113 | #include "llvm/IR/Value.h" | ||||
114 | #include "llvm/IR/Verifier.h" | ||||
115 | #include "llvm/InitializePasses.h" | ||||
116 | #include "llvm/Pass.h" | ||||
117 | #include "llvm/Support/Casting.h" | ||||
118 | #include "llvm/Support/CommandLine.h" | ||||
119 | #include "llvm/Support/Compiler.h" | ||||
120 | #include "llvm/Support/Debug.h" | ||||
121 | #include "llvm/Support/ErrorHandling.h" | ||||
122 | #include "llvm/Support/KnownBits.h" | ||||
123 | #include "llvm/Support/SaveAndRestore.h" | ||||
124 | #include "llvm/Support/raw_ostream.h" | ||||
125 | #include <algorithm> | ||||
126 | #include <cassert> | ||||
127 | #include <climits> | ||||
128 | #include <cstddef> | ||||
129 | #include <cstdint> | ||||
130 | #include <cstdlib> | ||||
131 | #include <map> | ||||
132 | #include <memory> | ||||
133 | #include <tuple> | ||||
134 | #include <utility> | ||||
135 | #include <vector> | ||||
136 | |||||
137 | using namespace llvm; | ||||
138 | using namespace PatternMatch; | ||||
139 | |||||
140 | #define DEBUG_TYPE "scalar-evolution" | ||||
141 | |||||
142 | STATISTIC(NumArrayLenItCounts, | ||||
143 | "Number of trip counts computed with array length"); | ||||
144 | STATISTIC(NumTripCountsComputed, | ||||
145 | "Number of loops with predictable loop counts"); | ||||
146 | STATISTIC(NumTripCountsNotComputed, | ||||
147 | "Number of loops without predictable loop counts"); | ||||
148 | STATISTIC(NumBruteForceTripCountsComputed, | ||||
149 | "Number of loops with trip counts computed by force"); | ||||
150 | |||||
151 | static cl::opt<unsigned> | ||||
152 | MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, | ||||
153 | cl::ZeroOrMore, | ||||
154 | cl::desc("Maximum number of iterations SCEV will " | ||||
155 | "symbolically execute a constant " | ||||
156 | "derived loop"), | ||||
157 | cl::init(100)); | ||||
158 | |||||
159 | // FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean. | ||||
160 | static cl::opt<bool> VerifySCEV( | ||||
161 | "verify-scev", cl::Hidden, | ||||
162 | cl::desc("Verify ScalarEvolution's backedge taken counts (slow)")); | ||||
163 | static cl::opt<bool> VerifySCEVStrict( | ||||
164 | "verify-scev-strict", cl::Hidden, | ||||
165 | cl::desc("Enable stricter verification with -verify-scev is passed")); | ||||
166 | static cl::opt<bool> | ||||
167 | VerifySCEVMap("verify-scev-maps", cl::Hidden, | ||||
168 | cl::desc("Verify no dangling value in ScalarEvolution's " | ||||
169 | "ExprValueMap (slow)")); | ||||
170 | |||||
171 | static cl::opt<bool> VerifyIR( | ||||
172 | "scev-verify-ir", cl::Hidden, | ||||
173 | cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), | ||||
174 | cl::init(false)); | ||||
175 | |||||
176 | static cl::opt<unsigned> MulOpsInlineThreshold( | ||||
177 | "scev-mulops-inline-threshold", cl::Hidden, | ||||
178 | cl::desc("Threshold for inlining multiplication operands into a SCEV"), | ||||
179 | cl::init(32)); | ||||
180 | |||||
181 | static cl::opt<unsigned> AddOpsInlineThreshold( | ||||
182 | "scev-addops-inline-threshold", cl::Hidden, | ||||
183 | cl::desc("Threshold for inlining addition operands into a SCEV"), | ||||
184 | cl::init(500)); | ||||
185 | |||||
186 | static cl::opt<unsigned> MaxSCEVCompareDepth( | ||||
187 | "scalar-evolution-max-scev-compare-depth", cl::Hidden, | ||||
188 | cl::desc("Maximum depth of recursive SCEV complexity comparisons"), | ||||
189 | cl::init(32)); | ||||
190 | |||||
191 | static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth( | ||||
192 | "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, | ||||
193 | cl::desc("Maximum depth of recursive SCEV operations implication analysis"), | ||||
194 | cl::init(2)); | ||||
195 | |||||
196 | static cl::opt<unsigned> MaxValueCompareDepth( | ||||
197 | "scalar-evolution-max-value-compare-depth", cl::Hidden, | ||||
198 | cl::desc("Maximum depth of recursive value complexity comparisons"), | ||||
199 | cl::init(2)); | ||||
200 | |||||
201 | static cl::opt<unsigned> | ||||
202 | MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, | ||||
203 | cl::desc("Maximum depth of recursive arithmetics"), | ||||
204 | cl::init(32)); | ||||
205 | |||||
206 | static cl::opt<unsigned> MaxConstantEvolvingDepth( | ||||
207 | "scalar-evolution-max-constant-evolving-depth", cl::Hidden, | ||||
208 | cl::desc("Maximum depth of recursive constant evolving"), cl::init(32)); | ||||
209 | |||||
210 | static cl::opt<unsigned> | ||||
211 | MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, | ||||
212 | cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), | ||||
213 | cl::init(8)); | ||||
214 | |||||
215 | static cl::opt<unsigned> | ||||
216 | MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, | ||||
217 | cl::desc("Max coefficients in AddRec during evolving"), | ||||
218 | cl::init(8)); | ||||
219 | |||||
220 | static cl::opt<unsigned> | ||||
221 | HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, | ||||
222 | cl::desc("Size of the expression which is considered huge"), | ||||
223 | cl::init(4096)); | ||||
224 | |||||
225 | static cl::opt<bool> | ||||
226 | ClassifyExpressions("scalar-evolution-classify-expressions", | ||||
227 | cl::Hidden, cl::init(true), | ||||
228 | cl::desc("When printing analysis, include information on every instruction")); | ||||
229 | |||||
230 | static cl::opt<bool> UseExpensiveRangeSharpening( | ||||
231 | "scalar-evolution-use-expensive-range-sharpening", cl::Hidden, | ||||
232 | cl::init(false), | ||||
233 | cl::desc("Use more powerful methods of sharpening expression ranges. May " | ||||
234 | "be costly in terms of compile time")); | ||||
235 | |||||
236 | //===----------------------------------------------------------------------===// | ||||
237 | // SCEV class definitions | ||||
238 | //===----------------------------------------------------------------------===// | ||||
239 | |||||
240 | //===----------------------------------------------------------------------===// | ||||
241 | // Implementation of the SCEV class. | ||||
242 | // | ||||
243 | |||||
244 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | ||||
245 | LLVM_DUMP_METHOD void SCEV::dump() const { | ||||
246 | print(dbgs()); | ||||
247 | dbgs() << '\n'; | ||||
248 | } | ||||
249 | #endif | ||||
250 | |||||
251 | void SCEV::print(raw_ostream &OS) const { | ||||
252 | switch (getSCEVType()) { | ||||
253 | case scConstant: | ||||
254 | cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false); | ||||
255 | return; | ||||
256 | case scPtrToInt: { | ||||
257 | const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this); | ||||
258 | const SCEV *Op = PtrToInt->getOperand(); | ||||
259 | OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to " | ||||
260 | << *PtrToInt->getType() << ")"; | ||||
261 | return; | ||||
262 | } | ||||
263 | case scTruncate: { | ||||
264 | const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this); | ||||
265 | const SCEV *Op = Trunc->getOperand(); | ||||
266 | OS << "(trunc " << *Op->getType() << " " << *Op << " to " | ||||
267 | << *Trunc->getType() << ")"; | ||||
268 | return; | ||||
269 | } | ||||
270 | case scZeroExtend: { | ||||
271 | const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this); | ||||
272 | const SCEV *Op = ZExt->getOperand(); | ||||
273 | OS << "(zext " << *Op->getType() << " " << *Op << " to " | ||||
274 | << *ZExt->getType() << ")"; | ||||
275 | return; | ||||
276 | } | ||||
277 | case scSignExtend: { | ||||
278 | const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this); | ||||
279 | const SCEV *Op = SExt->getOperand(); | ||||
280 | OS << "(sext " << *Op->getType() << " " << *Op << " to " | ||||
281 | << *SExt->getType() << ")"; | ||||
282 | return; | ||||
283 | } | ||||
284 | case scAddRecExpr: { | ||||
285 | const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this); | ||||
286 | OS << "{" << *AR->getOperand(0); | ||||
287 | for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i) | ||||
288 | OS << ",+," << *AR->getOperand(i); | ||||
289 | OS << "}<"; | ||||
290 | if (AR->hasNoUnsignedWrap()) | ||||
291 | OS << "nuw><"; | ||||
292 | if (AR->hasNoSignedWrap()) | ||||
293 | OS << "nsw><"; | ||||
294 | if (AR->hasNoSelfWrap() && | ||||
295 | !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW))) | ||||
296 | OS << "nw><"; | ||||
297 | AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false); | ||||
298 | OS << ">"; | ||||
299 | return; | ||||
300 | } | ||||
301 | case scAddExpr: | ||||
302 | case scMulExpr: | ||||
303 | case scUMaxExpr: | ||||
304 | case scSMaxExpr: | ||||
305 | case scUMinExpr: | ||||
306 | case scSMinExpr: { | ||||
307 | const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this); | ||||
308 | const char *OpStr = nullptr; | ||||
309 | switch (NAry->getSCEVType()) { | ||||
310 | case scAddExpr: OpStr = " + "; break; | ||||
311 | case scMulExpr: OpStr = " * "; break; | ||||
312 | case scUMaxExpr: OpStr = " umax "; break; | ||||
313 | case scSMaxExpr: OpStr = " smax "; break; | ||||
314 | case scUMinExpr: | ||||
315 | OpStr = " umin "; | ||||
316 | break; | ||||
317 | case scSMinExpr: | ||||
318 | OpStr = " smin "; | ||||
319 | break; | ||||
320 | default: | ||||
321 | llvm_unreachable("There are no other nary expression types.")__builtin_unreachable(); | ||||
322 | } | ||||
323 | OS << "("; | ||||
324 | ListSeparator LS(OpStr); | ||||
325 | for (const SCEV *Op : NAry->operands()) | ||||
326 | OS << LS << *Op; | ||||
327 | OS << ")"; | ||||
328 | switch (NAry->getSCEVType()) { | ||||
329 | case scAddExpr: | ||||
330 | case scMulExpr: | ||||
331 | if (NAry->hasNoUnsignedWrap()) | ||||
332 | OS << "<nuw>"; | ||||
333 | if (NAry->hasNoSignedWrap()) | ||||
334 | OS << "<nsw>"; | ||||
335 | break; | ||||
336 | default: | ||||
337 | // Nothing to print for other nary expressions. | ||||
338 | break; | ||||
339 | } | ||||
340 | return; | ||||
341 | } | ||||
342 | case scUDivExpr: { | ||||
343 | const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this); | ||||
344 | OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")"; | ||||
345 | return; | ||||
346 | } | ||||
347 | case scUnknown: { | ||||
348 | const SCEVUnknown *U = cast<SCEVUnknown>(this); | ||||
349 | Type *AllocTy; | ||||
350 | if (U->isSizeOf(AllocTy)) { | ||||
351 | OS << "sizeof(" << *AllocTy << ")"; | ||||
352 | return; | ||||
353 | } | ||||
354 | if (U->isAlignOf(AllocTy)) { | ||||
355 | OS << "alignof(" << *AllocTy << ")"; | ||||
356 | return; | ||||
357 | } | ||||
358 | |||||
359 | Type *CTy; | ||||
360 | Constant *FieldNo; | ||||
361 | if (U->isOffsetOf(CTy, FieldNo)) { | ||||
362 | OS << "offsetof(" << *CTy << ", "; | ||||
363 | FieldNo->printAsOperand(OS, false); | ||||
364 | OS << ")"; | ||||
365 | return; | ||||
366 | } | ||||
367 | |||||
368 | // Otherwise just print it normally. | ||||
369 | U->getValue()->printAsOperand(OS, false); | ||||
370 | return; | ||||
371 | } | ||||
372 | case scCouldNotCompute: | ||||
373 | OS << "***COULDNOTCOMPUTE***"; | ||||
374 | return; | ||||
375 | } | ||||
376 | llvm_unreachable("Unknown SCEV kind!")__builtin_unreachable(); | ||||
377 | } | ||||
378 | |||||
379 | Type *SCEV::getType() const { | ||||
380 | switch (getSCEVType()) { | ||||
381 | case scConstant: | ||||
382 | return cast<SCEVConstant>(this)->getType(); | ||||
383 | case scPtrToInt: | ||||
384 | case scTruncate: | ||||
385 | case scZeroExtend: | ||||
386 | case scSignExtend: | ||||
387 | return cast<SCEVCastExpr>(this)->getType(); | ||||
388 | case scAddRecExpr: | ||||
389 | return cast<SCEVAddRecExpr>(this)->getType(); | ||||
390 | case scMulExpr: | ||||
391 | return cast<SCEVMulExpr>(this)->getType(); | ||||
392 | case scUMaxExpr: | ||||
393 | case scSMaxExpr: | ||||
394 | case scUMinExpr: | ||||
395 | case scSMinExpr: | ||||
396 | return cast<SCEVMinMaxExpr>(this)->getType(); | ||||
397 | case scAddExpr: | ||||
398 | return cast<SCEVAddExpr>(this)->getType(); | ||||
399 | case scUDivExpr: | ||||
400 | return cast<SCEVUDivExpr>(this)->getType(); | ||||
401 | case scUnknown: | ||||
402 | return cast<SCEVUnknown>(this)->getType(); | ||||
403 | case scCouldNotCompute: | ||||
404 | llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!")__builtin_unreachable(); | ||||
405 | } | ||||
406 | llvm_unreachable("Unknown SCEV kind!")__builtin_unreachable(); | ||||
407 | } | ||||
408 | |||||
409 | bool SCEV::isZero() const { | ||||
410 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) | ||||
411 | return SC->getValue()->isZero(); | ||||
412 | return false; | ||||
413 | } | ||||
414 | |||||
415 | bool SCEV::isOne() const { | ||||
416 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) | ||||
417 | return SC->getValue()->isOne(); | ||||
418 | return false; | ||||
419 | } | ||||
420 | |||||
421 | bool SCEV::isAllOnesValue() const { | ||||
422 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) | ||||
423 | return SC->getValue()->isMinusOne(); | ||||
424 | return false; | ||||
425 | } | ||||
426 | |||||
427 | bool SCEV::isNonConstantNegative() const { | ||||
428 | const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this); | ||||
429 | if (!Mul) return false; | ||||
430 | |||||
431 | // If there is a constant factor, it will be first. | ||||
432 | const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0)); | ||||
433 | if (!SC) return false; | ||||
434 | |||||
435 | // Return true if the value is negative, this matches things like (-42 * V). | ||||
436 | return SC->getAPInt().isNegative(); | ||||
437 | } | ||||
438 | |||||
439 | SCEVCouldNotCompute::SCEVCouldNotCompute() : | ||||
440 | SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {} | ||||
441 | |||||
442 | bool SCEVCouldNotCompute::classof(const SCEV *S) { | ||||
443 | return S->getSCEVType() == scCouldNotCompute; | ||||
444 | } | ||||
445 | |||||
446 | const SCEV *ScalarEvolution::getConstant(ConstantInt *V) { | ||||
447 | FoldingSetNodeID ID; | ||||
448 | ID.AddInteger(scConstant); | ||||
449 | ID.AddPointer(V); | ||||
450 | void *IP = nullptr; | ||||
451 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; | ||||
452 | SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V); | ||||
453 | UniqueSCEVs.InsertNode(S, IP); | ||||
454 | return S; | ||||
455 | } | ||||
456 | |||||
457 | const SCEV *ScalarEvolution::getConstant(const APInt &Val) { | ||||
458 | return getConstant(ConstantInt::get(getContext(), Val)); | ||||
459 | } | ||||
460 | |||||
461 | const SCEV * | ||||
462 | ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) { | ||||
463 | IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty)); | ||||
464 | return getConstant(ConstantInt::get(ITy, V, isSigned)); | ||||
465 | } | ||||
466 | |||||
467 | SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, | ||||
468 | const SCEV *op, Type *ty) | ||||
469 | : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) { | ||||
470 | Operands[0] = op; | ||||
471 | } | ||||
472 | |||||
473 | SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op, | ||||
474 | Type *ITy) | ||||
475 | : SCEVCastExpr(ID, scPtrToInt, Op, ITy) { | ||||
476 | assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() && | ||||
477 | "Must be a non-bit-width-changing pointer-to-integer cast!"); | ||||
478 | } | ||||
479 | |||||
480 | SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, | ||||
481 | SCEVTypes SCEVTy, const SCEV *op, | ||||
482 | Type *ty) | ||||
483 | : SCEVCastExpr(ID, SCEVTy, op, ty) {} | ||||
484 | |||||
485 | SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op, | ||||
486 | Type *ty) | ||||
487 | : SCEVIntegralCastExpr(ID, scTruncate, op, ty) { | ||||
488 | assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | ||||
489 | "Cannot truncate non-integer value!"); | ||||
490 | } | ||||
491 | |||||
492 | SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, | ||||
493 | const SCEV *op, Type *ty) | ||||
494 | : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) { | ||||
495 | assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | ||||
496 | "Cannot zero extend non-integer value!"); | ||||
497 | } | ||||
498 | |||||
499 | SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, | ||||
500 | const SCEV *op, Type *ty) | ||||
501 | : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) { | ||||
502 | assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | ||||
503 | "Cannot sign extend non-integer value!"); | ||||
504 | } | ||||
505 | |||||
506 | void SCEVUnknown::deleted() { | ||||
507 | // Clear this SCEVUnknown from various maps. | ||||
508 | SE->forgetMemoizedResults(this); | ||||
509 | |||||
510 | // Remove this SCEVUnknown from the uniquing map. | ||||
511 | SE->UniqueSCEVs.RemoveNode(this); | ||||
512 | |||||
513 | // Release the value. | ||||
514 | setValPtr(nullptr); | ||||
515 | } | ||||
516 | |||||
517 | void SCEVUnknown::allUsesReplacedWith(Value *New) { | ||||
518 | // Remove this SCEVUnknown from the uniquing map. | ||||
519 | SE->UniqueSCEVs.RemoveNode(this); | ||||
520 | |||||
521 | // Update this SCEVUnknown to point to the new value. This is needed | ||||
522 | // because there may still be outstanding SCEVs which still point to | ||||
523 | // this SCEVUnknown. | ||||
524 | setValPtr(New); | ||||
525 | } | ||||
526 | |||||
527 | bool SCEVUnknown::isSizeOf(Type *&AllocTy) const { | ||||
528 | if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) | ||||
529 | if (VCE->getOpcode() == Instruction::PtrToInt) | ||||
530 | if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) | ||||
531 | if (CE->getOpcode() == Instruction::GetElementPtr && | ||||
532 | CE->getOperand(0)->isNullValue() && | ||||
533 | CE->getNumOperands() == 2) | ||||
534 | if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1))) | ||||
535 | if (CI->isOne()) { | ||||
536 | AllocTy = cast<GEPOperator>(CE)->getSourceElementType(); | ||||
537 | return true; | ||||
538 | } | ||||
539 | |||||
540 | return false; | ||||
541 | } | ||||
542 | |||||
543 | bool SCEVUnknown::isAlignOf(Type *&AllocTy) const { | ||||
544 | if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) | ||||
545 | if (VCE->getOpcode() == Instruction::PtrToInt) | ||||
546 | if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) | ||||
547 | if (CE->getOpcode() == Instruction::GetElementPtr && | ||||
548 | CE->getOperand(0)->isNullValue()) { | ||||
549 | Type *Ty = cast<GEPOperator>(CE)->getSourceElementType(); | ||||
550 | if (StructType *STy = dyn_cast<StructType>(Ty)) | ||||
551 | if (!STy->isPacked() && | ||||
552 | CE->getNumOperands() == 3 && | ||||
553 | CE->getOperand(1)->isNullValue()) { | ||||
554 | if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2))) | ||||
555 | if (CI->isOne() && | ||||
556 | STy->getNumElements() == 2 && | ||||
557 | STy->getElementType(0)->isIntegerTy(1)) { | ||||
558 | AllocTy = STy->getElementType(1); | ||||
559 | return true; | ||||
560 | } | ||||
561 | } | ||||
562 | } | ||||
563 | |||||
564 | return false; | ||||
565 | } | ||||
566 | |||||
567 | bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const { | ||||
568 | if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) | ||||
569 | if (VCE->getOpcode() == Instruction::PtrToInt) | ||||
570 | if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) | ||||
571 | if (CE->getOpcode() == Instruction::GetElementPtr && | ||||
572 | CE->getNumOperands() == 3 && | ||||
573 | CE->getOperand(0)->isNullValue() && | ||||
574 | CE->getOperand(1)->isNullValue()) { | ||||
575 | Type *Ty = cast<GEPOperator>(CE)->getSourceElementType(); | ||||
576 | // Ignore vector types here so that ScalarEvolutionExpander doesn't | ||||
577 | // emit getelementptrs that index into vectors. | ||||
578 | if (Ty->isStructTy() || Ty->isArrayTy()) { | ||||
579 | CTy = Ty; | ||||
580 | FieldNo = CE->getOperand(2); | ||||
581 | return true; | ||||
582 | } | ||||
583 | } | ||||
584 | |||||
585 | return false; | ||||
586 | } | ||||
587 | |||||
588 | //===----------------------------------------------------------------------===// | ||||
589 | // SCEV Utilities | ||||
590 | //===----------------------------------------------------------------------===// | ||||
591 | |||||
592 | /// Compare the two values \p LV and \p RV in terms of their "complexity" where | ||||
593 | /// "complexity" is a partial (and somewhat ad-hoc) relation used to order | ||||
594 | /// operands in SCEV expressions. \p EqCache is a set of pairs of values that | ||||
595 | /// have been previously deemed to be "equally complex" by this routine. It is | ||||
596 | /// intended to avoid exponential time complexity in cases like: | ||||
597 | /// | ||||
598 | /// %a = f(%x, %y) | ||||
599 | /// %b = f(%a, %a) | ||||
600 | /// %c = f(%b, %b) | ||||
601 | /// | ||||
602 | /// %d = f(%x, %y) | ||||
603 | /// %e = f(%d, %d) | ||||
604 | /// %f = f(%e, %e) | ||||
605 | /// | ||||
606 | /// CompareValueComplexity(%f, %c) | ||||
607 | /// | ||||
608 | /// Since we do not continue running this routine on expression trees once we | ||||
609 | /// have seen unequal values, there is no need to track them in the cache. | ||||
610 | static int | ||||
611 | CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue, | ||||
612 | const LoopInfo *const LI, Value *LV, Value *RV, | ||||
613 | unsigned Depth) { | ||||
614 | if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV)) | ||||
615 | return 0; | ||||
616 | |||||
617 | // Order pointer values after integer values. This helps SCEVExpander form | ||||
618 | // GEPs. | ||||
619 | bool LIsPointer = LV->getType()->isPointerTy(), | ||||
620 | RIsPointer = RV->getType()->isPointerTy(); | ||||
621 | if (LIsPointer != RIsPointer) | ||||
622 | return (int)LIsPointer - (int)RIsPointer; | ||||
623 | |||||
624 | // Compare getValueID values. | ||||
625 | unsigned LID = LV->getValueID(), RID = RV->getValueID(); | ||||
626 | if (LID != RID) | ||||
627 | return (int)LID - (int)RID; | ||||
628 | |||||
629 | // Sort arguments by their position. | ||||
630 | if (const auto *LA = dyn_cast<Argument>(LV)) { | ||||
631 | const auto *RA = cast<Argument>(RV); | ||||
632 | unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo(); | ||||
633 | return (int)LArgNo - (int)RArgNo; | ||||
634 | } | ||||
635 | |||||
636 | if (const auto *LGV = dyn_cast<GlobalValue>(LV)) { | ||||
637 | const auto *RGV = cast<GlobalValue>(RV); | ||||
638 | |||||
639 | const auto IsGVNameSemantic = [&](const GlobalValue *GV) { | ||||
640 | auto LT = GV->getLinkage(); | ||||
641 | return !(GlobalValue::isPrivateLinkage(LT) || | ||||
642 | GlobalValue::isInternalLinkage(LT)); | ||||
643 | }; | ||||
644 | |||||
645 | // Use the names to distinguish the two values, but only if the | ||||
646 | // names are semantically important. | ||||
647 | if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV)) | ||||
648 | return LGV->getName().compare(RGV->getName()); | ||||
649 | } | ||||
650 | |||||
651 | // For instructions, compare their loop depth, and their operand count. This | ||||
652 | // is pretty loose. | ||||
653 | if (const auto *LInst = dyn_cast<Instruction>(LV)) { | ||||
654 | const auto *RInst = cast<Instruction>(RV); | ||||
655 | |||||
656 | // Compare loop depths. | ||||
657 | const BasicBlock *LParent = LInst->getParent(), | ||||
658 | *RParent = RInst->getParent(); | ||||
659 | if (LParent != RParent) { | ||||
660 | unsigned LDepth = LI->getLoopDepth(LParent), | ||||
661 | RDepth = LI->getLoopDepth(RParent); | ||||
662 | if (LDepth != RDepth) | ||||
663 | return (int)LDepth - (int)RDepth; | ||||
664 | } | ||||
665 | |||||
666 | // Compare the number of operands. | ||||
667 | unsigned LNumOps = LInst->getNumOperands(), | ||||
668 | RNumOps = RInst->getNumOperands(); | ||||
669 | if (LNumOps != RNumOps) | ||||
670 | return (int)LNumOps - (int)RNumOps; | ||||
671 | |||||
672 | for (unsigned Idx : seq(0u, LNumOps)) { | ||||
673 | int Result = | ||||
674 | CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx), | ||||
675 | RInst->getOperand(Idx), Depth + 1); | ||||
676 | if (Result != 0) | ||||
677 | return Result; | ||||
678 | } | ||||
679 | } | ||||
680 | |||||
681 | EqCacheValue.unionSets(LV, RV); | ||||
682 | return 0; | ||||
683 | } | ||||
684 | |||||
685 | // Return negative, zero, or positive, if LHS is less than, equal to, or greater | ||||
686 | // than RHS, respectively. A three-way result allows recursive comparisons to be | ||||
687 | // more efficient. | ||||
688 | // If the max analysis depth was reached, return None, since we cannot tell | ||||
689 | // for sure whether they are equivalent. | ||||
690 | static Optional<int> | ||||
691 | CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV, | ||||
692 | EquivalenceClasses<const Value *> &EqCacheValue, | ||||
693 | const LoopInfo *const LI, const SCEV *LHS, | ||||
694 | const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) { | ||||
695 | // Fast-path: SCEVs are uniqued so we can do a quick equality check. | ||||
696 | if (LHS == RHS) | ||||
697 | return 0; | ||||
698 | |||||
699 | // Primarily, sort the SCEVs by their getSCEVType(). | ||||
700 | SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType(); | ||||
701 | if (LType != RType) | ||||
702 | return (int)LType - (int)RType; | ||||
703 | |||||
704 | if (EqCacheSCEV.isEquivalent(LHS, RHS)) | ||||
705 | return 0; | ||||
706 | |||||
707 | if (Depth > MaxSCEVCompareDepth) | ||||
708 | return None; | ||||
709 | |||||
710 | // Aside from the getSCEVType() ordering, the particular ordering | ||||
711 | // isn't very important except that it's beneficial to be consistent, | ||||
712 | // so that (a + b) and (b + a) don't end up as different expressions. | ||||
713 | switch (LType) { | ||||
714 | case scUnknown: { | ||||
715 | const SCEVUnknown *LU = cast<SCEVUnknown>(LHS); | ||||
716 | const SCEVUnknown *RU = cast<SCEVUnknown>(RHS); | ||||
717 | |||||
718 | int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(), | ||||
719 | RU->getValue(), Depth + 1); | ||||
720 | if (X == 0) | ||||
721 | EqCacheSCEV.unionSets(LHS, RHS); | ||||
722 | return X; | ||||
723 | } | ||||
724 | |||||
725 | case scConstant: { | ||||
726 | const SCEVConstant *LC = cast<SCEVConstant>(LHS); | ||||
727 | const SCEVConstant *RC = cast<SCEVConstant>(RHS); | ||||
728 | |||||
729 | // Compare constant values. | ||||
730 | const APInt &LA = LC->getAPInt(); | ||||
731 | const APInt &RA = RC->getAPInt(); | ||||
732 | unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth(); | ||||
733 | if (LBitWidth != RBitWidth) | ||||
734 | return (int)LBitWidth - (int)RBitWidth; | ||||
735 | return LA.ult(RA) ? -1 : 1; | ||||
736 | } | ||||
737 | |||||
738 | case scAddRecExpr: { | ||||
739 | const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS); | ||||
740 | const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS); | ||||
741 | |||||
742 | // There is always a dominance between two recs that are used by one SCEV, | ||||
743 | // so we can safely sort recs by loop header dominance. We require such | ||||
744 | // order in getAddExpr. | ||||
745 | const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop(); | ||||
746 | if (LLoop != RLoop) { | ||||
747 | const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader(); | ||||
748 | assert(LHead != RHead && "Two loops share the same header?"); | ||||
749 | if (DT.dominates(LHead, RHead)) | ||||
750 | return 1; | ||||
751 | else | ||||
752 | assert(DT.dominates(RHead, LHead) && | ||||
753 | "No dominance between recurrences used by one SCEV?"); | ||||
754 | return -1; | ||||
755 | } | ||||
756 | |||||
757 | // Addrec complexity grows with operand count. | ||||
758 | unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands(); | ||||
759 | if (LNumOps != RNumOps) | ||||
760 | return (int)LNumOps - (int)RNumOps; | ||||
761 | |||||
762 | // Lexicographically compare. | ||||
763 | for (unsigned i = 0; i != LNumOps; ++i) { | ||||
764 | auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, | ||||
765 | LA->getOperand(i), RA->getOperand(i), DT, | ||||
766 | Depth + 1); | ||||
767 | if (X != 0) | ||||
768 | return X; | ||||
769 | } | ||||
770 | EqCacheSCEV.unionSets(LHS, RHS); | ||||
771 | return 0; | ||||
772 | } | ||||
773 | |||||
774 | case scAddExpr: | ||||
775 | case scMulExpr: | ||||
776 | case scSMaxExpr: | ||||
777 | case scUMaxExpr: | ||||
778 | case scSMinExpr: | ||||
779 | case scUMinExpr: { | ||||
780 | const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS); | ||||
781 | const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS); | ||||
782 | |||||
783 | // Lexicographically compare n-ary expressions. | ||||
784 | unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands(); | ||||
785 | if (LNumOps != RNumOps) | ||||
786 | return (int)LNumOps - (int)RNumOps; | ||||
787 | |||||
788 | for (unsigned i = 0; i != LNumOps; ++i) { | ||||
789 | auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, | ||||
790 | LC->getOperand(i), RC->getOperand(i), DT, | ||||
791 | Depth + 1); | ||||
792 | if (X != 0) | ||||
793 | return X; | ||||
794 | } | ||||
795 | EqCacheSCEV.unionSets(LHS, RHS); | ||||
796 | return 0; | ||||
797 | } | ||||
798 | |||||
799 | case scUDivExpr: { | ||||
800 | const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS); | ||||
801 | const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS); | ||||
802 | |||||
803 | // Lexicographically compare udiv expressions. | ||||
804 | auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(), | ||||
805 | RC->getLHS(), DT, Depth + 1); | ||||
806 | if (X != 0) | ||||
807 | return X; | ||||
808 | X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(), | ||||
809 | RC->getRHS(), DT, Depth + 1); | ||||
810 | if (X == 0) | ||||
811 | EqCacheSCEV.unionSets(LHS, RHS); | ||||
812 | return X; | ||||
813 | } | ||||
814 | |||||
815 | case scPtrToInt: | ||||
816 | case scTruncate: | ||||
817 | case scZeroExtend: | ||||
818 | case scSignExtend: { | ||||
819 | const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS); | ||||
820 | const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS); | ||||
821 | |||||
822 | // Compare cast expressions by operand. | ||||
823 | auto X = | ||||
824 | CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(), | ||||
825 | RC->getOperand(), DT, Depth + 1); | ||||
826 | if (X == 0) | ||||
827 | EqCacheSCEV.unionSets(LHS, RHS); | ||||
828 | return X; | ||||
829 | } | ||||
830 | |||||
831 | case scCouldNotCompute: | ||||
832 | llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!")__builtin_unreachable(); | ||||
833 | } | ||||
834 | llvm_unreachable("Unknown SCEV kind!")__builtin_unreachable(); | ||||
835 | } | ||||
836 | |||||
837 | /// Given a list of SCEV objects, order them by their complexity, and group | ||||
838 | /// objects of the same complexity together by value. When this routine is | ||||
839 | /// finished, we know that any duplicates in the vector are consecutive and that | ||||
840 | /// complexity is monotonically increasing. | ||||
841 | /// | ||||
842 | /// Note that we take special precautions to ensure that we get deterministic | ||||
843 | /// results from this routine. In other words, we don't want the results of | ||||
844 | /// this to depend on where the addresses of various SCEV objects happened to | ||||
845 | /// land in memory. | ||||
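| /// | ||||
| /// For example, grouping the operands of (%x + %y + %x) makes the two | ||||
| /// occurrences of %x adjacent, which lets getAddExpr fold them into | ||||
| /// (2 * %x) + %y. | ||||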
846 | static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops, | ||||
847 | LoopInfo *LI, DominatorTree &DT) { | ||||
848 | if (Ops.size() < 2) return; // Noop | ||||
849 | |||||
850 | EquivalenceClasses<const SCEV *> EqCacheSCEV; | ||||
851 | EquivalenceClasses<const Value *> EqCacheValue; | ||||
852 | |||||
853 | // Whether LHS has provably less complexity than RHS. | ||||
854 | auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) { | ||||
855 | auto Complexity = | ||||
856 | CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT); | ||||
857 | return Complexity && *Complexity < 0; | ||||
858 | }; | ||||
859 | if (Ops.size() == 2) { | ||||
860 | // This is the common case, which also happens to be trivially simple. | ||||
861 | // Special case it. | ||||
862 | const SCEV *&LHS = Ops[0], *&RHS = Ops[1]; | ||||
863 | if (IsLessComplex(RHS, LHS)) | ||||
864 | std::swap(LHS, RHS); | ||||
865 | return; | ||||
866 | } | ||||
867 | |||||
868 | // Do the rough sort by complexity. | ||||
869 | llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) { | ||||
870 | return IsLessComplex(LHS, RHS); | ||||
871 | }); | ||||
872 | |||||
873 | // Now that we are sorted by complexity, group elements of the same | ||||
874 | // complexity. Note that this is, at worst, N^2, but the vector is likely to | ||||
875 | // be extremely short in practice. Note that we take this approach because we | ||||
876 | // do not want to depend on the addresses of the objects we are grouping. | ||||
877 | for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) { | ||||
878 | const SCEV *S = Ops[i]; | ||||
879 | unsigned Complexity = S->getSCEVType(); | ||||
880 | |||||
881 | // If there are any objects of the same complexity and same value as this | ||||
882 | // one, group them. | ||||
883 | for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) { | ||||
884 | if (Ops[j] == S) { // Found a duplicate. | ||||
885 | // Move it to immediately after i'th element. | ||||
886 | std::swap(Ops[i+1], Ops[j]); | ||||
887 | ++i; // no need to rescan it. | ||||
888 | if (i == e-2) return; // Done! | ||||
889 | } | ||||
890 | } | ||||
891 | } | ||||
892 | } | ||||
893 | |||||
894 | /// Returns true if \p Ops contains a huge SCEV (an operand whose expression | ||||
895 | /// tree has at least HugeExprThreshold nodes). | ||||
896 | static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) { | ||||
897 | return any_of(Ops, [](const SCEV *S) { | ||||
898 | return S->getExpressionSize() >= HugeExprThreshold; | ||||
899 | }); | ||||
900 | } | ||||
901 | |||||
902 | //===----------------------------------------------------------------------===// | ||||
903 | // Simple SCEV method implementations | ||||
904 | //===----------------------------------------------------------------------===// | ||||
905 | |||||
906 | /// Compute BC(It, K). The result has width W. Assume K > 0. | ||||
907 | static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K, | ||||
908 | ScalarEvolution &SE, | ||||
909 | Type *ResultTy) { | ||||
910 | // Handle the simplest case efficiently. | ||||
911 | if (K == 1) | ||||
912 | return SE.getTruncateOrZeroExtend(It, ResultTy); | ||||
913 | |||||
914 | // We are using the following formula for BC(It, K): | ||||
915 | // | ||||
916 | // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K! | ||||
917 | // | ||||
918 | // Suppose, W is the bitwidth of the return value. We must be prepared for | ||||
919 | // overflow. Hence, we must ensure that the result of our computation is | ||||
920 | // equal to the accurate one modulo 2^W. Unfortunately, division isn't | ||||
921 | // safe in modular arithmetic. | ||||
922 | // | ||||
923 | // However, this code doesn't use exactly that formula; the formula it uses | ||||
924 | // is something like the following, where T is the number of factors of 2 in | ||||
925 | // K! (i.e. trailing zeros in the binary representation of K!), and ^ is | ||||
926 | // exponentiation: | ||||
927 | // | ||||
928 | // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T) | ||||
929 | // | ||||
930 | // This formula is trivially equivalent to the previous formula. However, | ||||
931 | // this formula can be implemented much more efficiently. The trick is that | ||||
932 | // K! / 2^T is odd, and exact division by an odd number *is* safe in modular | ||||
933 | // arithmetic. To do exact division in modular arithmetic, all we have | ||||
934 | // to do is multiply by the inverse. Therefore, this step can be done at | ||||
935 | // width W. | ||||
936 | // | ||||
937 | // The next issue is how to safely do the division by 2^T. The way this | ||||
938 | // is done is by doing the multiplication step at a width of at least W + T | ||||
939 | // bits. This way, the bottom W+T bits of the product are accurate. Then, | ||||
940 | // when we perform the division by 2^T (which is equivalent to a right shift | ||||
941 | // by T), the bottom W bits are accurate. Extra bits are okay; they'll get | ||||
942 | // truncated out after the division by 2^T. | ||||
943 | // | ||||
944 | // In comparison to just directly using the first formula, this technique | ||||
945 | // is much more efficient; using the first formula requires W * K bits, | ||||
946 | // but this formula uses less than W + K bits. Also, the first formula requires | ||||
947 | // a division step, whereas this formula only requires multiplies and shifts. | ||||
948 | // | ||||
949 | // It doesn't matter whether the subtraction step is done in the calculation | ||||
950 | // width or the input iteration count's width; if the subtraction overflows, | ||||
951 | // the result must be zero anyway. We prefer here to do it in the width of | ||||
952 | // the induction variable because it helps a lot for certain cases; CodeGen | ||||
953 | // isn't smart enough to ignore the overflow, which leads to much less | ||||
954 | // efficient code if the width of the subtraction is wider than the native | ||||
955 | // register width. | ||||
956 | // | ||||
957 | // (It's possible to not widen at all by pulling out factors of 2 before | ||||
958 | // the multiplication; for example, K=2 can be calculated as | ||||
959 | // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires | ||||
960 | // extra arithmetic, so it's not an obvious win, and it gets | ||||
961 | // much more complicated for K > 3.) | ||||
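| // | ||||
| // As a concrete example, take K = 3 and W = 32. Then K! = 6 = 2^1 * 3, so | ||||
| // T = 1 and the odd part K! / 2^T is 3. The product It*(It-1)*(It-2) is | ||||
| // computed at W + T = 33 bits, shifted right by T = 1, truncated back to | ||||
| // 32 bits, and multiplied by the inverse of 3 modulo 2^32, 0xAAAAAAAB | ||||
| // (3 * 0xAAAAAAAB = 2^33 + 1, which is 1 modulo 2^32). | ||||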
962 | |||||
963 | // Protection from insane SCEVs; this bound is conservative, | ||||
964 | // but it probably doesn't matter. | ||||
965 | if (K > 1000) | ||||
966 | return SE.getCouldNotCompute(); | ||||
967 | |||||
968 | unsigned W = SE.getTypeSizeInBits(ResultTy); | ||||
969 | |||||
970 | // Calculate K! / 2^T and T; we divide out the factors of two before | ||||
971 | // multiplying for calculating K! / 2^T to avoid overflow. | ||||
972 | // Other overflow doesn't matter because we only care about the bottom | ||||
973 | // W bits of the result. | ||||
974 | APInt OddFactorial(W, 1); | ||||
975 | unsigned T = 1; | ||||
976 | for (unsigned i = 3; i <= K; ++i) { | ||||
977 | APInt Mult(W, i); | ||||
978 | unsigned TwoFactors = Mult.countTrailingZeros(); | ||||
979 | T += TwoFactors; | ||||
980 | Mult.lshrInPlace(TwoFactors); | ||||
981 | OddFactorial *= Mult; | ||||
982 | } | ||||
983 | |||||
984 | // We need at least W + T bits for the multiplication step | ||||
985 | unsigned CalculationBits = W + T; | ||||
986 | |||||
987 | // Calculate 2^T, at width T+W. | ||||
988 | APInt DivFactor = APInt::getOneBitSet(CalculationBits, T); | ||||
989 | |||||
990 | // Calculate the multiplicative inverse of K! / 2^T; | ||||
991 | // this multiplication factor will perform the exact division by | ||||
992 | // K! / 2^T. | ||||
993 | APInt Mod = APInt::getSignedMinValue(W+1); | ||||
994 | APInt MultiplyFactor = OddFactorial.zext(W+1); | ||||
995 | MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod); | ||||
996 | MultiplyFactor = MultiplyFactor.trunc(W); | ||||
997 | |||||
998 | // Calculate the product, at width T+W | ||||
999 | IntegerType *CalculationTy = IntegerType::get(SE.getContext(), | ||||
1000 | CalculationBits); | ||||
1001 | const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy); | ||||
1002 | for (unsigned i = 1; i != K; ++i) { | ||||
1003 | const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i)); | ||||
1004 | Dividend = SE.getMulExpr(Dividend, | ||||
1005 | SE.getTruncateOrZeroExtend(S, CalculationTy)); | ||||
1006 | } | ||||
1007 | |||||
1008 | // Divide by 2^T | ||||
1009 | const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor)); | ||||
1010 | |||||
1011 | // Truncate the result, and divide by K! / 2^T. | ||||
1012 | |||||
1013 | return SE.getMulExpr(SE.getConstant(MultiplyFactor), | ||||
1014 | SE.getTruncateOrZeroExtend(DivResult, ResultTy)); | ||||
1015 | } | ||||
1016 | |||||
1017 | /// Return the value of this chain of recurrences at the specified iteration | ||||
1018 | /// number. We can evaluate this recurrence by multiplying each element in the | ||||
1019 | /// chain by the binomial coefficient corresponding to it. In other words, we | ||||
1020 | /// can evaluate {A,+,B,+,C,+,D} as: | ||||
1021 | /// | ||||
1022 | /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3) | ||||
1023 | /// | ||||
1024 | /// where BC(It, k) stands for binomial coefficient. | ||||
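| /// | ||||
| /// For example, {A,+,B} evaluates to A + B*It, and {A,+,B,+,C} evaluates to | ||||
| /// A + B*It + C*(It*(It-1)/2). | ||||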
1025 | const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It, | ||||
1026 | ScalarEvolution &SE) const { | ||||
1027 | return evaluateAtIteration(makeArrayRef(op_begin(), op_end()), It, SE); | ||||
1028 | } | ||||
1029 | |||||
1030 | const SCEV * | ||||
1031 | SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands, | ||||
1032 | const SCEV *It, ScalarEvolution &SE) { | ||||
1033 | assert(Operands.size() > 0); | ||||
1034 | const SCEV *Result = Operands[0]; | ||||
1035 | for (unsigned i = 1, e = Operands.size(); i != e; ++i) { | ||||
1036 | // The computation is correct in the face of overflow provided that the | ||||
1037 | // multiplication is performed _after_ the evaluation of the binomial | ||||
1038 | // coefficient. | ||||
1039 | const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType()); | ||||
1040 | if (isa<SCEVCouldNotCompute>(Coeff)) | ||||
1041 | return Coeff; | ||||
1042 | |||||
1043 | Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff)); | ||||
1044 | } | ||||
1045 | return Result; | ||||
1046 | } | ||||
1047 | |||||
1048 | //===----------------------------------------------------------------------===// | ||||
1049 | // SCEV Expression folder implementations | ||||
1050 | //===----------------------------------------------------------------------===// | ||||
1051 | |||||
1052 | const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op, | ||||
1053 | unsigned Depth) { | ||||
1054 | assert(Depth <= 1 && | ||||
1055 | "getLosslessPtrToIntExpr() should self-recurse at most once."); | ||||
1056 | |||||
1057 | // We could be called with an integer-typed operand during SCEV rewrites. | ||||
1058 | // Since the operand is an integer already, just perform zext/trunc/self cast. | ||||
1059 | if (!Op->getType()->isPointerTy()) | ||||
1060 | return Op; | ||||
1061 | |||||
1062 | // What would be an ID for such a SCEV cast expression? | ||||
1063 | FoldingSetNodeID ID; | ||||
1064 | ID.AddInteger(scPtrToInt); | ||||
1065 | ID.AddPointer(Op); | ||||
1066 | |||||
1067 | void *IP = nullptr; | ||||
1068 | |||||
1069 | // Is there already an expression for such a cast? | ||||
1070 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) | ||||
1071 | return S; | ||||
1072 | |||||
1073 | // It isn't legal for optimizations to construct new ptrtoint expressions | ||||
1074 | // for non-integral pointers. | ||||
1075 | if (getDataLayout().isNonIntegralPointerType(Op->getType())) | ||||
1076 | return getCouldNotCompute(); | ||||
1077 | |||||
1078 | Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType()); | ||||
1079 | |||||
1080 | // We can only trivially model ptrtoint if SCEV's effective (integer) type | ||||
1081 | // is sufficiently wide to represent all possible pointer values. | ||||
1082 | // We could theoretically teach SCEV to truncate wider pointers, but | ||||
1083 | // that isn't implemented for now. | ||||
1084 | if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) != | ||||
1085 | getDataLayout().getTypeSizeInBits(IntPtrTy)) | ||||
1086 | return getCouldNotCompute(); | ||||
1087 | |||||
1088 | // If not, is this expression something we can't reduce any further? | ||||
1089 | if (auto *U = dyn_cast<SCEVUnknown>(Op)) { | ||||
1090 | // Perform some basic constant folding. If the operand of the ptr2int cast | ||||
1091 | // is a null pointer, don't create a ptr2int SCEV expression (that will be | ||||
1092 | // left as-is), but produce a zero constant. | ||||
1093 | // NOTE: We could handle a more general case, but lack motivational cases. | ||||
1094 | if (isa<ConstantPointerNull>(U->getValue())) | ||||
1095 | return getZero(IntPtrTy); | ||||
1096 | |||||
1097 | // Create an explicit cast node. | ||||
1098 | // We can reuse the existing insert position since if we get here, | ||||
1099 | // we won't have made any changes which would invalidate it. | ||||
1100 | SCEV *S = new (SCEVAllocator) | ||||
1101 | SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy); | ||||
1102 | UniqueSCEVs.InsertNode(S, IP); | ||||
1103 | addToLoopUseLists(S); | ||||
1104 | return S; | ||||
1105 | } | ||||
1106 | |||||
1107 | assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for " | ||||
1108 | "non-SCEVUnknown's."); | ||||
1109 | |||||
1110 | // Otherwise, we've got some expression that is more complex than just a | ||||
1111 | // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an | ||||
1112 | // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown | ||||
1113 | // only, and the expressions must otherwise be integer-typed. | ||||
1114 | // So sink the cast down to the SCEVUnknown's. | ||||
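| // For example, the pointer-typed SCEV (8 + %ptr) is rewritten to | ||||
| // (8 + ptrtoint(%ptr)): the cast lands on the SCEVUnknown leaf %ptr and | ||||
| // the addition itself is performed on integers. | ||||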
1115 | |||||
1116 | /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression, | ||||
1117 | /// which computes a pointer-typed value, and rewrites the whole expression | ||||
1118 | /// tree so that *all* the computations are done on integers, and the only | ||||
1119 | /// pointer-typed operands in the expression are SCEVUnknown. | ||||
1120 | class SCEVPtrToIntSinkingRewriter | ||||
1121 | : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> { | ||||
1122 | using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>; | ||||
1123 | |||||
1124 | public: | ||||
1125 | SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {} | ||||
1126 | |||||
1127 | static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) { | ||||
1128 | SCEVPtrToIntSinkingRewriter Rewriter(SE); | ||||
1129 | return Rewriter.visit(Scev); | ||||
1130 | } | ||||
1131 | |||||
1132 | const SCEV *visit(const SCEV *S) { | ||||
1133 | Type *STy = S->getType(); | ||||
1134 | // If the expression is not pointer-typed, just keep it as-is. | ||||
1135 | if (!STy->isPointerTy()) | ||||
1136 | return S; | ||||
1137 | // Else, recursively sink the cast down into it. | ||||
1138 | return Base::visit(S); | ||||
1139 | } | ||||
1140 | |||||
1141 | const SCEV *visitAddExpr(const SCEVAddExpr *Expr) { | ||||
1142 | SmallVector<const SCEV *, 2> Operands; | ||||
1143 | bool Changed = false; | ||||
1144 | for (auto *Op : Expr->operands()) { | ||||
1145 | Operands.push_back(visit(Op)); | ||||
1146 | Changed |= Op != Operands.back(); | ||||
1147 | } | ||||
1148 | return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags()); | ||||
1149 | } | ||||
1150 | |||||
1151 | const SCEV *visitMulExpr(const SCEVMulExpr *Expr) { | ||||
1152 | SmallVector<const SCEV *, 2> Operands; | ||||
1153 | bool Changed = false; | ||||
1154 | for (auto *Op : Expr->operands()) { | ||||
1155 | Operands.push_back(visit(Op)); | ||||
1156 | Changed |= Op != Operands.back(); | ||||
1157 | } | ||||
1158 | return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags()); | ||||
1159 | } | ||||
1160 | |||||
1161 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { | ||||
1162 | assert(Expr->getType()->isPointerTy() && | ||||
1163 | "Should only reach pointer-typed SCEVUnknown's."); | ||||
1164 | return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1); | ||||
1165 | } | ||||
1166 | }; | ||||
1167 | |||||
1168 | // And actually perform the cast sinking. | ||||
1169 | const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this); | ||||
1170 | assert(IntOp->getType()->isIntegerTy() && | ||||
1171 | "We must have succeeded in sinking the cast, " | ||||
1172 | "and ending up with an integer-typed expression!"); | ||||
1173 | return IntOp; | ||||
1174 | } | ||||
1175 | |||||
1176 | const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) { | ||||
1177 | assert(Ty->isIntegerTy() && "Target type must be an integer type!"); | ||||
1178 | |||||
1179 | const SCEV *IntOp = getLosslessPtrToIntExpr(Op); | ||||
1180 | if (isa<SCEVCouldNotCompute>(IntOp)) | ||||
1181 | return IntOp; | ||||
1182 | |||||
1183 | return getTruncateOrZeroExtend(IntOp, Ty); | ||||
1184 | } | ||||
1185 | |||||
1186 | const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty, | ||||
1187 | unsigned Depth) { | ||||
1188 | assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) && | ||||
1189 | "This is not a truncating conversion!"); | ||||
1190 | assert(isSCEVable(Ty) && | ||||
1191 | "This is not a conversion to a SCEVable type!"); | ||||
1192 | assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!"); | ||||
1193 | Ty = getEffectiveSCEVType(Ty); | ||||
1194 | |||||
1195 | FoldingSetNodeID ID; | ||||
1196 | ID.AddInteger(scTruncate); | ||||
1197 | ID.AddPointer(Op); | ||||
1198 | ID.AddPointer(Ty); | ||||
1199 | void *IP = nullptr; | ||||
1200 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; | ||||
1201 | |||||
1202 | // Fold if the operand is constant. | ||||
1203 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) | ||||
1204 | return getConstant( | ||||
1205 | cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty))); | ||||
1206 | |||||
1207 | // trunc(trunc(x)) --> trunc(x) | ||||
1208 | if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) | ||||
1209 | return getTruncateExpr(ST->getOperand(), Ty, Depth + 1); | ||||
1210 | |||||
1211 | // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing | ||||
1212 | if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) | ||||
1213 | return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1); | ||||
1214 | |||||
1215 | // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing | ||||
1216 | if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) | ||||
1217 | return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1); | ||||
1218 | |||||
1219 | if (Depth > MaxCastDepth) { | ||||
1220 | SCEV *S = | ||||
1221 | new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty); | ||||
1222 | UniqueSCEVs.InsertNode(S, IP); | ||||
1223 | addToLoopUseLists(S); | ||||
1224 | return S; | ||||
1225 | } | ||||
1226 | |||||
1227 | // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and | ||||
1228 | // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN), | ||||
1229 | // if after transforming we have at most one truncate, not counting truncates | ||||
1230 | // that replace other casts. | ||||
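     |   // For example, trunc((zext A) + B) becomes trunc(zext A) + trunc(B): | ||||
     |   // the first new truncate folds into the existing zext, so only the | ||||
     |   // trunc(B) node counts against the limit above. | ||||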
1231 | if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) { | ||||
1232 | auto *CommOp = cast<SCEVCommutativeExpr>(Op); | ||||
1233 | SmallVector<const SCEV *, 4> Operands; | ||||
1234 | unsigned numTruncs = 0; | ||||
1235 | for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2; | ||||
1236 | ++i) { | ||||
1237 | const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1); | ||||
1238 | if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) && | ||||
1239 | isa<SCEVTruncateExpr>(S)) | ||||
1240 | numTruncs++; | ||||
1241 | Operands.push_back(S); | ||||
1242 | } | ||||
1243 | if (numTruncs < 2) { | ||||
1244 | if (isa<SCEVAddExpr>(Op)) | ||||
1245 | return getAddExpr(Operands); | ||||
1246 | else if (isa<SCEVMulExpr>(Op)) | ||||
1247 | return getMulExpr(Operands); | ||||
1248 | else | ||||
1249 |       llvm_unreachable("Unexpected SCEV type for Op."); | ||||
1250 | } | ||||
1251 |     // Although we checked at the beginning that ID was not in the cache, the | ||||
1252 |     // recursive calls above may have inserted an entry for ID into the cache. | ||||
1253 |     // So if we find it now, just return it. | ||||
1254 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) | ||||
1255 | return S; | ||||
1256 | } | ||||
1257 | |||||
1258 | // If the input value is a chrec scev, truncate the chrec's operands. | ||||
1259 | if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { | ||||
1260 | SmallVector<const SCEV *, 4> Operands; | ||||
1261 | for (const SCEV *Op : AddRec->operands()) | ||||
1262 | Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1)); | ||||
1263 | return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap); | ||||
1264 | } | ||||
1265 | |||||
1266 | // Return zero if truncating to known zeros. | ||||
1267 | uint32_t MinTrailingZeros = GetMinTrailingZeros(Op); | ||||
1268 | if (MinTrailingZeros >= getTypeSizeInBits(Ty)) | ||||
1269 | return getZero(Ty); | ||||
1270 | |||||
1271 | // The cast wasn't folded; create an explicit cast node. We can reuse | ||||
1272 | // the existing insert position since if we get here, we won't have | ||||
1273 | // made any changes which would invalidate it. | ||||
1274 | SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), | ||||
1275 | Op, Ty); | ||||
1276 | UniqueSCEVs.InsertNode(S, IP); | ||||
1277 | addToLoopUseLists(S); | ||||
1278 | return S; | ||||
1279 | } | ||||
1280 | |||||
1281 | // Get the limit of a recurrence such that incrementing by Step cannot cause | ||||
1282 | // signed overflow as long as the value of the recurrence within the | ||||
1283 | // loop does not exceed this limit before incrementing. | ||||
1284 | static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step, | ||||
1285 | ICmpInst::Predicate *Pred, | ||||
1286 | ScalarEvolution *SE) { | ||||
1287 | unsigned BitWidth = SE->getTypeSizeInBits(Step->getType()); | ||||
1288 | if (SE->isKnownPositive(Step)) { | ||||
1289 | *Pred = ICmpInst::ICMP_SLT; | ||||
1290 | return SE->getConstant(APInt::getSignedMinValue(BitWidth) - | ||||
1291 | SE->getSignedRangeMax(Step)); | ||||
1292 | } | ||||
1293 | if (SE->isKnownNegative(Step)) { | ||||
1294 | *Pred = ICmpInst::ICMP_SGT; | ||||
1295 | return SE->getConstant(APInt::getSignedMaxValue(BitWidth) - | ||||
1296 | SE->getSignedRangeMin(Step)); | ||||
1297 | } | ||||
1298 | return nullptr; | ||||
1299 | } | ||||
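     | // For example, for an i8 recurrence with a step known to lie in [1, 2], | ||||
     | // the limit is -128 - 2 = 126 (mod 2^8) with predicate ICMP_SLT: values | ||||
     | // strictly below 126 can be incremented by at most 2 without overflow. | ||||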
1300 | |||||
1301 | // Get the limit of a recurrence such that incrementing by Step cannot cause | ||||
1302 | // unsigned overflow as long as the value of the recurrence within the loop does | ||||
1303 | // not exceed this limit before incrementing. | ||||
1304 | static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step, | ||||
1305 | ICmpInst::Predicate *Pred, | ||||
1306 | ScalarEvolution *SE) { | ||||
1307 | unsigned BitWidth = SE->getTypeSizeInBits(Step->getType()); | ||||
1308 | *Pred = ICmpInst::ICMP_ULT; | ||||
1309 | |||||
1310 | return SE->getConstant(APInt::getMinValue(BitWidth) - | ||||
1311 | SE->getUnsignedRangeMax(Step)); | ||||
1312 | } | ||||
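     | // For example, for an i8 recurrence whose step has unsigned maximum 3, | ||||
     | // the limit is 0 - 3 = 253 (mod 2^8) with predicate ICMP_ULT: values | ||||
     | // strictly below 253 can be incremented by at most 3 without wrapping. | ||||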
1313 | |||||
1314 | namespace { | ||||
1315 | |||||
1316 | struct ExtendOpTraitsBase { | ||||
1317 | typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *, | ||||
1318 | unsigned); | ||||
1319 | }; | ||||
1320 | |||||
1321 | // Used to make code generic over signed and unsigned overflow. | ||||
1322 | template <typename ExtendOp> struct ExtendOpTraits { | ||||
1323 | // Members present: | ||||
1324 | // | ||||
1325 | // static const SCEV::NoWrapFlags WrapType; | ||||
1326 | // | ||||
1327 | // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr; | ||||
1328 | // | ||||
1329 | // static const SCEV *getOverflowLimitForStep(const SCEV *Step, | ||||
1330 | // ICmpInst::Predicate *Pred, | ||||
1331 | // ScalarEvolution *SE); | ||||
1332 | }; | ||||
1333 | |||||
1334 | template <> | ||||
1335 | struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase { | ||||
1336 | static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW; | ||||
1337 | |||||
1338 | static const GetExtendExprTy GetExtendExpr; | ||||
1339 | |||||
1340 | static const SCEV *getOverflowLimitForStep(const SCEV *Step, | ||||
1341 | ICmpInst::Predicate *Pred, | ||||
1342 | ScalarEvolution *SE) { | ||||
1343 | return getSignedOverflowLimitForStep(Step, Pred, SE); | ||||
1344 | } | ||||
1345 | }; | ||||
1346 | |||||
1347 | const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< | ||||
1348 | SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr; | ||||
1349 | |||||
1350 | template <> | ||||
1351 | struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase { | ||||
1352 | static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW; | ||||
1353 | |||||
1354 | static const GetExtendExprTy GetExtendExpr; | ||||
1355 | |||||
1356 | static const SCEV *getOverflowLimitForStep(const SCEV *Step, | ||||
1357 | ICmpInst::Predicate *Pred, | ||||
1358 | ScalarEvolution *SE) { | ||||
1359 | return getUnsignedOverflowLimitForStep(Step, Pred, SE); | ||||
1360 | } | ||||
1361 | }; | ||||
1362 | |||||
1363 | const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< | ||||
1364 | SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr; | ||||
1365 | |||||
1366 | } // end anonymous namespace | ||||
1367 | |||||
1368 | // The recurrence AR has been shown to have no signed/unsigned wrap or something | ||||
1369 | // close to it. Typically, if we can prove NSW/NUW for AR, then we can just as | ||||
1370 | // easily prove NSW/NUW for its preincrement or postincrement sibling. This | ||||
1371 | // allows normalizing a sign/zero extended AddRec as such: {sext/zext(Step + | ||||
1372 | // Start),+,Step} => {(Step + sext/zext(Start)),+,Step}. As a result, the | ||||
1373 | // expression "Step + sext/zext(PreIncAR)" is congruent with | ||||
1374 | // "sext/zext(PostIncAR)". | ||||
1375 | template <typename ExtendOpTy> | ||||
1376 | static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, | ||||
1377 | ScalarEvolution *SE, unsigned Depth) { | ||||
1378 | auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; | ||||
1379 | auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; | ||||
1380 | |||||
1381 | const Loop *L = AR->getLoop(); | ||||
1382 | const SCEV *Start = AR->getStart(); | ||||
1383 | const SCEV *Step = AR->getStepRecurrence(*SE); | ||||
1384 | |||||
1385 |   // Check for a simple-looking step prior to loop entry. | ||||
1386 | const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start); | ||||
1387 | if (!SA) | ||||
1388 | return nullptr; | ||||
1389 | |||||
1390 | // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV | ||||
1391 | // subtraction is expensive. For this purpose, perform a quick and dirty | ||||
1392 | // difference, by checking for Step in the operand list. | ||||
1393 | SmallVector<const SCEV *, 4> DiffOps; | ||||
1394 | for (const SCEV *Op : SA->operands()) | ||||
1395 | if (Op != Step) | ||||
1396 | DiffOps.push_back(Op); | ||||
1397 | |||||
1398 | if (DiffOps.size() == SA->getNumOperands()) | ||||
1399 | return nullptr; | ||||
1400 | |||||
1401 | // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` + | ||||
1402 | // `Step`: | ||||
1403 | |||||
1404 | // 1. NSW/NUW flags on the step increment. | ||||
1405 | auto PreStartFlags = | ||||
1406 | ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW); | ||||
1407 | const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags); | ||||
1408 | const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>( | ||||
1409 | SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap)); | ||||
1410 | |||||
1411 | // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies | ||||
1412 | // "S+X does not sign/unsign-overflow". | ||||
1413 | // | ||||
1414 | |||||
1415 | const SCEV *BECount = SE->getBackedgeTakenCount(L); | ||||
1416 | if (PreAR && PreAR->getNoWrapFlags(WrapType) && | ||||
1417 | !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount)) | ||||
1418 | return PreStart; | ||||
1419 | |||||
1420 | // 2. Direct overflow check on the step operation's expression. | ||||
1421 | unsigned BitWidth = SE->getTypeSizeInBits(AR->getType()); | ||||
1422 | Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2); | ||||
1423 | const SCEV *OperandExtendedStart = | ||||
1424 | SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth), | ||||
1425 | (SE->*GetExtendExpr)(Step, WideTy, Depth)); | ||||
1426 | if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) { | ||||
1427 | if (PreAR && AR->getNoWrapFlags(WrapType)) { | ||||
1428 | // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW | ||||
1429 | // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then | ||||
1430 | // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact. | ||||
1431 | SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType); | ||||
1432 | } | ||||
1433 | return PreStart; | ||||
1434 | } | ||||
1435 | |||||
1436 | // 3. Loop precondition. | ||||
1437 | ICmpInst::Predicate Pred; | ||||
1438 | const SCEV *OverflowLimit = | ||||
1439 | ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE); | ||||
1440 | |||||
1441 | if (OverflowLimit && | ||||
1442 | SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) | ||||
1443 | return PreStart; | ||||
1444 | |||||
1445 | return nullptr; | ||||
1446 | } | ||||
1447 | |||||
1448 | // Get the normalized zero or sign extended expression for this AddRec's Start. | ||||
1449 | template <typename ExtendOpTy> | ||||
1450 | static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, | ||||
1451 | ScalarEvolution *SE, | ||||
1452 | unsigned Depth) { | ||||
1453 | auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; | ||||
1454 | |||||
1455 | const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth); | ||||
1456 | if (!PreStart) | ||||
1457 | return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth); | ||||
1458 | |||||
1459 | return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty, | ||||
1460 | Depth), | ||||
1461 | (SE->*GetExtendExpr)(PreStart, Ty, Depth)); | ||||
1462 | } | ||||
1463 | |||||
1464 | // Try to prove away overflow by looking at "nearby" add recurrences. A | ||||
1465 | // motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it | ||||
1466 | // does not itself wrap then we can conclude that `{1,+,4}` is `nuw`. | ||||
1467 | // | ||||
1468 | // Formally: | ||||
1469 | // | ||||
1470 | // {S,+,X} == {S-T,+,X} + T | ||||
1471 | // => Ext({S,+,X}) == Ext({S-T,+,X} + T) | ||||
1472 | // | ||||
1473 | // If ({S-T,+,X} + T) does not overflow ... (1) | ||||
1474 | // | ||||
1475 | // RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T) | ||||
1476 | // | ||||
1477 | // If {S-T,+,X} does not overflow ... (2) | ||||
1478 | // | ||||
1479 | // RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T) | ||||
1480 | // == {Ext(S-T)+Ext(T),+,Ext(X)} | ||||
1481 | // | ||||
1482 | // If (S-T)+T does not overflow ... (3) | ||||
1483 | // | ||||
1484 | // RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)} | ||||
1485 | // == {Ext(S),+,Ext(X)} == LHS | ||||
1486 | // | ||||
1487 | // Thus, if (1), (2) and (3) are true for some T, then | ||||
1488 | // Ext({S,+,X}) == {Ext(S),+,Ext(X)} | ||||
1489 | // | ||||
1490 | // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T) | ||||
1491 | // does not overflow" restricted to the 0th iteration. Therefore we only need | ||||
1492 | // to check for (1) and (2). | ||||
1493 | // | ||||
1494 | // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T | ||||
1495 | // is `Delta` (defined below). | ||||
1496 | template <typename ExtendOpTy> | ||||
1497 | bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start, | ||||
1498 | const SCEV *Step, | ||||
1499 | const Loop *L) { | ||||
1500 | auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; | ||||
1501 | |||||
1502 | // We restrict `Start` to a constant to prevent SCEV from spending too much | ||||
1503 | // time here. It is correct (but more expensive) to continue with a | ||||
1504 | // non-constant `Start` and do a general SCEV subtraction to compute | ||||
1505 | // `PreStart` below. | ||||
1506 | const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start); | ||||
1507 | if (!StartC) | ||||
1508 | return false; | ||||
1509 | |||||
1510 | APInt StartAI = StartC->getAPInt(); | ||||
1511 | |||||
1512 | for (unsigned Delta : {-2, -1, 1, 2}) { | ||||
1513 | const SCEV *PreStart = getConstant(StartAI - Delta); | ||||
1514 | |||||
1515 | FoldingSetNodeID ID; | ||||
1516 | ID.AddInteger(scAddRecExpr); | ||||
1517 | ID.AddPointer(PreStart); | ||||
1518 | ID.AddPointer(Step); | ||||
1519 | ID.AddPointer(L); | ||||
1520 | void *IP = nullptr; | ||||
1521 | const auto *PreAR = | ||||
1522 | static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); | ||||
1523 | |||||
1524 | // Give up if we don't already have the add recurrence we need because | ||||
1525 | // actually constructing an add recurrence is relatively expensive. | ||||
1526 | if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2) | ||||
1527 | const SCEV *DeltaS = getConstant(StartC->getType(), Delta); | ||||
1528 | ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; | ||||
1529 | const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep( | ||||
1530 | DeltaS, &Pred, this); | ||||
1531 | if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1) | ||||
1532 | return true; | ||||
1533 | } | ||||
1534 | } | ||||
1535 | |||||
1536 | return false; | ||||
1537 | } | ||||
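     | // For example, to prove {1,+,4} is <nuw>, Delta = 1 looks up the cached | ||||
     | // recurrence {0,+,4}; if that is already known <nuw> and provably stays | ||||
     | // below the overflow limit for a step of 1, then {1,+,4} cannot wrap. | ||||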
1538 | |||||
1539 | // Finds an integer D for an expression (C + x + y + ...) such that the top | ||||
1540 | // level addition in (D + (C - D + x + y + ...)) would not wrap (signed or | ||||
1541 | // unsigned) and the number of trailing zeros of (C - D + x + y + ...) is | ||||
1542 | // maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and | ||||
1543 | // the (C + x + y + ...) expression is \p WholeAddExpr. | ||||
1544 | static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, | ||||
1545 | const SCEVConstant *ConstantTerm, | ||||
1546 | const SCEVAddExpr *WholeAddExpr) { | ||||
1547 | const APInt &C = ConstantTerm->getAPInt(); | ||||
1548 | const unsigned BitWidth = C.getBitWidth(); | ||||
1549 | // Find number of trailing zeros of (x + y + ...) w/o the C first: | ||||
1550 | uint32_t TZ = BitWidth; | ||||
1551 | for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I) | ||||
1552 | TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I))); | ||||
1553 | if (TZ) { | ||||
1554 | // Set D to be as many least significant bits of C as possible while still | ||||
1555 | // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap: | ||||
1556 | return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C; | ||||
1557 | } | ||||
1558 | return APInt(BitWidth, 0); | ||||
1559 | } | ||||
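     | // For example, if C = 21 (0b10101) and both x and y are multiples of 8 | ||||
     | // (so TZ = 3), then D = 0b101 = 5 and C - D = 16: the residual | ||||
     | // (16 + x + y) is a multiple of 8, so adding D = 5 back merely fills in | ||||
     | // the three low zero bits and the top-level addition cannot wrap. | ||||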
1560 | |||||
1561 | // Finds an integer D for an affine AddRec expression {C,+,x} such that the top | ||||
1562 | // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the | ||||
1563 | // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p | ||||
1564 | // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count. | ||||
1565 | static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, | ||||
1566 | const APInt &ConstantStart, | ||||
1567 | const SCEV *Step) { | ||||
1568 | const unsigned BitWidth = ConstantStart.getBitWidth(); | ||||
1569 | const uint32_t TZ = SE.GetMinTrailingZeros(Step); | ||||
1570 | if (TZ) | ||||
1571 | return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) | ||||
1572 | : ConstantStart; | ||||
1573 | return APInt(BitWidth, 0); | ||||
1574 | } | ||||
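     | // For example, for {7,+,4} the step has TZ = 2, so D = 3 and the AddRec | ||||
     | // splits as 3 + {4,+,4}: every value of {4,+,4} is a multiple of 4, and | ||||
     | // adding 3 only sets the two low zero bits, which cannot wrap. | ||||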
1575 | |||||
1576 | const SCEV * | ||||
1577 | ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { | ||||
1578 |   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && | ||||
1579 |          "This is not an extending conversion!"); | ||||
1580 |   assert(isSCEVable(Ty) && | ||||
1581 |          "This is not a conversion to a SCEVable type!"); | ||||
1582 |   assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); | ||||
1583 | Ty = getEffectiveSCEVType(Ty); | ||||
1584 | |||||
1585 | // Fold if the operand is constant. | ||||
1586 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) | ||||
1587 | return getConstant( | ||||
1588 | cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); | ||||
1589 | |||||
1590 | // zext(zext(x)) --> zext(x) | ||||
1591 | if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) | ||||
1592 | return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); | ||||
1593 | |||||
1594 | // Before doing any expensive analysis, check to see if we've already | ||||
1595 | // computed a SCEV for this Op and Ty. | ||||
1596 | FoldingSetNodeID ID; | ||||
1597 | ID.AddInteger(scZeroExtend); | ||||
1598 | ID.AddPointer(Op); | ||||
1599 | ID.AddPointer(Ty); | ||||
1600 | void *IP = nullptr; | ||||
1601 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; | ||||
1602 | if (Depth > MaxCastDepth) { | ||||
1603 | SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), | ||||
1604 | Op, Ty); | ||||
1605 | UniqueSCEVs.InsertNode(S, IP); | ||||
1606 | addToLoopUseLists(S); | ||||
1607 | return S; | ||||
1608 | } | ||||
1609 | |||||
1610 | // zext(trunc(x)) --> zext(x) or x or trunc(x) | ||||
1611 | if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { | ||||
1612 | // It's possible the bits taken off by the truncate were all zero bits. If | ||||
1613 | // so, we should be able to simplify this further. | ||||
1614 | const SCEV *X = ST->getOperand(); | ||||
1615 | ConstantRange CR = getUnsignedRange(X); | ||||
1616 | unsigned TruncBits = getTypeSizeInBits(ST->getType()); | ||||
1617 | unsigned NewBits = getTypeSizeInBits(Ty); | ||||
1618 | if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( | ||||
1619 | CR.zextOrTrunc(NewBits))) | ||||
1620 | return getTruncateOrZeroExtend(X, Ty, Depth); | ||||
1621 | } | ||||
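     |   // For example, if an i32 value X is known to be less than 256, then | ||||
     |   // zext(trunc(X to i8) to i16) simplifies to trunc(X to i16): the bits | ||||
     |   // removed by the truncate were already known to be zero. | ||||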
1622 | |||||
1623 | // If the input value is a chrec scev, and we can prove that the value | ||||
1624 | // did not overflow the old, smaller, value, we can zero extend all of the | ||||
1625 | // operands (often constants). This allows analysis of something like | ||||
1626 | // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } | ||||
1627 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) | ||||
1628 | if (AR->isAffine()) { | ||||
1629 | const SCEV *Start = AR->getStart(); | ||||
1630 | const SCEV *Step = AR->getStepRecurrence(*this); | ||||
1631 | unsigned BitWidth = getTypeSizeInBits(AR->getType()); | ||||
1632 | const Loop *L = AR->getLoop(); | ||||
1633 | |||||
1634 | if (!AR->hasNoUnsignedWrap()) { | ||||
1635 | auto NewFlags = proveNoWrapViaConstantRanges(AR); | ||||
1636 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); | ||||
1637 | } | ||||
1638 | |||||
1639 | // If we have special knowledge that this addrec won't overflow, | ||||
1640 | // we don't need to do any further analysis. | ||||
1641 | if (AR->hasNoUnsignedWrap()) | ||||
1642 | return getAddRecExpr( | ||||
1643 | getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), | ||||
1644 | getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); | ||||
1645 | |||||
1646 | // Check whether the backedge-taken count is SCEVCouldNotCompute. | ||||
1647 | // Note that this serves two purposes: It filters out loops that are | ||||
1648 | // simply not analyzable, and it covers the case where this code is | ||||
1649 | // being called from within backedge-taken count analysis, such that | ||||
1650 | // attempting to ask for the backedge-taken count would likely result | ||||
1651 |       // in infinite recursion. In the latter case, the analysis code will | ||||
1652 | // cope with a conservative value, and it will take care to purge | ||||
1653 | // that value once it has finished. | ||||
1654 | const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); | ||||
1655 | if (!isa<SCEVCouldNotCompute>(MaxBECount)) { | ||||
1656 | // Manually compute the final value for AR, checking for overflow. | ||||
1657 | |||||
1658 |         // Check whether the backedge-taken count can be losslessly cast to | ||||
1659 | // the addrec's type. The count is always unsigned. | ||||
1660 | const SCEV *CastedMaxBECount = | ||||
1661 | getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); | ||||
1662 | const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( | ||||
1663 | CastedMaxBECount, MaxBECount->getType(), Depth); | ||||
1664 | if (MaxBECount == RecastedMaxBECount) { | ||||
1665 | Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); | ||||
1666 | // Check whether Start+Step*MaxBECount has no unsigned overflow. | ||||
1667 | const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step, | ||||
1668 | SCEV::FlagAnyWrap, Depth + 1); | ||||
1669 | const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul, | ||||
1670 | SCEV::FlagAnyWrap, | ||||
1671 | Depth + 1), | ||||
1672 | WideTy, Depth + 1); | ||||
1673 | const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1); | ||||
1674 | const SCEV *WideMaxBECount = | ||||
1675 | getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); | ||||
1676 | const SCEV *OperandExtendedAdd = | ||||
1677 | getAddExpr(WideStart, | ||||
1678 | getMulExpr(WideMaxBECount, | ||||
1679 | getZeroExtendExpr(Step, WideTy, Depth + 1), | ||||
1680 | SCEV::FlagAnyWrap, Depth + 1), | ||||
1681 | SCEV::FlagAnyWrap, Depth + 1); | ||||
1682 | if (ZAdd == OperandExtendedAdd) { | ||||
1683 | // Cache knowledge of AR NUW, which is propagated to this AddRec. | ||||
1684 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW); | ||||
1685 | // Return the expression with the addrec on the outside. | ||||
1686 | return getAddRecExpr( | ||||
1687 | getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, | ||||
1688 | Depth + 1), | ||||
1689 | getZeroExtendExpr(Step, Ty, Depth + 1), L, | ||||
1690 | AR->getNoWrapFlags()); | ||||
1691 | } | ||||
1692 | // Similar to above, only this time treat the step value as signed. | ||||
1693 | // This covers loops that count down. | ||||
1694 | OperandExtendedAdd = | ||||
1695 | getAddExpr(WideStart, | ||||
1696 | getMulExpr(WideMaxBECount, | ||||
1697 | getSignExtendExpr(Step, WideTy, Depth + 1), | ||||
1698 | SCEV::FlagAnyWrap, Depth + 1), | ||||
1699 | SCEV::FlagAnyWrap, Depth + 1); | ||||
1700 | if (ZAdd == OperandExtendedAdd) { | ||||
1701 | // Cache knowledge of AR NW, which is propagated to this AddRec. | ||||
1702 | // Negative step causes unsigned wrap, but it still can't self-wrap. | ||||
1703 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); | ||||
1704 | // Return the expression with the addrec on the outside. | ||||
1705 | return getAddRecExpr( | ||||
1706 | getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, | ||||
1707 | Depth + 1), | ||||
1708 | getSignExtendExpr(Step, Ty, Depth + 1), L, | ||||
1709 | AR->getNoWrapFlags()); | ||||
1710 | } | ||||
1711 | } | ||||
1712 | } | ||||
1713 | |||||
1714 | // Normally, in the cases we can prove no-overflow via a | ||||
1715 | // backedge guarding condition, we can also compute a backedge | ||||
1716 | // taken count for the loop. The exceptions are assumptions and | ||||
1717 | // guards present in the loop -- SCEV is not great at exploiting | ||||
1718 | // these to compute max backedge taken counts, but can still use | ||||
1719 | // these to prove lack of overflow. Use this fact to avoid | ||||
1720 | // doing extra work that may not pay off. | ||||
1721 | if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || | ||||
1722 | !AC.assumptions().empty()) { | ||||
1723 | |||||
1724 | auto NewFlags = proveNoUnsignedWrapViaInduction(AR); | ||||
1725 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); | ||||
1726 | if (AR->hasNoUnsignedWrap()) { | ||||
1727 |           // Same as the nuw case above - duplicated here to avoid a compile | ||||
1728 |           // time issue. It's not clear that the order of checks matters, but | ||||
1729 |           // it's one of two possible causes for a change which was reverted. | ||||
1730 |           // Be conservative for the moment. | ||||
1731 | return getAddRecExpr( | ||||
1732 | getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, | ||||
1733 | Depth + 1), | ||||
1734 | getZeroExtendExpr(Step, Ty, Depth + 1), L, | ||||
1735 | AR->getNoWrapFlags()); | ||||
1736 | } | ||||
1737 | |||||
1738 | // For a negative step, we can extend the operands iff doing so only | ||||
1739 | // traverses values in the range zext([0,UINT_MAX]). | ||||
1740 | if (isKnownNegative(Step)) { | ||||
1741 | const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - | ||||
1742 | getSignedRangeMin(Step)); | ||||
1743 | if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || | ||||
1744 | isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) { | ||||
1745 | // Cache knowledge of AR NW, which is propagated to this | ||||
1746 | // AddRec. Negative step causes unsigned wrap, but it | ||||
1747 | // still can't self-wrap. | ||||
1748 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); | ||||
1749 | // Return the expression with the addrec on the outside. | ||||
1750 | return getAddRecExpr( | ||||
1751 | getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, | ||||
1752 | Depth + 1), | ||||
1753 | getSignExtendExpr(Step, Ty, Depth + 1), L, | ||||
1754 | AR->getNoWrapFlags()); | ||||
1755 | } | ||||
1756 | } | ||||
1757 | } | ||||
1758 | |||||
1759 | // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw> | ||||
1760 | // if D + (C - D + Step * n) could be proven to not unsigned wrap | ||||
1761 | // where D maximizes the number of trailing zeros of (C - D + Step * n) | ||||
1762 | if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { | ||||
1763 | const APInt &C = SC->getAPInt(); | ||||
1764 | const APInt &D = extractConstantWithoutWrapping(*this, C, Step); | ||||
1765 | if (D != 0) { | ||||
1766 | const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); | ||||
1767 | const SCEV *SResidual = | ||||
1768 | getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); | ||||
1769 | const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); | ||||
1770 | return getAddExpr(SZExtD, SZExtR, | ||||
1771 | (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), | ||||
1772 | Depth + 1); | ||||
1773 | } | ||||
1774 | } | ||||
1775 | |||||
1776 | if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { | ||||
1777 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW); | ||||
1778 | return getAddRecExpr( | ||||
1779 | getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), | ||||
1780 | getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); | ||||
1781 | } | ||||
1782 | } | ||||
1783 | |||||
1784 | // zext(A % B) --> zext(A) % zext(B) | ||||
1785 | { | ||||
1786 | const SCEV *LHS; | ||||
1787 | const SCEV *RHS; | ||||
1788 | if (matchURem(Op, LHS, RHS)) | ||||
1789 | return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1), | ||||
1790 | getZeroExtendExpr(RHS, Ty, Depth + 1)); | ||||
1791 | } | ||||
1792 | |||||
1793 | // zext(A / B) --> zext(A) / zext(B). | ||||
1794 | if (auto *Div = dyn_cast<SCEVUDivExpr>(Op)) | ||||
1795 | return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1), | ||||
1796 | getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1)); | ||||
1797 | |||||
1798 | if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { | ||||
1799 | // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> | ||||
1800 | if (SA->hasNoUnsignedWrap()) { | ||||
1801 | // If the addition does not unsign overflow then we can, by definition, | ||||
1802 | // commute the zero extension with the addition operation. | ||||
1803 | SmallVector<const SCEV *, 4> Ops; | ||||
1804 | for (const auto *Op : SA->operands()) | ||||
1805 | Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); | ||||
1806 | return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); | ||||
1807 | } | ||||
1808 | |||||
1809 | // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...)) | ||||
1810 | // if D + (C - D + x + y + ...) could be proven to not unsigned wrap | ||||
1811 | // where D maximizes the number of trailing zeros of (C - D + x + y + ...) | ||||
1812 | // | ||||
1813 | // Often address arithmetics contain expressions like | ||||
1814 | // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))). | ||||
1815 | // This transformation is useful while proving that such expressions are | ||||
1816 | // equal or differ by a small constant amount, see LoadStoreVectorizer pass. | ||||
1817 | if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { | ||||
1818 | const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); | ||||
1819 | if (D != 0) { | ||||
1820 | const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); | ||||
1821 | const SCEV *SResidual = | ||||
1822 | getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); | ||||
1823 | const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); | ||||
1824 | return getAddExpr(SZExtD, SZExtR, | ||||
1825 | (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), | ||||
1826 | Depth + 1); | ||||
1827 | } | ||||
1828 | } | ||||
1829 | } | ||||
1830 | |||||
1831 | if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { | ||||
1832 | // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> | ||||
1833 | if (SM->hasNoUnsignedWrap()) { | ||||
1834 | // If the multiply does not unsign overflow then we can, by definition, | ||||
1835 | // commute the zero extension with the multiply operation. | ||||
1836 | SmallVector<const SCEV *, 4> Ops; | ||||
1837 | for (const auto *Op : SM->operands()) | ||||
1838 | Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); | ||||
1839 | return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); | ||||
1840 | } | ||||
1841 | |||||
1842 | // zext(2^K * (trunc X to iN)) to iM -> | ||||
1843 | // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> | ||||
1844 | // | ||||
1845 | // Proof: | ||||
1846 | // | ||||
1847 | // zext(2^K * (trunc X to iN)) to iM | ||||
1848 | // = zext((trunc X to iN) << K) to iM | ||||
1849 | // = zext((trunc X to i{N-K}) << K)<nuw> to iM | ||||
1850 | // (because shl removes the top K bits) | ||||
1851 | // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM | ||||
1852 | // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. | ||||
1853 | // | ||||
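     |     // For example, with K = 2, N = 8 and M = 32: | ||||
     |     //   zext(4 * (trunc X to i8)) to i32 | ||||
     |     //     = 4 * (zext(trunc X to i6) to i32) | ||||
     |     // since shifting the 6-bit value left by two cannot overflow i8. | ||||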
1854 | if (SM->getNumOperands() == 2) | ||||
1855 | if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) | ||||
1856 | if (MulLHS->getAPInt().isPowerOf2()) | ||||
1857 | if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { | ||||
1858 | int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - | ||||
1859 | MulLHS->getAPInt().logBase2(); | ||||
1860 | Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); | ||||
1861 | return getMulExpr( | ||||
1862 | getZeroExtendExpr(MulLHS, Ty), | ||||
1863 | getZeroExtendExpr( | ||||
1864 | getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), | ||||
1865 | SCEV::FlagNUW, Depth + 1); | ||||
1866 | } | ||||
1867 | } | ||||
1868 | |||||
1869 | // The cast wasn't folded; create an explicit cast node. | ||||
1870 | // Recompute the insert position, as it may have been invalidated. | ||||
1871 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; | ||||
1872 | SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), | ||||
1873 | Op, Ty); | ||||
1874 | UniqueSCEVs.InsertNode(S, IP); | ||||
1875 | addToLoopUseLists(S); | ||||
1876 | return S; | ||||
1877 | } | ||||
1878 | |||||
1879 | const SCEV * | ||||
1880 | ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { | ||||
1881 |   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && | ||||
1882 |          "This is not an extending conversion!"); | ||||
1883 |   assert(isSCEVable(Ty) && | ||||
1884 |          "This is not a conversion to a SCEVable type!"); | ||||
1885 |   assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); | ||||
1886 | Ty = getEffectiveSCEVType(Ty); | ||||
1887 | |||||
1888 | // Fold if the operand is constant. | ||||
1889 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) | ||||
1890 | return getConstant( | ||||
1891 | cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); | ||||
1892 | |||||
1893 | // sext(sext(x)) --> sext(x) | ||||
1894 | if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) | ||||
1895 | return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); | ||||
1896 | |||||
1897 | // sext(zext(x)) --> zext(x) | ||||
1898 | if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) | ||||
1899 | return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); | ||||
1900 | |||||
1901 | // Before doing any expensive analysis, check to see if we've already | ||||
1902 | // computed a SCEV for this Op and Ty. | ||||
1903 | FoldingSetNodeID ID; | ||||
1904 | ID.AddInteger(scSignExtend); | ||||
1905 | ID.AddPointer(Op); | ||||
1906 | ID.AddPointer(Ty); | ||||
1907 | void *IP = nullptr; | ||||
1908 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; | ||||
1909 | // Limit recursion depth. | ||||
1910 | if (Depth > MaxCastDepth) { | ||||
1911 | SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), | ||||
1912 | Op, Ty); | ||||
1913 | UniqueSCEVs.InsertNode(S, IP); | ||||
1914 | addToLoopUseLists(S); | ||||
1915 | return S; | ||||
1916 | } | ||||
1917 | |||||
1918 | // sext(trunc(x)) --> sext(x) or x or trunc(x) | ||||
1919 | if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { | ||||
1920 | // It's possible the bits taken off by the truncate were all sign bits. If | ||||
1921 | // so, we should be able to simplify this further. | ||||
1922 | const SCEV *X = ST->getOperand(); | ||||
1923 | ConstantRange CR = getSignedRange(X); | ||||
1924 | unsigned TruncBits = getTypeSizeInBits(ST->getType()); | ||||
1925 | unsigned NewBits = getTypeSizeInBits(Ty); | ||||
1926 | if (CR.truncate(TruncBits).signExtend(NewBits).contains( | ||||
1927 | CR.sextOrTrunc(NewBits))) | ||||
1928 | return getTruncateOrSignExtend(X, Ty, Depth); | ||||
1929 | } | ||||
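     |   // For example, if an i32 value X is known to lie in [-128, 127], then | ||||
     |   // sext(trunc(X to i8) to i16) simplifies to trunc(X to i16): the bits | ||||
     |   // removed by the truncate were all copies of the sign bit. | ||||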
1930 | |||||
1931 | if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { | ||||
1932 | // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> | ||||
1933 | if (SA->hasNoSignedWrap()) { | ||||
1934 | // If the addition does not sign overflow then we can, by definition, | ||||
1935 | // commute the sign extension with the addition operation. | ||||
1936 | SmallVector<const SCEV *, 4> Ops; | ||||
1937 | for (const auto *Op : SA->operands()) | ||||
1938 | Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); | ||||
1939 | return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); | ||||
1940 | } | ||||
1941 | |||||
1942 | // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) | ||||
1943 | // if D + (C - D + x + y + ...) could be proven to not signed wrap | ||||
1944 | // where D maximizes the number of trailing zeros of (C - D + x + y + ...) | ||||
1945 | // | ||||
1946 | // For instance, this will bring two seemingly different expressions: | ||||
1947 | // 1 + sext(5 + 20 * %x + 24 * %y) and | ||||
1948 | // sext(6 + 20 * %x + 24 * %y) | ||||
1949 | // to the same form: | ||||
1950 | // 2 + sext(4 + 20 * %x + 24 * %y) | ||||
1951 | if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { | ||||
1952 | const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); | ||||
1953 | if (D != 0) { | ||||
1954 | const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); | ||||
1955 | const SCEV *SResidual = | ||||
1956 | getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); | ||||
1957 | const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); | ||||
1958 | return getAddExpr(SSExtD, SSExtR, | ||||
1959 | (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), | ||||
1960 | Depth + 1); | ||||
1961 | } | ||||
1962 | } | ||||
1963 | } | ||||
1964 | // If the input value is a chrec scev, and we can prove that the value | ||||
1965 | // did not overflow the old, smaller, value, we can sign extend all of the | ||||
1966 | // operands (often constants). This allows analysis of something like | ||||
1967 | // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } | ||||
1968 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) | ||||
1969 | if (AR->isAffine()) { | ||||
1970 | const SCEV *Start = AR->getStart(); | ||||
1971 | const SCEV *Step = AR->getStepRecurrence(*this); | ||||
1972 | unsigned BitWidth = getTypeSizeInBits(AR->getType()); | ||||
1973 | const Loop *L = AR->getLoop(); | ||||
1974 | |||||
1975 | if (!AR->hasNoSignedWrap()) { | ||||
1976 | auto NewFlags = proveNoWrapViaConstantRanges(AR); | ||||
1977 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); | ||||
1978 | } | ||||
1979 | |||||
1980 | // If we have special knowledge that this addrec won't overflow, | ||||
1981 | // we don't need to do any further analysis. | ||||
1982 | if (AR->hasNoSignedWrap()) | ||||
1983 | return getAddRecExpr( | ||||
1984 | getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), | ||||
1985 | getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW); | ||||
1986 | |||||
1987 | // Check whether the backedge-taken count is SCEVCouldNotCompute. | ||||
1988 | // Note that this serves two purposes: It filters out loops that are | ||||
1989 | // simply not analyzable, and it covers the case where this code is | ||||
1990 | // being called from within backedge-taken count analysis, such that | ||||
1991 | // attempting to ask for the backedge-taken count would likely result | ||||
1992 |       // in infinite recursion. In the latter case, the analysis code will | ||||
1993 | // cope with a conservative value, and it will take care to purge | ||||
1994 | // that value once it has finished. | ||||
1995 | const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); | ||||
1996 | if (!isa<SCEVCouldNotCompute>(MaxBECount)) { | ||||
1997 | // Manually compute the final value for AR, checking for | ||||
1998 | // overflow. | ||||
1999 | |||||
2000 |         // Check whether the backedge-taken count can be losslessly cast to | ||||
2001 | // the addrec's type. The count is always unsigned. | ||||
2002 | const SCEV *CastedMaxBECount = | ||||
2003 | getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); | ||||
2004 | const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( | ||||
2005 | CastedMaxBECount, MaxBECount->getType(), Depth); | ||||
2006 | if (MaxBECount == RecastedMaxBECount) { | ||||
2007 | Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); | ||||
2008 | // Check whether Start+Step*MaxBECount has no signed overflow. | ||||
2009 | const SCEV *SMul = getMulExpr(CastedMaxBECount, Step, | ||||
2010 | SCEV::FlagAnyWrap, Depth + 1); | ||||
2011 | const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul, | ||||
2012 | SCEV::FlagAnyWrap, | ||||
2013 | Depth + 1), | ||||
2014 | WideTy, Depth + 1); | ||||
2015 | const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1); | ||||
2016 | const SCEV *WideMaxBECount = | ||||
2017 | getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); | ||||
2018 | const SCEV *OperandExtendedAdd = | ||||
2019 | getAddExpr(WideStart, | ||||
2020 | getMulExpr(WideMaxBECount, | ||||
2021 | getSignExtendExpr(Step, WideTy, Depth + 1), | ||||
2022 | SCEV::FlagAnyWrap, Depth + 1), | ||||
2023 | SCEV::FlagAnyWrap, Depth + 1); | ||||
2024 | if (SAdd == OperandExtendedAdd) { | ||||
2025 | // Cache knowledge of AR NSW, which is propagated to this AddRec. | ||||
2026 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW); | ||||
2027 | // Return the expression with the addrec on the outside. | ||||
2028 | return getAddRecExpr( | ||||
2029 | getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, | ||||
2030 | Depth + 1), | ||||
2031 | getSignExtendExpr(Step, Ty, Depth + 1), L, | ||||
2032 | AR->getNoWrapFlags()); | ||||
2033 | } | ||||
2034 | // Similar to above, only this time treat the step value as unsigned. | ||||
2035 | // This covers loops that count up with an unsigned step. | ||||
2036 | OperandExtendedAdd = | ||||
2037 | getAddExpr(WideStart, | ||||
2038 | getMulExpr(WideMaxBECount, | ||||
2039 | getZeroExtendExpr(Step, WideTy, Depth + 1), | ||||
2040 | SCEV::FlagAnyWrap, Depth + 1), | ||||
2041 | SCEV::FlagAnyWrap, Depth + 1); | ||||
2042 | if (SAdd == OperandExtendedAdd) { | ||||
2043 | // If AR wraps around then | ||||
2044 | // | ||||
2045 | // abs(Step) * MaxBECount > unsigned-max(AR->getType()) | ||||
2046 | // => SAdd != OperandExtendedAdd | ||||
2047 | // | ||||
2048 | // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> | ||||
2049 | // (SAdd == OperandExtendedAdd => AR is NW) | ||||
2050 | |||||
2051 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); | ||||
2052 | |||||
2053 | // Return the expression with the addrec on the outside. | ||||
2054 | return getAddRecExpr( | ||||
2055 | getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, | ||||
2056 | Depth + 1), | ||||
2057 | getZeroExtendExpr(Step, Ty, Depth + 1), L, | ||||
2058 | AR->getNoWrapFlags()); | ||||
2059 | } | ||||
2060 | } | ||||
2061 | } | ||||
2062 | |||||
2063 | auto NewFlags = proveNoSignedWrapViaInduction(AR); | ||||
2064 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); | ||||
2065 | if (AR->hasNoSignedWrap()) { | ||||
2066 |         // Same as the nsw case above - duplicated here to avoid a compile | ||||
2067 |         // time issue. It's not clear that the order of checks matters, but | ||||
2068 |         // it's one of two possible causes for a change which was reverted. | ||||
2069 |         // Be conservative for the moment. | ||||
2070 | return getAddRecExpr( | ||||
2071 | getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), | ||||
2072 | getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); | ||||
2073 | } | ||||
2074 | |||||
2075 | // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw> | ||||
2076 | // if D + (C - D + Step * n) could be proven to not signed wrap | ||||
2077 | // where D maximizes the number of trailing zeros of (C - D + Step * n) | ||||
2078 | if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { | ||||
2079 | const APInt &C = SC->getAPInt(); | ||||
2080 | const APInt &D = extractConstantWithoutWrapping(*this, C, Step); | ||||
2081 | if (D != 0) { | ||||
2082 | const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); | ||||
2083 | const SCEV *SResidual = | ||||
2084 | getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); | ||||
2085 | const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); | ||||
2086 | return getAddExpr(SSExtD, SSExtR, | ||||
2087 | (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), | ||||
2088 | Depth + 1); | ||||
2089 | } | ||||
2090 | } | ||||
2091 | |||||
2092 | if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { | ||||
2093 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW); | ||||
2094 | return getAddRecExpr( | ||||
2095 | getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), | ||||
2096 | getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); | ||||
2097 | } | ||||
2098 | } | ||||
2099 | |||||
2100 | // If the input value is provably positive and we could not simplify | ||||
2101 |   // away the sext, build a zext instead. | ||||
2102 | if (isKnownNonNegative(Op)) | ||||
2103 | return getZeroExtendExpr(Op, Ty, Depth + 1); | ||||
2104 | |||||
2105 | // The cast wasn't folded; create an explicit cast node. | ||||
2106 | // Recompute the insert position, as it may have been invalidated. | ||||
2107 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; | ||||
2108 | SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), | ||||
2109 | Op, Ty); | ||||
2110 | UniqueSCEVs.InsertNode(S, IP); | ||||
2111 | addToLoopUseLists(S); | ||||
2112 | return S; | ||||
2113 | } | ||||
2114 | |||||
2115 | /// getAnyExtendExpr - Return a SCEV for the given operand extended with | ||||
2116 | /// unspecified bits out to the given type. | ||||
2117 | const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, | ||||
2118 | Type *Ty) { | ||||
2119 |   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && | ||||
2120 |          "This is not an extending conversion!"); | ||||
2121 |   assert(isSCEVable(Ty) && | ||||
2122 |          "This is not a conversion to a SCEVable type!"); | ||||
2123 | Ty = getEffectiveSCEVType(Ty); | ||||
2124 | |||||
2125 | // Sign-extend negative constants. | ||||
2126 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) | ||||
2127 | if (SC->getAPInt().isNegative()) | ||||
2128 | return getSignExtendExpr(Op, Ty); | ||||
2129 | |||||
2130 | // Peel off a truncate cast. | ||||
2131 | if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { | ||||
2132 | const SCEV *NewOp = T->getOperand(); | ||||
2133 | if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) | ||||
2134 | return getAnyExtendExpr(NewOp, Ty); | ||||
2135 | return getTruncateOrNoop(NewOp, Ty); | ||||
2136 | } | ||||
2137 | |||||
2138 | // Next try a zext cast. If the cast is folded, use it. | ||||
2139 | const SCEV *ZExt = getZeroExtendExpr(Op, Ty); | ||||
2140 | if (!isa<SCEVZeroExtendExpr>(ZExt)) | ||||
2141 | return ZExt; | ||||
2142 | |||||
2143 | // Next try a sext cast. If the cast is folded, use it. | ||||
2144 | const SCEV *SExt = getSignExtendExpr(Op, Ty); | ||||
2145 | if (!isa<SCEVSignExtendExpr>(SExt)) | ||||
2146 | return SExt; | ||||
2147 | |||||
2148 | // Force the cast to be folded into the operands of an addrec. | ||||
2149 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { | ||||
2150 | SmallVector<const SCEV *, 4> Ops; | ||||
2151 | for (const SCEV *Op : AR->operands()) | ||||
2152 | Ops.push_back(getAnyExtendExpr(Op, Ty)); | ||||
2153 | return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); | ||||
2154 | } | ||||
2155 | |||||
2156 | // If the expression is obviously signed, use the sext cast value. | ||||
2157 | if (isa<SCEVSMaxExpr>(Op)) | ||||
2158 | return SExt; | ||||
2159 | |||||
2160 | // Absent any other information, use the zext cast value. | ||||
2161 | return ZExt; | ||||
2162 | } | ||||
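     | // For example, anyext of the i8 constant -1 to i32 yields the constant | ||||
     | // -1 (the negative-constant case above chooses sext), while a value with | ||||
     | // no other usable information simply defaults to the zext form. | ||||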
2163 | |||||
2164 | /// Process the given Ops list, which is a list of operands to be added under | ||||
2165 | /// the given scale, and update the given map. This is a helper function for | ||||
2166 | /// getAddExpr. As an example of what it does, given a sequence of operands | ||||
2167 | /// that would form an add expression like this: | ||||
2168 | /// | ||||
2169 | /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) | ||||
2170 | /// | ||||
2171 | /// where A and B are constants, update the map with these values: | ||||
2172 | /// | ||||
2173 | /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) | ||||
2174 | /// | ||||
2175 | /// and add 13 + A*B*29 to AccumulatedConstant. | ||||
2176 | /// This will allow getAddExpr to produce this: | ||||
2177 | /// | ||||
2178 | /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) | ||||
2179 | /// | ||||
2180 | /// This form often exposes folding opportunities that are hidden in | ||||
2181 | /// the original operand list. | ||||
2182 | /// | ||||
2183 | /// Return true iff it appears that any interesting folding opportunities | ||||
2184 | /// may be exposed. This helps getAddRecExpr short-circuit extra work in | ||||
2185 | /// the common case where no interesting opportunities are present, and | ||||
2186 | /// is also used as a check to avoid infinite recursion. | ||||
2187 | static bool | ||||
2188 | CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, | ||||
2189 | SmallVectorImpl<const SCEV *> &NewOps, | ||||
2190 | APInt &AccumulatedConstant, | ||||
2191 | const SCEV *const *Ops, size_t NumOperands, | ||||
2192 | const APInt &Scale, | ||||
2193 | ScalarEvolution &SE) { | ||||
2194 | bool Interesting = false; | ||||
2195 | |||||
2196 | // Iterate over the add operands. They are sorted, with constants first. | ||||
2197 | unsigned i = 0; | ||||
2198 | while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { | ||||
2199 | ++i; | ||||
2200 | // Pull a buried constant out to the outside. | ||||
2201 | if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) | ||||
2202 | Interesting = true; | ||||
2203 | AccumulatedConstant += Scale * C->getAPInt(); | ||||
2204 | } | ||||
2205 | |||||
2206 | // Next comes everything else. We're especially interested in multiplies | ||||
2207 | // here, but they're in the middle, so just visit the rest with one loop. | ||||
2208 | for (; i != NumOperands; ++i) { | ||||
2209 | const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); | ||||
2210 | if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { | ||||
2211 | APInt NewScale = | ||||
2212 | Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); | ||||
2213 | if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { | ||||
2214 | // A multiplication of a constant with another add; recurse. | ||||
2215 | const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); | ||||
2216 | Interesting |= | ||||
2217 | CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, | ||||
2218 | Add->op_begin(), Add->getNumOperands(), | ||||
2219 | NewScale, SE); | ||||
2220 | } else { | ||||
2221 | // A multiplication of a constant with some other value. Update | ||||
2222 | // the map. | ||||
2223 | SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands())); | ||||
2224 | const SCEV *Key = SE.getMulExpr(MulOps); | ||||
2225 | auto Pair = M.insert({Key, NewScale}); | ||||
2226 | if (Pair.second) { | ||||
2227 | NewOps.push_back(Pair.first->first); | ||||
2228 | } else { | ||||
2229 | Pair.first->second += NewScale; | ||||
2230 | // The map already had an entry for this value, which may indicate | ||||
2231 | // a folding opportunity. | ||||
2232 | Interesting = true; | ||||
2233 | } | ||||
2234 | } | ||||
2235 | } else { | ||||
2236 | // An ordinary operand. Update the map. | ||||
2237 | std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = | ||||
2238 | M.insert({Ops[i], Scale}); | ||||
2239 | if (Pair.second) { | ||||
2240 | NewOps.push_back(Pair.first->first); | ||||
2241 | } else { | ||||
2242 | Pair.first->second += Scale; | ||||
2243 | // The map already had an entry for this value, which may indicate | ||||
2244 | // a folding opportunity. | ||||
2245 | Interesting = true; | ||||
2246 | } | ||||
2247 | } | ||||
2248 | } | ||||
2249 | |||||
2250 | return Interesting; | ||||
2251 | } | ||||
2252 | |||||
2253 | bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, | ||||
2254 | const SCEV *LHS, const SCEV *RHS) { | ||||
2255 | const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *, | ||||
2256 | SCEV::NoWrapFlags, unsigned); | ||||
2257 | switch (BinOp) { | ||||
2258 | default: | ||||
2259 |     llvm_unreachable("Unsupported binary op"); | ||||
2260 | case Instruction::Add: | ||||
2261 | Operation = &ScalarEvolution::getAddExpr; | ||||
2262 | break; | ||||
2263 | case Instruction::Sub: | ||||
2264 | Operation = &ScalarEvolution::getMinusSCEV; | ||||
2265 | break; | ||||
2266 | case Instruction::Mul: | ||||
2267 | Operation = &ScalarEvolution::getMulExpr; | ||||
2268 | break; | ||||
2269 | } | ||||
2270 | |||||
2271 | const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) = | ||||
2272 | Signed ? &ScalarEvolution::getSignExtendExpr | ||||
2273 | : &ScalarEvolution::getZeroExtendExpr; | ||||
2274 | |||||
2275 | // Check ext(LHS op RHS) == ext(LHS) op ext(RHS) | ||||
2276 | auto *NarrowTy = cast<IntegerType>(LHS->getType()); | ||||
2277 | auto *WideTy = | ||||
2278 | IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2); | ||||
2279 | |||||
2280 | const SCEV *A = (this->*Extension)( | ||||
2281 | (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0); | ||||
2282 | const SCEV *B = (this->*Operation)((this->*Extension)(LHS, WideTy, 0), | ||||
2283 | (this->*Extension)(RHS, WideTy, 0), | ||||
2284 | SCEV::FlagAnyWrap, 0); | ||||
2285 | return A == B; | ||||
2286 | } | ||||
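     | // For example, to show that a narrow i8 add cannot sign-wrap, this | ||||
     | // compares sext(LHS + RHS) against sext(LHS) + sext(RHS), both built in | ||||
     | // i16: if SCEV folds the two sides to the same expression, no i8 signed | ||||
     | // overflow is possible. | ||||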
2287 | |||||
2288 | std::pair<SCEV::NoWrapFlags, bool /*Deduced*/> | ||||
2289 | ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp( | ||||
2290 | const OverflowingBinaryOperator *OBO) { | ||||
2291 | SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap; | ||||
2292 | |||||
2293 | if (OBO->hasNoUnsignedWrap()) | ||||
2294 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); | ||||
2295 | if (OBO->hasNoSignedWrap()) | ||||
2296 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); | ||||
2297 | |||||
2298 | bool Deduced = false; | ||||
2299 | |||||
2300 | if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap()) | ||||
2301 | return {Flags, Deduced}; | ||||
2302 | |||||
2303 | if (OBO->getOpcode() != Instruction::Add && | ||||
2304 | OBO->getOpcode() != Instruction::Sub && | ||||
2305 | OBO->getOpcode() != Instruction::Mul) | ||||
2306 | return {Flags, Deduced}; | ||||
2307 | |||||
2308 | const SCEV *LHS = getSCEV(OBO->getOperand(0)); | ||||
2309 | const SCEV *RHS = getSCEV(OBO->getOperand(1)); | ||||
2310 | |||||
2311 | if (!OBO->hasNoUnsignedWrap() && | ||||
2312 | willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), | ||||
2313 | /* Signed */ false, LHS, RHS)) { | ||||
2314 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); | ||||
2315 | Deduced = true; | ||||
2316 | } | ||||
2317 | |||||
2318 | if (!OBO->hasNoSignedWrap() && | ||||
2319 | willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), | ||||
2320 | /* Signed */ true, LHS, RHS)) { | ||||
2321 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); | ||||
2322 | Deduced = true; | ||||
2323 | } | ||||
2324 | |||||
2325 | return {Flags, Deduced}; | ||||
2326 | } | ||||
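// Usage sketch (hypothetical caller, shown only for illustration): given an
// IR `add nsw %x, %y` whose unsigned no-wrap can also be proven from the
// operand ranges, the Deduced bit tells the caller that the returned flags
// are stronger than what the instruction itself carried:
//
//   std::pair<SCEV::NoWrapFlags, bool> R =
//       SE.getStrengthenedNoWrapFlagsFromBinOp(OBO);
//   if (R.second)
//     ; // R.first may now include FlagNUW in addition to FlagNSW.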
2327 | |||||
2328 | // We're trying to construct a SCEV of type `Type' with `Ops' as operands and | ||||
2329 | // `Flags' as can't-wrap behavior. Infer a more aggressive set of
2330 | // can't-overflow flags for the operation if possible. | ||||
2331 | static SCEV::NoWrapFlags | ||||
2332 | StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, | ||||
2333 | const ArrayRef<const SCEV *> Ops, | ||||
2334 | SCEV::NoWrapFlags Flags) { | ||||
2335 | using namespace std::placeholders; | ||||
2336 | |||||
2337 | using OBO = OverflowingBinaryOperator; | ||||
2338 | |||||
2339 | bool CanAnalyze = | ||||
2340 | Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; | ||||
2341 | (void)CanAnalyze; | ||||
2342 | assert(CanAnalyze && "don't call from other places!");
2343 | |||||
2344 | int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; | ||||
2345 | SCEV::NoWrapFlags SignOrUnsignWrap = | ||||
2346 | ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); | ||||
2347 | |||||
2348 | // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. | ||||
2349 | auto IsKnownNonNegative = [&](const SCEV *S) { | ||||
2350 | return SE->isKnownNonNegative(S); | ||||
2351 | }; | ||||
2352 | |||||
2353 | if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) | ||||
2354 | Flags = | ||||
2355 | ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); | ||||
2356 | |||||
2357 | SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); | ||||
2358 | |||||
2359 | if (SignOrUnsignWrap != SignOrUnsignMask && | ||||
2360 | (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && | ||||
2361 | isa<SCEVConstant>(Ops[0])) { | ||||
2362 | |||||
2363 | auto Opcode = [&] { | ||||
2364 | switch (Type) { | ||||
2365 | case scAddExpr: | ||||
2366 | return Instruction::Add; | ||||
2367 | case scMulExpr: | ||||
2368 | return Instruction::Mul; | ||||
2369 | default: | ||||
2370 | llvm_unreachable("Unexpected SCEV op.");
2371 | } | ||||
2372 | }(); | ||||
2373 | |||||
2374 | const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); | ||||
2375 | |||||
2376 | // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow. | ||||
2377 | if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { | ||||
2378 | auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( | ||||
2379 | Opcode, C, OBO::NoSignedWrap); | ||||
2380 | if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) | ||||
2381 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); | ||||
2382 | } | ||||
2383 | |||||
2384 | // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsigned overflow.
2385 | if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { | ||||
2386 | auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( | ||||
2387 | Opcode, C, OBO::NoUnsignedWrap); | ||||
2388 | if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) | ||||
2389 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); | ||||
2390 | } | ||||
2391 | } | ||||
2392 | |||||
2393 | return Flags; | ||||
2394 | } | ||||
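// Worked instance (illustration, not upstream code): for scAddExpr with
// Ops == {5, X} on i8, makeGuaranteedNoWrapRegion(Add, 5, NoSignedWrap)
// yields the range [-128, 123): adding 5 to any value in that region cannot
// sign-overflow. If getSignedRange(X) lies inside it, FlagNSW is set above
// even when the caller passed FlagAnyWrap.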
2395 | |||||
2396 | bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { | ||||
2397 | return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); | ||||
2398 | } | ||||
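// For example (illustration): in `for (i = 0; i != n; ++i)`, the SCEV for
// `n` is invariant in the loop and properly dominates its header, so it is
// available at loop entry; the SCEV {0,+,1} for `i` is not.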
2399 | |||||
2400 | /// Get a canonical add expression, or something simpler if possible. | ||||
2401 | const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, | ||||
2402 | SCEV::NoWrapFlags OrigFlags, | ||||
2403 | unsigned Depth) { | ||||
2404 | assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2405 | "only nuw or nsw allowed");
2406 | assert(!Ops.empty() && "Cannot get empty add!");
2407 | if (Ops.size() == 1) return Ops[0];
2408 | #ifndef NDEBUG
2409 | Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2410 | for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2411 | assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2412 | "SCEVAddExpr operand types don't match!");
2413 | unsigned NumPtrs = count_if(
2414 | Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); });
2415 | assert(NumPtrs <= 1 && "add has at most one pointer operand");
2416 | #endif
2417 | |||||
2418 | // Sort by complexity, this groups all similar expression types together. | ||||
2419 | GroupByComplexity(Ops, &LI, DT); | ||||
2420 | |||||
2421 | // If there are any constants, fold them together. | ||||
2422 | unsigned Idx = 0; | ||||
2423 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { | ||||
2424 | ++Idx; | ||||
2425 | assert(Idx < Ops.size());
2426 | while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { | ||||
2427 | // We found two constants, fold them together! | ||||
2428 | Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); | ||||
2429 | if (Ops.size() == 2) return Ops[0]; | ||||
2430 | Ops.erase(Ops.begin()+1); // Erase the folded element | ||||
2431 | LHSC = cast<SCEVConstant>(Ops[0]); | ||||
2432 | } | ||||
2433 | |||||
2434 | // If we are left with a constant zero being added, strip it off. | ||||
2435 | if (LHSC->getValue()->isZero()) { | ||||
2436 | Ops.erase(Ops.begin()); | ||||
2437 | --Idx; | ||||
2438 | } | ||||
2439 | |||||
2440 | if (Ops.size() == 1) return Ops[0]; | ||||
2441 | } | ||||
2442 | |||||
2443 | // Delay expensive flag strengthening until necessary. | ||||
2444 | auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) { | ||||
2445 | return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags); | ||||
2446 | }; | ||||
2447 | |||||
2448 | // Limit recursion depth.
2449 | if (Depth > MaxArithDepth || hasHugeExpression(Ops)) | ||||
2450 | return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); | ||||
2451 | |||||
2452 | if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) { | ||||
2453 | // Don't strengthen flags if we have no new information. | ||||
2454 | SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S); | ||||
2455 | if (Add->getNoWrapFlags(OrigFlags) != OrigFlags) | ||||
2456 | Add->setNoWrapFlags(ComputeFlags(Ops)); | ||||
2457 | return S; | ||||
2458 | } | ||||
2459 | |||||
2460 | // Okay, check to see if the same value occurs in the operand list more than | ||||
2461 | // once. If so, merge them together into a multiply expression. Since we
2462 | // sorted the list, these values are required to be adjacent. | ||||
2463 | Type *Ty = Ops[0]->getType(); | ||||
2464 | bool FoundMatch = false; | ||||
2465 | for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) | ||||
2466 | if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 | ||||
2467 | // Scan ahead to count how many equal operands there are. | ||||
2468 | unsigned Count = 2; | ||||
2469 | while (i+Count != e && Ops[i+Count] == Ops[i]) | ||||
2470 | ++Count; | ||||
2471 | // Merge the values into a multiply. | ||||
2472 | const SCEV *Scale = getConstant(Ty, Count); | ||||
2473 | const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1); | ||||
2474 | if (Ops.size() == Count) | ||||
2475 | return Mul; | ||||
2476 | Ops[i] = Mul; | ||||
2477 | Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); | ||||
2478 | --i; e -= Count - 1; | ||||
2479 | FoundMatch = true; | ||||
2480 | } | ||||
2481 | if (FoundMatch) | ||||
2482 | return getAddExpr(Ops, OrigFlags, Depth + 1); | ||||
2483 | |||||
2484 | // Check for truncates. If all the operands are truncated from the same | ||||
2485 | // type, see if factoring out the truncate would permit the result to be | ||||
2486 | // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(m)*x + trunc(n)*y)
2487 | // if the contents of the resulting outer trunc fold to something simple. | ||||
2488 | auto FindTruncSrcType = [&]() -> Type * { | ||||
2489 | // We're ultimately looking to fold an addrec of truncs and muls of only | ||||
2490 | // constants and truncs, so if we find any other types of SCEV | ||||
2491 | // as operands of the addrec then we bail and return nullptr here. | ||||
2492 | // Otherwise, we return the type of the operand of a trunc that we find. | ||||
2493 | if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx])) | ||||
2494 | return T->getOperand()->getType(); | ||||
2495 | if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { | ||||
2496 | const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1); | ||||
2497 | if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp)) | ||||
2498 | return T->getOperand()->getType(); | ||||
2499 | } | ||||
2500 | return nullptr; | ||||
2501 | }; | ||||
2502 | if (auto *SrcType = FindTruncSrcType()) { | ||||
2503 | SmallVector<const SCEV *, 8> LargeOps; | ||||
2504 | bool Ok = true; | ||||
2505 | // Check all the operands to see if they can be represented in the | ||||
2506 | // source type of the truncate. | ||||
2507 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) { | ||||
2508 | if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { | ||||
2509 | if (T->getOperand()->getType() != SrcType) { | ||||
2510 | Ok = false; | ||||
2511 | break; | ||||
2512 | } | ||||
2513 | LargeOps.push_back(T->getOperand()); | ||||
2514 | } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { | ||||
2515 | LargeOps.push_back(getAnyExtendExpr(C, SrcType)); | ||||
2516 | } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { | ||||
2517 | SmallVector<const SCEV *, 8> LargeMulOps; | ||||
2518 | for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { | ||||
2519 | if (const SCEVTruncateExpr *T = | ||||
2520 | dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { | ||||
2521 | if (T->getOperand()->getType() != SrcType) { | ||||
2522 | Ok = false; | ||||
2523 | break; | ||||
2524 | } | ||||
2525 | LargeMulOps.push_back(T->getOperand()); | ||||
2526 | } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { | ||||
2527 | LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); | ||||
2528 | } else { | ||||
2529 | Ok = false; | ||||
2530 | break; | ||||
2531 | } | ||||
2532 | } | ||||
2533 | if (Ok) | ||||
2534 | LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); | ||||
2535 | } else { | ||||
2536 | Ok = false; | ||||
2537 | break; | ||||
2538 | } | ||||
2539 | } | ||||
2540 | if (Ok) { | ||||
2541 | // Evaluate the expression in the larger type. | ||||
2542 | const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1); | ||||
2543 | // If it folds to something simple, use it. Otherwise, don't. | ||||
2544 | if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) | ||||
2545 | return getTruncateExpr(Fold, Ty); | ||||
2546 | } | ||||
2547 | } | ||||
2548 | |||||
2549 | if (Ops.size() == 2) { | ||||
2550 | // Check if we have an expression of the form ((X + C1) - C2), where C1 and | ||||
2551 | // C2 can be folded in a way that allows retaining wrapping flags of (X + | ||||
2552 | // C1). | ||||
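// Illustration (added note): ((X +<nuw> 10) + (-7)) folds to (X +<nuw> 3),
// because the new constant 3 is ule the old constant 10, so the new sum
// never exceeds a value the original NUW add already reached.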
2553 | const SCEV *A = Ops[0]; | ||||
2554 | const SCEV *B = Ops[1]; | ||||
2555 | auto *AddExpr = dyn_cast<SCEVAddExpr>(B); | ||||
2556 | auto *C = dyn_cast<SCEVConstant>(A); | ||||
2557 | if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) { | ||||
2558 | auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt(); | ||||
2559 | auto C2 = C->getAPInt(); | ||||
2560 | SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap; | ||||
2561 | |||||
2562 | APInt ConstAdd = C1 + C2; | ||||
2563 | auto AddFlags = AddExpr->getNoWrapFlags(); | ||||
2564 | // Adding a smaller constant is NUW if the original AddExpr was NUW. | ||||
2565 | if (ScalarEvolution::maskFlags(AddFlags, SCEV::FlagNUW) == | ||||
2566 | SCEV::FlagNUW && | ||||
2567 | ConstAdd.ule(C1)) { | ||||
2568 | PreservedFlags = | ||||
2569 | ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW); | ||||
2570 | } | ||||
2571 | |||||
2572 | // Adding a constant with the same sign and small magnitude is NSW, if the | ||||
2573 | // original AddExpr was NSW. | ||||
2574 | if (ScalarEvolution::maskFlags(AddFlags, SCEV::FlagNSW) == | ||||
2575 | SCEV::FlagNSW && | ||||
2576 | C1.isSignBitSet() == ConstAdd.isSignBitSet() && | ||||
2577 | ConstAdd.abs().ule(C1.abs())) { | ||||
2578 | PreservedFlags = | ||||
2579 | ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW); | ||||
2580 | } | ||||
2581 | |||||
2582 | if (PreservedFlags != SCEV::FlagAnyWrap) { | ||||
2583 | SmallVector<const SCEV *, 4> NewOps(AddExpr->op_begin(), | ||||
2584 | AddExpr->op_end()); | ||||
2585 | NewOps[0] = getConstant(ConstAdd); | ||||
2586 | return getAddExpr(NewOps, PreservedFlags); | ||||
2587 | } | ||||
2588 | } | ||||
2589 | } | ||||
2590 | |||||
2591 | // Skip past any other cast SCEVs. | ||||
2592 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) | ||||
2593 | ++Idx; | ||||
2594 | |||||
2595 | // If there are add operands they would be next. | ||||
2596 | if (Idx < Ops.size()) { | ||||
2597 | bool DeletedAdd = false; | ||||
2598 | // If the original flags and all inlined SCEVAddExprs are NUW, use the | ||||
2599 | // common NUW flag for the expression after inlining. Other flags cannot be
2600 | // preserved, because they may depend on the original order of operations. | ||||
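// e.g. (illustration): if ((A +<nuw> B) +<nuw> (C +<nuw> D)) is flattened
// to (A + B + C + D), NUW still holds for every partial sum, but NSW would
// not survive in general, since reassociation can change which intermediate
// values are computed.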
2601 | SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW); | ||||
2602 | while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { | ||||
2603 | if (Ops.size() > AddOpsInlineThreshold || | ||||
2604 | Add->getNumOperands() > AddOpsInlineThreshold) | ||||
2605 | break; | ||||
2606 | // If we have an add, expand the add operands onto the end of the operands | ||||
2607 | // list. | ||||
2608 | Ops.erase(Ops.begin()+Idx); | ||||
2609 | Ops.append(Add->op_begin(), Add->op_end()); | ||||
2610 | DeletedAdd = true; | ||||
2611 | CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags()); | ||||
2612 | } | ||||
2613 | |||||
2614 | // If we deleted at least one add, we added operands to the end of the list, | ||||
2615 | // and they are not necessarily sorted. Recurse to resort and resimplify | ||||
2616 | // any operands we just acquired. | ||||
2617 | if (DeletedAdd) | ||||
2618 | return getAddExpr(Ops, CommonFlags, Depth + 1); | ||||
2619 | } | ||||
2620 | |||||
2621 | // Skip over the add expression until we get to a multiply. | ||||
2622 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) | ||||
2623 | ++Idx; | ||||
2624 | |||||
2625 | // Check to see if there are any folding opportunities present with | ||||
2626 | // operands multiplied by constant values. | ||||
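// Illustration (added note): for (3*X + 5*X + 7), the helper records X with
// an accumulated scale of 8 and a constant of 7, so the operand list below
// is regenerated as (7 + 8*X).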
2627 | if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { | ||||
2628 | uint64_t BitWidth = getTypeSizeInBits(Ty); | ||||
2629 | DenseMap<const SCEV *, APInt> M; | ||||
2630 | SmallVector<const SCEV *, 8> NewOps; | ||||
2631 | APInt AccumulatedConstant(BitWidth, 0); | ||||
2632 | if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, | ||||
2633 | Ops.data(), Ops.size(), | ||||
2634 | APInt(BitWidth, 1), *this)) { | ||||
2635 | struct APIntCompare { | ||||
2636 | bool operator()(const APInt &LHS, const APInt &RHS) const { | ||||
2637 | return LHS.ult(RHS); | ||||
2638 | } | ||||
2639 | }; | ||||
2640 | |||||
2641 | // Some interesting folding opportunity is present, so it's worthwhile to
2642 | // re-generate the operands list. Group the operands by constant scale, | ||||
2643 | // to avoid multiplying by the same constant scale multiple times. | ||||
2644 | std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; | ||||
2645 | for (const SCEV *NewOp : NewOps) | ||||
2646 | MulOpLists[M.find(NewOp)->second].push_back(NewOp); | ||||
2647 | // Re-generate the operands list. | ||||
2648 | Ops.clear(); | ||||
2649 | if (AccumulatedConstant != 0) | ||||
2650 | Ops.push_back(getConstant(AccumulatedConstant)); | ||||
2651 | for (auto &MulOp : MulOpLists) { | ||||
2652 | if (MulOp.first == 1) { | ||||
2653 | Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1)); | ||||
2654 | } else if (MulOp.first != 0) { | ||||
2655 | Ops.push_back(getMulExpr( | ||||
2656 | getConstant(MulOp.first), | ||||
2657 | getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), | ||||
2658 | SCEV::FlagAnyWrap, Depth + 1)); | ||||
2659 | } | ||||
2660 | } | ||||
2661 | if (Ops.empty()) | ||||
2662 | return getZero(Ty); | ||||
2663 | if (Ops.size() == 1) | ||||
2664 | return Ops[0]; | ||||
2665 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | ||||
2666 | } | ||||
2667 | } | ||||
2668 | |||||
2669 | // If we are adding something to a multiply expression, make sure the | ||||
2670 | // something is not already an operand of the multiply. If so, merge it into | ||||
2671 | // the multiply. | ||||
2672 | for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { | ||||
2673 | const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); | ||||
2674 | for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { | ||||
2675 | const SCEV *MulOpSCEV = Mul->getOperand(MulOp); | ||||
2676 | if (isa<SCEVConstant>(MulOpSCEV)) | ||||
2677 | continue; | ||||
2678 | for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) | ||||
2679 | if (MulOpSCEV == Ops[AddOp]) { | ||||
2680 | // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) | ||||
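// Note (added for clarity): the `MulOp == 0` subscript below evaluates to 1
// when the matched operand is operand 0 and to 0 otherwise, i.e. it selects
// "the other" operand of a two-operand multiply.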
2681 | const SCEV *InnerMul = Mul->getOperand(MulOp == 0); | ||||
2682 | if (Mul->getNumOperands() != 2) { | ||||
2683 | // If the multiply has more than two operands, we must get the | ||||
2684 | // Y*Z term. | ||||
2685 | SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), | ||||
2686 | Mul->op_begin()+MulOp); | ||||
2687 | MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); | ||||
2688 | InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); | ||||
2689 | } | ||||
2690 | SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; | ||||
2691 | const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); | ||||
2692 | const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, | ||||
2693 | SCEV::FlagAnyWrap, Depth + 1); | ||||
2694 | if (Ops.size() == 2) return OuterMul; | ||||
2695 | if (AddOp < Idx) { | ||||
2696 | Ops.erase(Ops.begin()+AddOp); | ||||
2697 | Ops.erase(Ops.begin()+Idx-1); | ||||
2698 | } else { | ||||
2699 | Ops.erase(Ops.begin()+Idx); | ||||
2700 | Ops.erase(Ops.begin()+AddOp-1); | ||||
2701 | } | ||||
2702 | Ops.push_back(OuterMul); | ||||
2703 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | ||||
2704 | } | ||||
2705 | |||||
2706 | // Check this multiply against other multiplies being added together. | ||||
2707 | for (unsigned OtherMulIdx = Idx+1; | ||||
2708 | OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); | ||||
2709 | ++OtherMulIdx) { | ||||
2710 | const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); | ||||
2711 | // If MulOp occurs in OtherMul, we can fold the two multiplies | ||||
2712 | // together. | ||||
2713 | for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); | ||||
2714 | OMulOp != e; ++OMulOp) | ||||
2715 | if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { | ||||
2716 | // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) | ||||
2717 | const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); | ||||
2718 | if (Mul->getNumOperands() != 2) { | ||||
2719 | SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), | ||||
2720 | Mul->op_begin()+MulOp); | ||||
2721 | MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); | ||||
2722 | InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); | ||||
2723 | } | ||||
2724 | const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); | ||||
2725 | if (OtherMul->getNumOperands() != 2) { | ||||
2726 | SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), | ||||
2727 | OtherMul->op_begin()+OMulOp); | ||||
2728 | MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); | ||||
2729 | InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); | ||||
2730 | } | ||||
2731 | SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; | ||||
2732 | const SCEV *InnerMulSum = | ||||
2733 | getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); | ||||
2734 | const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, | ||||
2735 | SCEV::FlagAnyWrap, Depth + 1); | ||||
2736 | if (Ops.size() == 2) return OuterMul; | ||||
2737 | Ops.erase(Ops.begin()+Idx); | ||||
2738 | Ops.erase(Ops.begin()+OtherMulIdx-1); | ||||
2739 | Ops.push_back(OuterMul); | ||||
2740 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | ||||
2741 | } | ||||
2742 | } | ||||
2743 | } | ||||
2744 | } | ||||
2745 | |||||
2746 | // If there are any add recurrences in the operands list, see if any other | ||||
2747 | // added values are loop invariant. If so, we can fold them into the | ||||
2748 | // recurrence. | ||||
2749 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) | ||||
2750 | ++Idx; | ||||
2751 | |||||
2752 | // Scan over all recurrences, trying to fold loop invariants into them. | ||||
2753 | for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { | ||||
2754 | // Scan all of the other operands to this add and add them to the vector if | ||||
2755 | // they are loop invariant w.r.t. the recurrence. | ||||
2756 | SmallVector<const SCEV *, 8> LIOps; | ||||
2757 | const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); | ||||
2758 | const Loop *AddRecLoop = AddRec->getLoop(); | ||||
2759 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) | ||||
2760 | if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { | ||||
2761 | LIOps.push_back(Ops[i]); | ||||
2762 | Ops.erase(Ops.begin()+i); | ||||
2763 | --i; --e; | ||||
2764 | } | ||||
2765 | |||||
2766 | // If we found some loop invariants, fold them into the recurrence. | ||||
2767 | if (!LIOps.empty()) { | ||||
2768 | // Compute nowrap flags for the addition of the loop-invariant ops and | ||||
2769 | // the addrec. Temporarily push it as an operand for that purpose. | ||||
2770 | LIOps.push_back(AddRec); | ||||
2771 | SCEV::NoWrapFlags Flags = ComputeFlags(LIOps); | ||||
2772 | LIOps.pop_back(); | ||||
2773 | |||||
2774 | // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} | ||||
2775 | LIOps.push_back(AddRec->getStart()); | ||||
2776 | |||||
2777 | SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); | ||||
2778 | // This follows from the fact that the no-wrap flags on the outer add | ||||
2779 | // expression are applicable on the 0th iteration, when the add recurrence | ||||
2780 | // will be equal to its start value. | ||||
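// Example (illustration): 5 + {3,+,1}<L> with loop-invariant 5 becomes
// {8,+,1}<L>; at iteration 0 the recurrence equals its start, which is the
// point where the outer add's no-wrap flags were established.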
2781 | AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); | ||||
2782 | |||||
2783 | // Build the new addrec. Propagate the NUW and NSW flags if both the | ||||
2784 | // outer add and the inner addrec are guaranteed to have no overflow. | ||||
2785 | // Always propagate NW. | ||||
2786 | Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); | ||||
2787 | const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); | ||||
2788 | |||||
2789 | // If all of the other operands were loop invariant, we are done. | ||||
2790 | if (Ops.size() == 1) return NewRec; | ||||
2791 | |||||
2792 | // Otherwise, add the folded AddRec by the non-invariant parts. | ||||
2793 | for (unsigned i = 0;; ++i) | ||||
2794 | if (Ops[i] == AddRec) { | ||||
2795 | Ops[i] = NewRec; | ||||
2796 | break; | ||||
2797 | } | ||||
2798 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | ||||
2799 | } | ||||
2800 | |||||
2801 | // Okay, if there weren't any loop invariants to be folded, check to see if | ||||
2802 | // there are multiple AddRec's with the same loop induction variable being | ||||
2803 | // added together. If so, we can fold them. | ||||
2804 | for (unsigned OtherIdx = Idx+1; | ||||
2805 | OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); | ||||
2806 | ++OtherIdx) { | ||||
2807 | // We expect the AddRecExpr's to be sorted in reverse dominance order, | ||||
2808 | // so that the 1st found AddRecExpr is dominated by all others. | ||||
2809 | assert(DT.dominates(
2810 | cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2811 | AddRec->getLoop()->getHeader()) &&
2812 | "AddRecExprs are not sorted in reverse dominance order?");
2813 | if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { | ||||
2814 | // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> | ||||
2815 | SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); | ||||
2816 | for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); | ||||
2817 | ++OtherIdx) { | ||||
2818 | const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); | ||||
2819 | if (OtherAddRec->getLoop() == AddRecLoop) { | ||||
2820 | for (unsigned i = 0, e = OtherAddRec->getNumOperands(); | ||||
2821 | i != e; ++i) { | ||||
2822 | if (i >= AddRecOps.size()) { | ||||
2823 | AddRecOps.append(OtherAddRec->op_begin()+i, | ||||
2824 | OtherAddRec->op_end()); | ||||
2825 | break; | ||||
2826 | } | ||||
2827 | SmallVector<const SCEV *, 2> TwoOps = { | ||||
2828 | AddRecOps[i], OtherAddRec->getOperand(i)}; | ||||
2829 | AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); | ||||
2830 | } | ||||
2831 | Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; | ||||
2832 | } | ||||
2833 | } | ||||
2834 | // Step size has changed, so we cannot guarantee no self-wraparound. | ||||
2835 | Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); | ||||
2836 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | ||||
2837 | } | ||||
2838 | } | ||||
2839 | |||||
2840 | // Otherwise couldn't fold anything into this recurrence. Move onto the | ||||
2841 | // next one. | ||||
2842 | } | ||||
2843 | |||||
2844 | // Okay, it looks like we really DO need an add expr. Check to see if we | ||||
2845 | // already have one, otherwise create a new one. | ||||
2846 | return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); | ||||
2847 | } | ||||
2848 | |||||
2849 | const SCEV * | ||||
2850 | ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops, | ||||
2851 | SCEV::NoWrapFlags Flags) { | ||||
2852 | FoldingSetNodeID ID; | ||||
2853 | ID.AddInteger(scAddExpr); | ||||
2854 | for (const SCEV *Op : Ops) | ||||
2855 | ID.AddPointer(Op); | ||||
2856 | void *IP = nullptr; | ||||
2857 | SCEVAddExpr *S = | ||||
2858 | static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); | ||||
2859 | if (!S) { | ||||
2860 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); | ||||
2861 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); | ||||
2862 | S = new (SCEVAllocator) | ||||
2863 | SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size()); | ||||
2864 | UniqueSCEVs.InsertNode(S, IP); | ||||
2865 | addToLoopUseLists(S); | ||||
2866 | } | ||||
2867 | S->setNoWrapFlags(Flags); | ||||
2868 | return S; | ||||
2869 | } | ||||
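// Note (added): the flags are (re)applied even when an existing node was
// found in UniqueSCEVs, so a caller that has just proven stronger no-wrap
// behavior refreshes the cached expression in place rather than creating a
// structurally identical duplicate.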
2870 | |||||
2871 | const SCEV * | ||||
2872 | ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops, | ||||
2873 | const Loop *L, SCEV::NoWrapFlags Flags) { | ||||
2874 | FoldingSetNodeID ID; | ||||
2875 | ID.AddInteger(scAddRecExpr); | ||||
2876 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) | ||||
2877 | ID.AddPointer(Ops[i]); | ||||
2878 | ID.AddPointer(L); | ||||
2879 | void *IP = nullptr; | ||||
2880 | SCEVAddRecExpr *S = | ||||
2881 | static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); | ||||
2882 | if (!S) { | ||||
2883 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); | ||||
2884 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); | ||||
2885 | S = new (SCEVAllocator) | ||||
2886 | SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L); | ||||
2887 | UniqueSCEVs.InsertNode(S, IP); | ||||
2888 | addToLoopUseLists(S); | ||||
2889 | } | ||||
2890 | setNoWrapFlags(S, Flags); | ||||
2891 | return S; | ||||
2892 | } | ||||
2893 | |||||
2894 | const SCEV * | ||||
2895 | ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops, | ||||
2896 | SCEV::NoWrapFlags Flags) { | ||||
2897 | FoldingSetNodeID ID; | ||||
2898 | ID.AddInteger(scMulExpr); | ||||
2899 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) | ||||
2900 | ID.AddPointer(Ops[i]); | ||||
2901 | void *IP = nullptr; | ||||
2902 | SCEVMulExpr *S = | ||||
2903 | static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); | ||||
2904 | if (!S) { | ||||
2905 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); | ||||
2906 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); | ||||
2907 | S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), | ||||
2908 | O, Ops.size()); | ||||
2909 | UniqueSCEVs.InsertNode(S, IP); | ||||
2910 | addToLoopUseLists(S); | ||||
2911 | } | ||||
2912 | S->setNoWrapFlags(Flags); | ||||
2913 | return S; | ||||
2914 | } | ||||
2915 | |||||
2916 | static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) { | ||||
2917 | uint64_t k = i*j; | ||||
2918 | if (j > 1 && k / j != i) Overflow = true; | ||||
2919 | return k; | ||||
2920 | } | ||||
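// For instance (illustration): umul_ov(1ULL << 32, 1ULL << 32, Overflow)
// returns 0 and sets Overflow, since the true product needs 65 bits. Note
// the helper only ever sets Overflow; it never clears it.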
2921 | |||||
2922 | /// Compute the result of "n choose k", the binomial coefficient. If an | ||||
2923 | /// intermediate computation overflows, Overflow will be set and the return will | ||||
2924 | /// be garbage. Overflow is not cleared on absence of overflow. | ||||
2925 | static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) { | ||||
2926 | // We use the multiplicative formula: | ||||
2927 | // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 . | ||||
2928 | // At each iteration, we take the n-th term of the numerator and divide by the
2929 | // (k-n)th term of the denominator. This division will always produce an | ||||
2930 | // integral result, and helps reduce the chance of overflow in the | ||||
2931 | // intermediate computations. However, we can still overflow even when the | ||||
2932 | // final result would fit. | ||||
2933 | |||||
2934 | if (n == 0 || n == k) return 1; | ||||
2935 | if (k > n) return 0; | ||||
2936 | |||||
2937 | if (k > n/2) | ||||
2938 | k = n-k; | ||||
2939 | |||||
2940 | uint64_t r = 1; | ||||
2941 | for (uint64_t i = 1; i <= k; ++i) { | ||||
2942 | r = umul_ov(r, n-(i-1), Overflow); | ||||
2943 | r /= i; | ||||
2944 | } | ||||
2945 | return r; | ||||
2946 | } | ||||
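// Worked example (illustration): Choose(5, 2, Ov) leaves k == 2 (it is not
// greater than n/2 == 2), then computes r = (1*5)/1 = 5 and r = (5*4)/2 =
// 10, i.e. C(5,2) = 10, without touching Ov.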
2947 | |||||
2948 | /// Determine if any of the operands in this SCEV are a constant or if | ||||
2949 | /// any of the add or multiply expressions in this SCEV contain a constant. | ||||
2950 | static bool containsConstantInAddMulChain(const SCEV *StartExpr) { | ||||
2951 | struct FindConstantInAddMulChain { | ||||
2952 | bool FoundConstant = false; | ||||
2953 | |||||
2954 | bool follow(const SCEV *S) { | ||||
2955 | FoundConstant |= isa<SCEVConstant>(S); | ||||
2956 | return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S); | ||||
2957 | } | ||||
2958 | |||||
2959 | bool isDone() const { | ||||
2960 | return FoundConstant; | ||||
2961 | } | ||||
2962 | }; | ||||
2963 | |||||
2964 | FindConstantInAddMulChain F; | ||||
2965 | SCEVTraversal<FindConstantInAddMulChain> ST(F); | ||||
2966 | ST.visitAll(StartExpr); | ||||
2967 | return F.FoundConstant; | ||||
2968 | } | ||||
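// For example (illustration): (2 + 3*X) contains a constant in its add/mul
// chain, so a multiply by another constant is distributed over it below;
// (X*Y + Z) does not, and distributing there would only grow the
// expression.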
2969 | |||||
2970 | /// Get a canonical multiply expression, or something simpler if possible. | ||||
2971 | const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, | ||||
2972 | SCEV::NoWrapFlags OrigFlags, | ||||
2973 | unsigned Depth) { | ||||
2974 | assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
2975 | "only nuw or nsw allowed");
2976 | assert(!Ops.empty() && "Cannot get empty mul!");
2977 | if (Ops.size() == 1) return Ops[0]; | ||||
2978 | #ifndef NDEBUG
2979 | Type *ETy = Ops[0]->getType();
2980 | assert(!ETy->isPointerTy());
2981 | for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2982 | assert(Ops[i]->getType() == ETy &&
2983 | "SCEVMulExpr operand types don't match!");
2984 | #endif
2985 | |||||
2986 | // Sort by complexity, this groups all similar expression types together. | ||||
2987 | GroupByComplexity(Ops, &LI, DT); | ||||
2988 | |||||
2989 | // If there are any constants, fold them together. | ||||
2990 | unsigned Idx = 0; | ||||
2991 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { | ||||
2992 | ++Idx; | ||||
2993 | assert(Idx < Ops.size());
2994 | while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { | ||||
2995 | // We found two constants, fold them together! | ||||
2996 | Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt()); | ||||
2997 | if (Ops.size() == 2) return Ops[0]; | ||||
2998 | Ops.erase(Ops.begin()+1); // Erase the folded element | ||||
2999 | LHSC = cast<SCEVConstant>(Ops[0]); | ||||
3000 | } | ||||
3001 | |||||
3002 | // If we have a multiply of zero, it will always be zero. | ||||
3003 | if (LHSC->getValue()->isZero()) | ||||
3004 | return LHSC; | ||||
3005 | |||||
3006 | // If we are left with a constant one being multiplied, strip it off. | ||||
3007 | if (LHSC->getValue()->isOne()) { | ||||
3008 | Ops.erase(Ops.begin()); | ||||
3009 | --Idx; | ||||
3010 | } | ||||
3011 | |||||
3012 | if (Ops.size() == 1) | ||||
3013 | return Ops[0]; | ||||
3014 | } | ||||
3015 | |||||
3016 | // Delay expensive flag strengthening until necessary. | ||||
3017 | auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) { | ||||
3018 | return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags); | ||||
3019 | }; | ||||
3020 | |||||
3021 | // Limit recursion depth.
3022 | if (Depth > MaxArithDepth || hasHugeExpression(Ops)) | ||||
3023 | return getOrCreateMulExpr(Ops, ComputeFlags(Ops)); | ||||
3024 | |||||
3025 | if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) { | ||||
3026 | // Don't strengthen flags if we have no new information. | ||||
3027 | SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S); | ||||
3028 | if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags) | ||||
3029 | Mul->setNoWrapFlags(ComputeFlags(Ops)); | ||||
3030 | return S; | ||||
3031 | } | ||||
3032 | |||||
3033 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { | ||||
3034 | if (Ops.size() == 2) { | ||||
3035 | // C1*(C2+V) -> C1*C2 + C1*V | ||||
3036 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) | ||||
3037 | // If any of Add's ops are Adds or Muls with a constant, apply this | ||||
3038 | // transformation as well. | ||||
3039 | // | ||||
3040 | // TODO: There are some cases where this transformation is not | ||||
3041 | // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of | ||||
3042 | // this transformation should be narrowed down. | ||||
3043 | if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) | ||||
3044 | return getAddExpr(getMulExpr(LHSC, Add->getOperand(0), | ||||
3045 | SCEV::FlagAnyWrap, Depth + 1), | ||||
3046 | getMulExpr(LHSC, Add->getOperand(1), | ||||
3047 | SCEV::FlagAnyWrap, Depth + 1), | ||||
3048 | SCEV::FlagAnyWrap, Depth + 1); | ||||
3049 | |||||
3050 | if (Ops[0]->isAllOnesValue()) { | ||||
3051 | // If we have a mul by -1 of an add, try distributing the -1 among the | ||||
3052 | // add operands. | ||||
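// e.g. (illustration): -1 * (X + 5) becomes (-5 + -1*X): the constant
// partial product folds, which is exactly the AnyFolded condition required
// before committing to the rewrite (otherwise distribution and
// re-collection could ping-pong indefinitely).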
3053 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { | ||||
3054 | SmallVector<const SCEV *, 4> NewOps; | ||||
3055 | bool AnyFolded = false; | ||||
3056 | for (const SCEV *AddOp : Add->operands()) { | ||||
3057 | const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, | ||||
3058 | Depth + 1); | ||||
3059 | if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; | ||||
3060 | NewOps.push_back(Mul); | ||||
3061 | } | ||||
3062 | if (AnyFolded) | ||||
3063 | return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); | ||||
3064 | } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { | ||||
3065 | // Negation preserves a recurrence's no self-wrap property. | ||||
3066 | SmallVector<const SCEV *, 4> Operands; | ||||
3067 | for (const SCEV *AddRecOp : AddRec->operands()) | ||||
3068 | Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, | ||||
3069 | Depth + 1)); | ||||
3070 | |||||
3071 | return getAddRecExpr(Operands, AddRec->getLoop(), | ||||
3072 | AddRec->getNoWrapFlags(SCEV::FlagNW)); | ||||
3073 | } | ||||
3074 | } | ||||
3075 | } | ||||
3076 | } | ||||
3077 | |||||
3078 | // Skip over the add expression until we get to a multiply. | ||||
3079 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) | ||||
3080 | ++Idx; | ||||
3081 | |||||
3082 | // If there are mul operands inline them all into this expression. | ||||
3083 | if (Idx < Ops.size()) { | ||||
3084 | bool DeletedMul = false; | ||||
3085 | while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { | ||||
3086 | if (Ops.size() > MulOpsInlineThreshold) | ||||
3087 | break; | ||||
3088 | // If we have a mul, expand the mul operands onto the end of the
3089 | // operands list. | ||||
3090 | Ops.erase(Ops.begin()+Idx); | ||||
3091 | Ops.append(Mul->op_begin(), Mul->op_end()); | ||||
3092 | DeletedMul = true; | ||||
3093 | } | ||||
3094 | |||||
3095 | // If we deleted at least one mul, we added operands to the end of the | ||||
3096 | // list, and they are not necessarily sorted. Recurse to resort and | ||||
3097 | // resimplify any operands we just acquired. | ||||
3098 | if (DeletedMul) | ||||
3099 | return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | ||||
3100 | } | ||||
3101 | |||||
3102 | // If there are any add recurrences in the operands list, see if any other
3103 | // multiplied values are loop invariant. If so, we can fold them into the
3104 | // recurrence.
3105 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) | ||||
3106 | ++Idx; | ||||
3107 | |||||
3108 | // Scan over all recurrences, trying to fold loop invariants into them. | ||||
3109 | for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { | ||||
3110 | // Scan all of the other operands to this mul and add them to the vector | ||||
3111 | // if they are loop invariant w.r.t. the recurrence. | ||||
3112 | SmallVector<const SCEV *, 8> LIOps; | ||||
3113 | const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); | ||||
3114 | const Loop *AddRecLoop = AddRec->getLoop(); | ||||
3115 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) | ||||
3116 | if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { | ||||
3117 | LIOps.push_back(Ops[i]); | ||||
3118 | Ops.erase(Ops.begin()+i); | ||||
3119 | --i; --e; | ||||
3120 | } | ||||
3121 | |||||
3122 | // If we found some loop invariants, fold them into the recurrence. | ||||
3123 | if (!LIOps.empty()) { | ||||
3124 | // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} | ||||
3125 | SmallVector<const SCEV *, 4> NewOps; | ||||
3126 | NewOps.reserve(AddRec->getNumOperands()); | ||||
3127 | const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); | ||||
3128 | for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) | ||||
3129 | NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), | ||||
3130 | SCEV::FlagAnyWrap, Depth + 1)); | ||||
3131 | |||||
3132 | // Build the new addrec. Propagate the NUW and NSW flags if both the | ||||
3133 | // outer mul and the inner addrec are guaranteed to have no overflow. | ||||
3134 | // | ||||
3135 | // The no-self-wrap flag cannot be guaranteed after changing the step size,
3136 | // but it will be inferred if either NUW or NSW is true.
3137 | SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec}); | ||||
3138 | const SCEV *NewRec = getAddRecExpr( | ||||
3139 | NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags)); | ||||
3140 | |||||
3141 | // If all of the other operands were loop invariant, we are done. | ||||
3142 | if (Ops.size() == 1) return NewRec; | ||||
3143 | |||||
3144 | // Otherwise, multiply the folded AddRec by the non-invariant parts. | ||||
3145 | for (unsigned i = 0;; ++i) | ||||
3146 | if (Ops[i] == AddRec) { | ||||
3147 | Ops[i] = NewRec; | ||||
3148 | break; | ||||
3149 | } | ||||
3150 | return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | ||||
3151 | } | ||||
3152 | |||||
3153 | // Okay, if there weren't any loop invariants to be folded, check to see | ||||
3154 | // if there are multiple AddRec's with the same loop induction variable | ||||
3155 | // being multiplied together. If so, we can fold them. | ||||
3156 | |||||
3157 | // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> | ||||
3158 | // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ | ||||
3159 | // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z | ||||
3160 | // ]]],+,...up to x=2n}. | ||||
3161 | // Note that the arguments to choose() are always integers with values | ||||
3162 | // known at compile time, never SCEV objects. | ||||
3163 | // | ||||
3164 | // The implementation avoids pointless extra computations when the two | ||||
3165 | // addrec's are of different length (mathematically, it's equivalent to | ||||
3166 | // an infinite stream of zeros on the right). | ||||
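// Concrete instance (illustration): {1,+,1}<L> * {1,+,1}<L>, i.e.
// (n+1) * (n+1), folds to {1,+,3,+,2}<L>: its values 1, 4, 9, 16, ...
// match (n+1)^2, with first difference {3,+,2} and constant second
// difference 2.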
3167 | bool OpsModified = false; | ||||
3168 | for (unsigned OtherIdx = Idx+1; | ||||
3169 | OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); | ||||
3170 | ++OtherIdx) { | ||||
3171 | const SCEVAddRecExpr *OtherAddRec = | ||||
3172 | dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); | ||||
3173 | if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) | ||||
3174 | continue; | ||||
3175 | |||||
3176 | // Limit max number of arguments to avoid creation of unreasonably big | ||||
3177 | // SCEVAddRecs with very complex operands. | ||||
3178 | if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 > | ||||
3179 | MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec})) | ||||
3180 | continue; | ||||
3181 | |||||
3182 | bool Overflow = false; | ||||
3183 | Type *Ty = AddRec->getType(); | ||||
3184 | bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64; | ||||
3185 | SmallVector<const SCEV*, 7> AddRecOps; | ||||
3186 | for (int x = 0, xe = AddRec->getNumOperands() + | ||||
3187 | OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) { | ||||
3188 | SmallVector <const SCEV *, 7> SumOps; | ||||
3189 | for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) { | ||||
3190 | uint64_t Coeff1 = Choose(x, 2*x - y, Overflow); | ||||
3191 | for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1), | ||||
3192 | ze = std::min(x+1, (int)OtherAddRec->getNumOperands()); | ||||
3193 | z < ze && !Overflow; ++z) { | ||||
3194 | uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow); | ||||
3195 | uint64_t Coeff; | ||||
3196 | if (LargerThan64Bits) | ||||
3197 | Coeff = umul_ov(Coeff1, Coeff2, Overflow); | ||||
3198 | else | ||||
3199 | Coeff = Coeff1*Coeff2; | ||||
3200 | const SCEV *CoeffTerm = getConstant(Ty, Coeff); | ||||
3201 | const SCEV *Term1 = AddRec->getOperand(y-z); | ||||
3202 | const SCEV *Term2 = OtherAddRec->getOperand(z); | ||||
3203 | SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2, | ||||
3204 | SCEV::FlagAnyWrap, Depth + 1)); | ||||
3205 | } | ||||
3206 | } | ||||
3207 | if (SumOps.empty()) | ||||
3208 | SumOps.push_back(getZero(Ty)); | ||||
3209 | AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1)); | ||||
3210 | } | ||||
3211 | if (!Overflow) { | ||||
3212 | const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop, | ||||
3213 | SCEV::FlagAnyWrap); | ||||
3214 | if (Ops.size() == 2) return NewAddRec; | ||||
3215 | Ops[Idx] = NewAddRec; | ||||
3216 | Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; | ||||
3217 | OpsModified = true; | ||||
3218 | AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec); | ||||
3219 | if (!AddRec) | ||||
3220 | break; | ||||
3221 | } | ||||
3222 | } | ||||
3223 | if (OpsModified) | ||||
3224 | return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | ||||
3225 | |||||
3226 | // Otherwise couldn't fold anything into this recurrence. Move onto the | ||||
3227 | // next one. | ||||
3228 | } | ||||
3229 | |||||
3230 | // Okay, it looks like we really DO need a mul expr. Check to see if we
3231 | // already have one, otherwise create a new one. | ||||
3232 | return getOrCreateMulExpr(Ops, ComputeFlags(Ops)); | ||||
3233 | } | ||||
3234 | |||||
3235 | /// Represents an unsigned remainder expression based on unsigned division. | ||||
3236 | const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS, | ||||
3237 | const SCEV *RHS) { | ||||
3238 | assert(getEffectiveSCEVType(LHS->getType()) ==
3239 | getEffectiveSCEVType(RHS->getType()) &&
3240 | "SCEVURemExpr operand types don't match!");
3241 | |||||
3242 | // Short-circuit easy cases | ||||
3243 | if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { | ||||
3244 | // If constant is one, the result is trivial | ||||
3245 | if (RHSC->getValue()->isOne()) | ||||
3246 | return getZero(LHS->getType()); // X urem 1 --> 0 | ||||
3247 | |||||
3248 | // If constant is a power of two, fold into a zext(trunc(LHS)). | ||||
3249 | if (RHSC->getAPInt().isPowerOf2()) { | ||||
3250 | Type *FullTy = LHS->getType(); | ||||
3251 | Type *TruncTy = | ||||
3252 | IntegerType::get(getContext(), RHSC->getAPInt().logBase2()); | ||||
3253 | return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy); | ||||
3254 | } | ||||
3255 | } | ||||
3256 | |||||
3257 | // Fallback to %a == %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y) | ||||
3258 | const SCEV *UDiv = getUDivExpr(LHS, RHS); | ||||
3259 | const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW); | ||||
3260 | return getMinusSCEV(LHS, Mult, SCEV::FlagNUW); | ||||
3261 | } | ||||
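// Example (illustration): X urem 8 on i32 takes the power-of-two path and
// becomes (zext i3 (trunc X to i3) to i32), while X urem 7 falls through to
// the generic X - 7 * (X udiv 7) form with both operations marked <nuw>.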
3262 | |||||
3263 | /// Get a canonical unsigned division expression, or something simpler if | ||||
3264 | /// possible. | ||||
3265 | const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, | ||||
3266 | const SCEV *RHS) { | ||||
3267 | assert(!LHS->getType()->isPointerTy() &&
3268 | "SCEVUDivExpr operand can't be pointer!");
3269 | assert(LHS->getType() == RHS->getType() &&
3270 | "SCEVUDivExpr operand types don't match!");
3271 | |||||
3272 | FoldingSetNodeID ID; | ||||
3273 | ID.AddInteger(scUDivExpr); | ||||
3274 | ID.AddPointer(LHS); | ||||
3275 | ID.AddPointer(RHS); | ||||
3276 | void *IP = nullptr; | ||||
3277 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) | ||||
3278 | return S; | ||||
3279 | |||||
3280 | // 0 udiv Y == 0 | ||||
3281 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) | ||||
3282 | if (LHSC->getValue()->isZero()) | ||||
3283 | return LHS; | ||||
3284 | |||||
3285 | if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { | ||||
3286 | if (RHSC->getValue()->isOne()) | ||||
3287 | return LHS; // X udiv 1 --> x | ||||
3288 | // If the denominator is zero, the result of the udiv is undefined. Don't | ||||
3289 | // try to analyze it, because the resolution chosen here may differ from | ||||
3290 | // the resolution chosen in other parts of the compiler. | ||||
3291 | if (!RHSC->getValue()->isZero()) { | ||||
3292 | // Determine if the division can be folded into the operands of
3293 | // its left-hand side.
3294 | // TODO: Generalize this to non-constants by using known-bits information. | ||||
3295 | Type *Ty = LHS->getType(); | ||||
3296 | unsigned LZ = RHSC->getAPInt().countLeadingZeros(); | ||||
3297 | unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; | ||||
3298 | // For non-power-of-two values, effectively round the value up to the | ||||
3299 | // nearest power of two. | ||||
3300 | if (!RHSC->getAPInt().isPowerOf2()) | ||||
3301 | ++MaxShiftAmt; | ||||
3302 | IntegerType *ExtTy = | ||||
3303 | IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); | ||||
3304 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) | ||||
3305 | if (const SCEVConstant *Step = | ||||
3306 | dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) { | ||||
3307 | // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. | ||||
3308 | const APInt &StepInt = Step->getAPInt(); | ||||
3309 | const APInt &DivInt = RHSC->getAPInt(); | ||||
3310 | if (!StepInt.urem(DivInt) && | ||||
3311 | getZeroExtendExpr(AR, ExtTy) == | ||||
3312 | getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), | ||||
3313 | getZeroExtendExpr(Step, ExtTy), | ||||
3314 | AR->getLoop(), SCEV::FlagAnyWrap)) { | ||||
3315 | SmallVector<const SCEV *, 4> Operands; | ||||
3316 | for (const SCEV *Op : AR->operands()) | ||||
3317 | Operands.push_back(getUDivExpr(Op, RHS)); | ||||
3318 | return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW); | ||||
3319 | } | ||||
3320 | // Get a canonical UDivExpr for a recurrence.
3321 | // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3322 | // We can currently only fold X%N if X is constant.
3323 | const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); | ||||
3324 | if (StartC && !DivInt.urem(StepInt) && | ||||
3325 | getZeroExtendExpr(AR, ExtTy) == | ||||
3326 | getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), | ||||
3327 | getZeroExtendExpr(Step, ExtTy), | ||||
3328 | AR->getLoop(), SCEV::FlagAnyWrap)) { | ||||
3329 | const APInt &StartInt = StartC->getAPInt(); | ||||
3330 | const APInt &StartRem = StartInt.urem(StepInt); | ||||
3331 | if (StartRem != 0) { | ||||
3332 | const SCEV *NewLHS = | ||||
3333 | getAddRecExpr(getConstant(StartInt - StartRem), Step, | ||||
3334 | AR->getLoop(), SCEV::FlagNW); | ||||
3335 | if (LHS != NewLHS) { | ||||
3336 | LHS = NewLHS; | ||||
3337 | |||||
3338 | // Reset the ID to include the new LHS, and check if it is | ||||
3339 | // already cached. | ||||
3340 | ID.clear(); | ||||
3341 | ID.AddInteger(scUDivExpr); | ||||
3342 | ID.AddPointer(LHS); | ||||
3343 | ID.AddPointer(RHS); | ||||
3344 | IP = nullptr; | ||||
3345 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) | ||||
3346 | return S; | ||||
3347 | } | ||||
3348 | } | ||||
3349 | } | ||||
3350 | } | ||||
3351 | // (A*B)/C --> A*(B/C) if safe and B/C can be folded. | ||||
3352 | if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { | ||||
3353 | SmallVector<const SCEV *, 4> Operands; | ||||
3354 | for (const SCEV *Op : M->operands()) | ||||
3355 | Operands.push_back(getZeroExtendExpr(Op, ExtTy)); | ||||
3356 | if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) | ||||
3357 | // Find an operand that's safely divisible. | ||||
3358 | for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { | ||||
3359 | const SCEV *Op = M->getOperand(i); | ||||
3360 | const SCEV *Div = getUDivExpr(Op, RHSC); | ||||
3361 | if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { | ||||
3362 | Operands = SmallVector<const SCEV *, 4>(M->operands()); | ||||
3363 | Operands[i] = Div; | ||||
3364 | return getMulExpr(Operands); | ||||
3365 | } | ||||
3366 | } | ||||
3367 | } | ||||
3368 | |||||
3369 | // (A/B)/C --> A/(B*C) if safe and B*C can be folded. | ||||
3370 | if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { | ||||
3371 | if (auto *DivisorConstant = | ||||
3372 | dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { | ||||
3373 | bool Overflow = false; | ||||
3374 | APInt NewRHS = | ||||
3375 | DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); | ||||
3376 | if (Overflow) { | ||||
3377 | return getConstant(RHSC->getType(), 0, false); | ||||
3378 | } | ||||
3379 | return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); | ||||
3380 | } | ||||
3381 | } | ||||
3382 | |||||
3383 | // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. | ||||
3384 | if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { | ||||
3385 | SmallVector<const SCEV *, 4> Operands; | ||||
3386 | for (const SCEV *Op : A->operands()) | ||||
3387 | Operands.push_back(getZeroExtendExpr(Op, ExtTy)); | ||||
3388 | if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { | ||||
3389 | Operands.clear(); | ||||
3390 | for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { | ||||
3391 | const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); | ||||
3392 | if (isa<SCEVUDivExpr>(Op) || | ||||
3393 | getMulExpr(Op, RHS) != A->getOperand(i)) | ||||
3394 | break; | ||||
3395 | Operands.push_back(Op); | ||||
3396 | } | ||||
3397 | if (Operands.size() == A->getNumOperands()) | ||||
3398 | return getAddExpr(Operands); | ||||
3399 | } | ||||
3400 | } | ||||
3401 | |||||
3402 | // Fold if both operands are constant. | ||||
3403 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { | ||||
3404 | Constant *LHSCV = LHSC->getValue(); | ||||
3405 | Constant *RHSCV = RHSC->getValue(); | ||||
3406 | return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, | ||||
3407 | RHSCV))); | ||||
3408 | } | ||||
3409 | } | ||||
3410 | } | ||||
3411 | |||||
3412 | // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs | ||||
3413 | // changes). Make sure we get a new one. | ||||
3414 | IP = nullptr; | ||||
3415 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; | ||||
3416 | SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), | ||||
3417 | LHS, RHS); | ||||
3418 | UniqueSCEVs.InsertNode(S, IP); | ||||
3419 | addToLoopUseLists(S); | ||||
3420 | return S; | ||||
3421 | } | ||||
3422 | |||||
3423 | static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { | ||||
3424 | APInt A = C1->getAPInt().abs(); | ||||
3425 | APInt B = C2->getAPInt().abs(); | ||||
3426 | uint32_t ABW = A.getBitWidth(); | ||||
3427 | uint32_t BBW = B.getBitWidth(); | ||||
3428 | |||||
3429 | if (ABW > BBW) | ||||
3430 | B = B.zext(ABW); | ||||
3431 | else if (ABW < BBW) | ||||
3432 | A = A.zext(BBW); | ||||
3433 | |||||
3434 | return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); | ||||
3435 | } | ||||
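// For instance (illustration): for SCEV constants 12 and -8 this returns 4;
// both inputs are taken by absolute value, and the narrower APInt is
// zero-extended so GreatestCommonDivisor sees matching bit widths.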
3436 | |||||
3437 | /// Get a canonical unsigned division expression, or something simpler if | ||||
3438 | /// possible. There is no representation for an exact udiv in SCEV IR, but we | ||||
3439 | /// can attempt to remove factors from the LHS and RHS. We can't do this when | ||||
3440 | /// it's not exact because the udiv may be clearing bits. | ||||
3441 | const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, | ||||
3442 | const SCEV *RHS) { | ||||
3443 | // TODO: we could try to find factors in all sorts of things, but for now we | ||||
3444 | // just deal with u/exact (multiply, constant). See SCEVDivision towards the | ||||
3445 | // end of this file for inspiration. | ||||
3446 | |||||
3447 | const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); | ||||
3448 | if (!Mul || !Mul->hasNoUnsignedWrap()) | ||||
3449 | return getUDivExpr(LHS, RHS); | ||||
3450 | |||||
3451 | if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { | ||||
3452 | // If the mulexpr multiplies by a constant, then that constant must be the | ||||
3453 | // first element of the mulexpr. | ||||
3454 | if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { | ||||
3455 | if (LHSCst == RHSCst) { | ||||
3456 | SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands())); | ||||
3457 | return getMulExpr(Operands); | ||||
3458 | } | ||||
3459 | |||||
3460 | // We can't just assume that LHSCst divides RHSCst cleanly, it could be | ||||
3461 | // that there's a factor provided by one of the other terms. We need to | ||||
3462 | // check. | ||||
3463 | APInt Factor = gcd(LHSCst, RHSCst); | ||||
3464 | if (!Factor.isIntN(1)) { | ||||
3465 | LHSCst = | ||||
3466 | cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); | ||||
3467 | RHSCst = | ||||
3468 | cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); | ||||
3469 | SmallVector<const SCEV *, 2> Operands; | ||||
3470 | Operands.push_back(LHSCst); | ||||
3471 | Operands.append(Mul->op_begin() + 1, Mul->op_end()); | ||||
3472 | LHS = getMulExpr(Operands); | ||||
3473 | RHS = RHSCst; | ||||
3474 | Mul = dyn_cast<SCEVMulExpr>(LHS); | ||||
3475 | if (!Mul) | ||||
3476 | return getUDivExactExpr(LHS, RHS); | ||||
3477 | } | ||||
3478 | } | ||||
3479 | } | ||||
3480 | |||||
3481 | for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { | ||||
3482 | if (Mul->getOperand(i) == RHS) { | ||||
3483 | SmallVector<const SCEV *, 2> Operands; | ||||
3484 | Operands.append(Mul->op_begin(), Mul->op_begin() + i); | ||||
3485 | Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); | ||||
3486 | return getMulExpr(Operands); | ||||
3487 | } | ||||
3488 | } | ||||
3489 | |||||
3490 | return getUDivExpr(LHS, RHS); | ||||
3491 | } | ||||
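// Illustrative use of the exact-division fold above (caller and value names
// are assumptions): for LHS = (4 * %x)<nuw> and RHS = 2, the shared factor 2
// is divided out and the result is (2 * %x) rather than an opaque
// ((4 * %x) /u 2):
//
//   const SCEV *X = SE.getSCEV(XVal);                  // hypothetical %x
//   const SCEV *LHS = SE.getMulExpr(SE.getConstant(X->getType(), 4), X,
//                                   SCEV::FlagNUW);
//   const SCEV *Q = SE.getUDivExactExpr(LHS, SE.getConstant(X->getType(), 2));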
3492 | |||||
3493 | /// Get an add recurrence expression for the specified loop. Simplify the | ||||
3494 | /// expression as much as possible. | ||||
3495 | const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, | ||||
3496 | const Loop *L, | ||||
3497 | SCEV::NoWrapFlags Flags) { | ||||
3498 | SmallVector<const SCEV *, 4> Operands; | ||||
3499 | Operands.push_back(Start); | ||||
3500 | if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) | ||||
3501 | if (StepChrec->getLoop() == L) { | ||||
3502 | Operands.append(StepChrec->op_begin(), StepChrec->op_end()); | ||||
3503 | return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); | ||||
3504 | } | ||||
3505 | |||||
3506 | Operands.push_back(Step); | ||||
3507 | return getAddRecExpr(Operands, L, Flags); | ||||
3508 | } | ||||
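// For instance (illustrative loop name): getAddRecExpr(Zero, One, L,
// SCEV::FlagNUW) builds the canonical induction variable {0,+,1}<nuw><%L>,
// and if Step is itself an addrec over L -- say {1,+,2}<%L> -- the branch
// above flattens the pair into the single higher-order recurrence
// {Start,+,1,+,2}<%L> instead of nesting one recurrence inside another.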
3509 | |||||
3510 | /// Get an add recurrence expression for the specified loop. Simplify the | ||||
3511 | /// expression as much as possible. | ||||
3512 | const SCEV * | ||||
3513 | ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, | ||||
3514 | const Loop *L, SCEV::NoWrapFlags Flags) { | ||||
3515 | if (Operands.size() == 1) return Operands[0]; | ||||
3516 | #ifndef NDEBUG | ||||
3517 | Type *ETy = getEffectiveSCEVType(Operands[0]->getType()); | ||||
3518 | for (unsigned i = 1, e = Operands.size(); i != e; ++i) { | ||||
3519 | assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy && | ||||
3520 | "SCEVAddRecExpr operand types don't match!"); | ||||
3521 | assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer"); | ||||
3522 | } | ||||
3523 | for (unsigned i = 0, e = Operands.size(); i != e; ++i) | ||||
3524 | assert(isLoopInvariant(Operands[i], L) && | ||||
3525 | "SCEVAddRecExpr operand is not loop-invariant!"); | ||||
3526 | #endif | ||||
3527 | |||||
3528 | if (Operands.back()->isZero()) { | ||||
3529 | Operands.pop_back(); | ||||
3530 | return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X | ||||
3531 | } | ||||
3532 | |||||
3533 | // It's tempting to call getConstantMaxBackedgeTakenCount here and | ||||
3534 | // use that information to infer NUW and NSW flags. However, computing a | ||||
3535 | // BE count requires calling getAddRecExpr, so we may not yet have a | ||||
3536 | // meaningful BE count at this point (and if we don't, we'd be stuck | ||||
3537 | // with a SCEVCouldNotCompute as the cached BE count). | ||||
3538 | |||||
3539 | Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); | ||||
3540 | |||||
3541 | // Canonicalize nested AddRecs by nesting them in order of loop depth. | ||||
3542 | if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { | ||||
3543 | const Loop *NestedLoop = NestedAR->getLoop(); | ||||
3544 | if (L->contains(NestedLoop) | ||||
3545 | ? (L->getLoopDepth() < NestedLoop->getLoopDepth()) | ||||
3546 | : (!NestedLoop->contains(L) && | ||||
3547 | DT.dominates(L->getHeader(), NestedLoop->getHeader()))) { | ||||
3548 | SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands()); | ||||
3549 | Operands[0] = NestedAR->getStart(); | ||||
3550 | // AddRecs require their operands be loop-invariant with respect to their | ||||
3551 | // loops. Don't perform this transformation if it would break this | ||||
3552 | // requirement. | ||||
3553 | bool AllInvariant = all_of( | ||||
3554 | Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); }); | ||||
3555 | |||||
3556 | if (AllInvariant) { | ||||
3557 | // Create a recurrence for the outer loop with the same step size. | ||||
3558 | // | ||||
3559 | // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the | ||||
3560 | // inner recurrence has the same property. | ||||
3561 | SCEV::NoWrapFlags OuterFlags = | ||||
3562 | maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags()); | ||||
3563 | |||||
3564 | NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags); | ||||
3565 | AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) { | ||||
3566 | return isLoopInvariant(Op, NestedLoop); | ||||
3567 | }); | ||||
3568 | |||||
3569 | if (AllInvariant) { | ||||
3570 | // Ok, both add recurrences are valid after the transformation. | ||||
3571 | // | ||||
3572 | // The inner recurrence keeps its NW flag but only keeps NUW/NSW if | ||||
3573 | // the outer recurrence has the same property. | ||||
3574 | SCEV::NoWrapFlags InnerFlags = | ||||
3575 | maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); | ||||
3576 | return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); | ||||
3577 | } | ||||
3578 | } | ||||
3579 | // Reset Operands to its original state. | ||||
3580 | Operands[0] = NestedAR; | ||||
3581 | } | ||||
3582 | } | ||||
3583 | |||||
3584 | // Okay, it looks like we really DO need an addrec expr. Check to see if we | ||||
3585 | // already have one, otherwise create a new one. | ||||
3586 | return getOrCreateAddRecExpr(Operands, L, Flags); | ||||
3587 | } | ||||
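// Sketch of the depth canonicalization above (loop names are assumptions):
// asked for {({0,+,1}<%inner>),+,2}<%outer> where %outer contains %inner,
// the code re-nests the recurrences as {({0,+,2}<%outer>),+,1}<%inner>, so
// the shallower loop's recurrence becomes the start of the deeper one and
// structurally equal expressions unique to the same node.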
3588 | |||||
3589 | const SCEV * | ||||
3590 | ScalarEvolution::getGEPExpr(GEPOperator *GEP, | ||||
3591 | const SmallVectorImpl<const SCEV *> &IndexExprs) { | ||||
3592 | const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); | ||||
3593 | // getSCEV(Base)->getType() has the same address space as Base->getType() | ||||
3594 | // because SCEV::getType() preserves the address space. | ||||
3595 | Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType()); | ||||
3596 | // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP | ||||
3597 | // instruction to its SCEV, because the Instruction may be guarded by control | ||||
3598 | // flow and the no-overflow bits may not be valid for the expression in any | ||||
3599 | // context. This can be fixed similarly to how these flags are handled for | ||||
3600 | // adds. | ||||
3601 | SCEV::NoWrapFlags OffsetWrap = | ||||
3602 | GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap; | ||||
3603 | |||||
3604 | Type *CurTy = GEP->getType(); | ||||
3605 | bool FirstIter = true; | ||||
3606 | SmallVector<const SCEV *, 4> Offsets; | ||||
3607 | for (const SCEV *IndexExpr : IndexExprs) { | ||||
3608 | // Compute the (potentially symbolic) offset in bytes for this index. | ||||
3609 | if (StructType *STy = dyn_cast<StructType>(CurTy)) { | ||||
3610 | // For a struct, add the member offset. | ||||
3611 | ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); | ||||
3612 | unsigned FieldNo = Index->getZExtValue(); | ||||
3613 | const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); | ||||
3614 | Offsets.push_back(FieldOffset); | ||||
3615 | |||||
3616 | // Update CurTy to the type of the field at Index. | ||||
3617 | CurTy = STy->getTypeAtIndex(Index); | ||||
3618 | } else { | ||||
3619 | // Update CurTy to its element type. | ||||
3620 | if (FirstIter) { | ||||
3621 | assert(isa<PointerType>(CurTy) &&((void)0) | ||||
3622 | "The first index of a GEP indexes a pointer")((void)0); | ||||
3623 | CurTy = GEP->getSourceElementType(); | ||||
3624 | FirstIter = false; | ||||
3625 | } else { | ||||
3626 | CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); | ||||
3627 | } | ||||
3628 | // For an array, add the element offset, explicitly scaled. | ||||
3629 | const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); | ||||
3630 | // Getelementptr indices are signed. | ||||
3631 | IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); | ||||
3632 | |||||
3633 | // Multiply the index by the element size to compute the element offset. | ||||
3634 | const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap); | ||||
3635 | Offsets.push_back(LocalOffset); | ||||
3636 | } | ||||
3637 | } | ||||
3638 | |||||
3639 | // Handle degenerate case of GEP without offsets. | ||||
3640 | if (Offsets.empty()) | ||||
3641 | return BaseExpr; | ||||
3642 | |||||
3643 | // Add the offsets together, assuming nsw if inbounds. | ||||
3644 | const SCEV *Offset = getAddExpr(Offsets, OffsetWrap); | ||||
3645 | // Add the base address and the offset. We cannot use the nsw flag, as the | ||||
3646 | // base address is unsigned. However, if we know that the offset is | ||||
3647 | // non-negative, we can use nuw. | ||||
3648 | SCEV::NoWrapFlags BaseWrap = GEP->isInBounds() && isKnownNonNegative(Offset) | ||||
3649 | ? SCEV::FlagNUW : SCEV::FlagAnyWrap; | ||||
3650 | return getAddExpr(BaseExpr, Offset, BaseWrap); | ||||
3651 | } | ||||
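// Worked example (hypothetical IR): for
//   %q = getelementptr inbounds {i32, [10 x i64]}, ptr %p, i64 1, i32 1, i64 %i
// the three offsets are 1 * 88 (sizeof the struct), 8 (offsetof field 1),
// and (8 * (sext %i)) for the array index, added with nsw because of
// inbounds; nuw is applied to the final base-plus-offset add only when the
// combined offset is provably non-negative, exactly per the flag logic above.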
3652 | |||||
3653 | std::tuple<SCEV *, FoldingSetNodeID, void *> | ||||
3654 | ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType, | ||||
3655 | ArrayRef<const SCEV *> Ops) { | ||||
3656 | FoldingSetNodeID ID; | ||||
3657 | void *IP = nullptr; | ||||
3658 | ID.AddInteger(SCEVType); | ||||
3659 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) | ||||
3660 | ID.AddPointer(Ops[i]); | ||||
3661 | return std::tuple<SCEV *, FoldingSetNodeID, void *>( | ||||
3662 | UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP); | ||||
3663 | } | ||||
3664 | |||||
3665 | const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) { | ||||
3666 | SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap; | ||||
3667 | return getSMaxExpr(Op, getNegativeSCEV(Op, Flags)); | ||||
3668 | } | ||||
3669 | |||||
3670 | const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind, | ||||
3671 | SmallVectorImpl<const SCEV *> &Ops) { | ||||
3672 | assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); | ||||
3673 | if (Ops.size() == 1) return Ops[0]; | ||||
3674 | #ifndef NDEBUG | ||||
3675 | Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); | ||||
3676 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) { | ||||
3677 | assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && | ||||
3678 | "Operand types don't match!"); | ||||
3679 | assert(Ops[0]->getType()->isPointerTy() == | ||||
3680 | Ops[i]->getType()->isPointerTy() && | ||||
3681 | "min/max should be consistently pointerish"); | ||||
3682 | } | ||||
3683 | #endif | ||||
3684 | |||||
3685 | bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; | ||||
3686 | bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; | ||||
3687 | |||||
3688 | // Sort by complexity; this groups all similar expression types together. | ||||
3689 | GroupByComplexity(Ops, &LI, DT); | ||||
3690 | |||||
3691 | // Check if we have created the same expression before. | ||||
3692 | if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) { | ||||
3693 | return S; | ||||
3694 | } | ||||
3695 | |||||
3696 | // If there are any constants, fold them together. | ||||
3697 | unsigned Idx = 0; | ||||
3698 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { | ||||
3699 | ++Idx; | ||||
3700 | assert(Idx < Ops.size()); | ||||
3701 | auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { | ||||
3702 | if (Kind == scSMaxExpr) | ||||
3703 | return APIntOps::smax(LHS, RHS); | ||||
3704 | else if (Kind == scSMinExpr) | ||||
3705 | return APIntOps::smin(LHS, RHS); | ||||
3706 | else if (Kind == scUMaxExpr) | ||||
3707 | return APIntOps::umax(LHS, RHS); | ||||
3708 | else if (Kind == scUMinExpr) | ||||
3709 | return APIntOps::umin(LHS, RHS); | ||||
3710 | llvm_unreachable("Unknown SCEV min/max opcode"); | ||||
3711 | }; | ||||
3712 | |||||
3713 | while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { | ||||
3714 | // We found two constants, fold them together! | ||||
3715 | ConstantInt *Fold = ConstantInt::get( | ||||
3716 | getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); | ||||
3717 | Ops[0] = getConstant(Fold); | ||||
3718 | Ops.erase(Ops.begin()+1); // Erase the folded element | ||||
3719 | if (Ops.size() == 1) return Ops[0]; | ||||
3720 | LHSC = cast<SCEVConstant>(Ops[0]); | ||||
3721 | } | ||||
3722 | |||||
3723 | bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); | ||||
3724 | bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); | ||||
3725 | |||||
3726 | if (IsMax ? IsMinV : IsMaxV) { | ||||
3727 | // If we are left with a constant minimum(/maximum)-int, strip it off. | ||||
3728 | Ops.erase(Ops.begin()); | ||||
3729 | --Idx; | ||||
3730 | } else if (IsMax ? IsMaxV : IsMinV) { | ||||
3731 | // If we have a max(/min) with a constant maximum(/minimum)-int, | ||||
3732 | // it will always be the extremum. | ||||
3733 | return LHSC; | ||||
3734 | } | ||||
3735 | |||||
3736 | if (Ops.size() == 1) return Ops[0]; | ||||
3737 | } | ||||
3738 | |||||
3739 | // Find the first operation of the same kind | ||||
3740 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) | ||||
3741 | ++Idx; | ||||
3742 | |||||
3743 | // Check to see if one of the operands is of the same kind. If so, expand its | ||||
3744 | // operands onto our operand list, and recurse to simplify. | ||||
3745 | if (Idx < Ops.size()) { | ||||
3746 | bool DeletedAny = false; | ||||
3747 | while (Ops[Idx]->getSCEVType() == Kind) { | ||||
3748 | const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); | ||||
3749 | Ops.erase(Ops.begin()+Idx); | ||||
3750 | Ops.append(SMME->op_begin(), SMME->op_end()); | ||||
3751 | DeletedAny = true; | ||||
3752 | } | ||||
3753 | |||||
3754 | if (DeletedAny) | ||||
3755 | return getMinMaxExpr(Kind, Ops); | ||||
3756 | } | ||||
3757 | |||||
3758 | // Okay, check to see if the same value occurs in the operand list twice. If | ||||
3759 | // so, delete one. Since we sorted the list, these values are required to | ||||
3760 | // be adjacent. | ||||
3761 | llvm::CmpInst::Predicate GEPred = | ||||
3762 | IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; | ||||
3763 | llvm::CmpInst::Predicate LEPred = | ||||
3764 | IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; | ||||
3765 | llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; | ||||
3766 | llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; | ||||
3767 | for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { | ||||
3768 | if (Ops[i] == Ops[i + 1] || | ||||
3769 | isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { | ||||
3770 | // X op Y op Y --> X op Y | ||||
3771 | // X op Y --> X, if we know X, Y are ordered appropriately | ||||
3772 | Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); | ||||
3773 | --i; | ||||
3774 | --e; | ||||
3775 | } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], | ||||
3776 | Ops[i + 1])) { | ||||
3777 | // X op Y --> Y, if we know X, Y are ordered appropriately | ||||
3778 | Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); | ||||
3779 | --i; | ||||
3780 | --e; | ||||
3781 | } | ||||
3782 | } | ||||
3783 | |||||
3784 | if (Ops.size() == 1) return Ops[0]; | ||||
3785 | |||||
3786 | assert(!Ops.empty() && "Reduced smax down to nothing!"); | ||||
3787 | |||||
3788 | // Okay, it looks like we really DO need an expr. Check to see if we | ||||
3789 | // already have one, otherwise create a new one. | ||||
3790 | const SCEV *ExistingSCEV; | ||||
3791 | FoldingSetNodeID ID; | ||||
3792 | void *IP; | ||||
3793 | std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops); | ||||
3794 | if (ExistingSCEV) | ||||
3795 | return ExistingSCEV; | ||||
3796 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); | ||||
3797 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); | ||||
3798 | SCEV *S = new (SCEVAllocator) | ||||
3799 | SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size()); | ||||
3800 | |||||
3801 | UniqueSCEVs.InsertNode(S, IP); | ||||
3802 | addToLoopUseLists(S); | ||||
3803 | return S; | ||||
3804 | } | ||||
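// Worked example of the folds above (operand names are assumptions):
// getSMaxExpr({%x, (3 smax %y), 5}) inlines the nested smax, folds the
// constants 3 and 5 down to 5, and returns (5 smax %x smax %y); if instead
// one operand is provably >= another (e.g. %x >=s %y), the dominated operand
// is dropped entirely via isKnownViaNonRecursiveReasoning.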
3805 | |||||
3806 | const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) { | ||||
3807 | SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; | ||||
3808 | return getSMaxExpr(Ops); | ||||
3809 | } | ||||
3810 | |||||
3811 | const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { | ||||
3812 | return getMinMaxExpr(scSMaxExpr, Ops); | ||||
3813 | } | ||||
3814 | |||||
3815 | const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) { | ||||
3816 | SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; | ||||
3817 | return getUMaxExpr(Ops); | ||||
3818 | } | ||||
3819 | |||||
3820 | const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { | ||||
3821 | return getMinMaxExpr(scUMaxExpr, Ops); | ||||
3822 | } | ||||
3823 | |||||
3824 | const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, | ||||
3825 | const SCEV *RHS) { | ||||
3826 | SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; | ||||
3827 | return getSMinExpr(Ops); | ||||
3828 | } | ||||
3829 | |||||
3830 | const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) { | ||||
3831 | return getMinMaxExpr(scSMinExpr, Ops); | ||||
3832 | } | ||||
3833 | |||||
3834 | const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, | ||||
3835 | const SCEV *RHS) { | ||||
3836 | SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; | ||||
3837 | return getUMinExpr(Ops); | ||||
3838 | } | ||||
3839 | |||||
3840 | const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) { | ||||
3841 | return getMinMaxExpr(scUMinExpr, Ops); | ||||
3842 | } | ||||
3843 | |||||
3844 | const SCEV * | ||||
3845 | ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy, | ||||
3846 | ScalableVectorType *ScalableTy) { | ||||
3847 | Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo()); | ||||
3848 | Constant *One = ConstantInt::get(IntTy, 1); | ||||
3849 | Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One); | ||||
3850 | // Note that the expression we created is the final expression; we don't | ||||
3851 | // want to simplify it any further. Also, if we call a normal getSCEV(), | ||||
3852 | // we'll end up in an endless recursion. So just create an SCEVUnknown. | ||||
3853 | return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy)); | ||||
3854 | } | ||||
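// The unknown built above is the classic "sizeof via GEP" constant
// expression, conceptually:
//   ptrtoint (getelementptr (<vscale x N x Ty>, ptr null, i32 1)) to IntTy
// i.e. the byte distance from a null base to element 1. It scales with the
// runtime vscale, which is why it stays a SCEVUnknown instead of folding to
// a plain integer constant.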
3855 | |||||
3856 | const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { | ||||
3857 | if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy)) | ||||
3858 | return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy); | ||||
3859 | // We can bypass creating a target-independent constant expression and then | ||||
3860 | // folding it back into a ConstantInt. This is just a compile-time | ||||
3861 | // optimization. | ||||
3862 | return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); | ||||
3863 | } | ||||
3864 | |||||
3865 | const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) { | ||||
3866 | if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy)) | ||||
3867 | return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy); | ||||
3868 | // We can bypass creating a target-independent constant expression and then | ||||
3869 | // folding it back into a ConstantInt. This is just a compile-time | ||||
3870 | // optimization. | ||||
3871 | return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy)); | ||||
3872 | } | ||||
3873 | |||||
3874 | const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, | ||||
3875 | StructType *STy, | ||||
3876 | unsigned FieldNo) { | ||||
3877 | // We can bypass creating a target-independent constant expression and then | ||||
3878 | // folding it back into a ConstantInt. This is just a compile-time | ||||
3879 | // optimization. | ||||
3880 | return getConstant( | ||||
3881 | IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); | ||||
3882 | } | ||||
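// For example (hypothetical struct, typical 64-bit DataLayout): with
// %S = type { i8, i64 }, getOffsetOfExpr(i64, %S, /*FieldNo=*/1) returns the
// constant 8, because the i64 member is aligned up to an 8-byte boundary in
// the struct layout.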
3883 | |||||
3884 | const SCEV *ScalarEvolution::getUnknown(Value *V) { | ||||
3885 | // Don't attempt to do anything other than create a SCEVUnknown object | ||||
3886 | // here. createSCEV only calls getUnknown after checking for all other | ||||
3887 | // interesting possibilities, and any other code that calls getUnknown | ||||
3888 | // is doing so in order to hide a value from SCEV canonicalization. | ||||
3889 | |||||
3890 | FoldingSetNodeID ID; | ||||
3891 | ID.AddInteger(scUnknown); | ||||
3892 | ID.AddPointer(V); | ||||
3893 | void *IP = nullptr; | ||||
3894 | if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { | ||||
3895 | assert(cast<SCEVUnknown>(S)->getValue() == V && | ||||
3896 | "Stale SCEVUnknown in uniquing map!"); | ||||
3897 | return S; | ||||
3898 | } | ||||
3899 | SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, | ||||
3900 | FirstUnknown); | ||||
3901 | FirstUnknown = cast<SCEVUnknown>(S); | ||||
3902 | UniqueSCEVs.InsertNode(S, IP); | ||||
3903 | return S; | ||||
3904 | } | ||||
3905 | |||||
3906 | //===----------------------------------------------------------------------===// | ||||
3907 | // Basic SCEV Analysis and PHI Idiom Recognition Code | ||||
3908 | // | ||||
3909 | |||||
3910 | /// Test if values of the given type are analyzable within the SCEV | ||||
3911 | /// framework. This primarily includes integer types, and it can optionally | ||||
3912 | /// include pointer types if the ScalarEvolution class has access to | ||||
3913 | /// target-specific information. | ||||
3914 | bool ScalarEvolution::isSCEVable(Type *Ty) const { | ||||
3915 | // Integers and pointers are always SCEVable. | ||||
3916 | return Ty->isIntOrPtrTy(); | ||||
3917 | } | ||||
3918 | |||||
3919 | /// Return the size in bits of the specified type, for which isSCEVable must | ||||
3920 | /// return true. | ||||
3921 | uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { | ||||
3922 | assert(isSCEVable(Ty) && "Type is not SCEVable!"); | ||||
3923 | if (Ty->isPointerTy()) | ||||
3924 | return getDataLayout().getIndexTypeSizeInBits(Ty); | ||||
3925 | return getDataLayout().getTypeSizeInBits(Ty); | ||||
3926 | } | ||||
3927 | |||||
3928 | /// Return a type with the same bitwidth as the given type and which represents | ||||
3929 | /// how SCEV will treat the given type, for which isSCEVable must return | ||||
3930 | /// true. For pointer types, this is the pointer index sized integer type. | ||||
3931 | Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const { | ||||
3932 | assert(isSCEVable(Ty) && "Type is not SCEVable!"); | ||||
3933 | |||||
3934 | if (Ty->isIntegerTy()) | ||||
3935 | return Ty; | ||||
3936 | |||||
3937 | // The only other supported type is pointer. | ||||
3938 | assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); | ||||
3939 | return getDataLayout().getIndexType(Ty); | ||||
3940 | } | ||||
3941 | |||||
3942 | Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const { | ||||
3943 | return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2; | ||||
3944 | } | ||||
3945 | |||||
3946 | const SCEV *ScalarEvolution::getCouldNotCompute() { | ||||
3947 | return CouldNotCompute.get(); | ||||
3948 | } | ||||
3949 | |||||
3950 | bool ScalarEvolution::checkValidity(const SCEV *S) const { | ||||
3951 | bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { | ||||
3952 | auto *SU = dyn_cast<SCEVUnknown>(S); | ||||
3953 | return SU && SU->getValue() == nullptr; | ||||
3954 | }); | ||||
3955 | |||||
3956 | return !ContainsNulls; | ||||
3957 | } | ||||
3958 | |||||
3959 | bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { | ||||
3960 | HasRecMapType::iterator I = HasRecMap.find(S); | ||||
3961 | if (I != HasRecMap.end()) | ||||
3962 | return I->second; | ||||
3963 | |||||
3964 | bool FoundAddRec = | ||||
3965 | SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); }); | ||||
3966 | HasRecMap.insert({S, FoundAddRec}); | ||||
3967 | return FoundAddRec; | ||||
3968 | } | ||||
3969 | |||||
3970 | /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}. | ||||
3971 | /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an | ||||
3972 | /// offset I, then return {S', I}, else return {\p S, nullptr}. | ||||
3973 | static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) { | ||||
3974 | const auto *Add = dyn_cast<SCEVAddExpr>(S); | ||||
3975 | if (!Add) | ||||
3976 | return {S, nullptr}; | ||||
3977 | |||||
3978 | if (Add->getNumOperands() != 2) | ||||
3979 | return {S, nullptr}; | ||||
3980 | |||||
3981 | auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0)); | ||||
3982 | if (!ConstOp) | ||||
3983 | return {S, nullptr}; | ||||
3984 | |||||
3985 | return {Add->getOperand(1), ConstOp->getValue()}; | ||||
3986 | } | ||||
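// Illustrative behaviour (assumed inputs): splitAddExpr((4 + %x)) yields
// {%x, 4}, while splitAddExpr((%x + %y)) and splitAddExpr(%x) both yield
// {input, nullptr} -- only a two-operand add whose first operand is a
// SCEVConstant is split.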
3987 | |||||
3988 | /// Return the ValueOffsetPair set for \p S. \p S can be represented | ||||
3989 | /// by the value and offset from any ValueOffsetPair in the set. | ||||
3990 | ScalarEvolution::ValueOffsetPairSetVector * | ||||
3991 | ScalarEvolution::getSCEVValues(const SCEV *S) { | ||||
3992 | ExprValueMapType::iterator SI = ExprValueMap.find_as(S); | ||||
3993 | if (SI == ExprValueMap.end()) | ||||
3994 | return nullptr; | ||||
3995 | #ifndef NDEBUG | ||||
3996 | if (VerifySCEVMap) { | ||||
3997 | // Check there is no dangling Value in the set returned. | ||||
3998 | for (const auto &VE : SI->second) | ||||
3999 | assert(ValueExprMap.count(VE.first)); | ||||
4000 | } | ||||
4001 | #endif | ||||
4002 | return &SI->second; | ||||
4003 | } | ||||
4004 | |||||
4005 | /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) | ||||
4006 | /// cannot be used separately. eraseValueFromMap should be used to remove | ||||
4007 | /// V from ValueExprMap and ExprValueMap at the same time. | ||||
4008 | void ScalarEvolution::eraseValueFromMap(Value *V) { | ||||
4009 | ValueExprMapType::iterator I = ValueExprMap.find_as(V); | ||||
4010 | if (I != ValueExprMap.end()) { | ||||
4011 | const SCEV *S = I->second; | ||||
4012 | // Remove {V, 0} from the set of ExprValueMap[S] | ||||
4013 | if (auto *SV = getSCEVValues(S)) | ||||
4014 | SV->remove({V, nullptr}); | ||||
4015 | |||||
4016 | // Remove {V, Offset} from the set of ExprValueMap[Stripped] | ||||
4017 | const SCEV *Stripped; | ||||
4018 | ConstantInt *Offset; | ||||
4019 | std::tie(Stripped, Offset) = splitAddExpr(S); | ||||
4020 | if (Offset != nullptr) { | ||||
4021 | if (auto *SV = getSCEVValues(Stripped)) | ||||
4022 | SV->remove({V, Offset}); | ||||
4023 | } | ||||
4024 | ValueExprMap.erase(V); | ||||
4025 | } | ||||
4026 | } | ||||
4027 | |||||
4028 | /// Check whether value has nuw/nsw/exact set but SCEV does not. | ||||
4029 | /// TODO: Ideally we would check for poison recursively, but this is | ||||
4030 | /// better than nothing. | ||||
4031 | static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) { | ||||
4032 | if (auto *I = dyn_cast<Instruction>(V)) { | ||||
4033 | if (isa<OverflowingBinaryOperator>(I)) { | ||||
4034 | if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) { | ||||
4035 | if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap()) | ||||
4036 | return true; | ||||
4037 | if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap()) | ||||
4038 | return true; | ||||
4039 | } | ||||
4040 | } else if (isa<PossiblyExactOperator>(I) && I->isExact()) | ||||
4041 | return true; | ||||
4042 | } | ||||
4043 | return false; | ||||
4044 | } | ||||
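// For instance (illustrative): if V is "add nsw i32 %a, %b" but the SCEV
// computed for it is (%a + %b) without FlagNSW, this predicate returns true
// and getSCEV() below declines to record S -> {V, 0} in ExprValueMap, so the
// expander never reuses V in a context where the nsw assumption may not hold.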
4045 | |||||
4046 | /// Return an existing SCEV if it exists, otherwise analyze the expression and | ||||
4047 | /// create a new one. | ||||
4048 | const SCEV *ScalarEvolution::getSCEV(Value *V) { | ||||
4049 | assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); | ||||
4050 | |||||
4051 | const SCEV *S = getExistingSCEV(V); | ||||
4052 | if (S == nullptr) { | ||||
4053 | S = createSCEV(V); | ||||
4054 | // During PHI resolution, it is possible to create two SCEVs for the same | ||||
4055 | // V, so we need to double-check whether V->S has already been inserted | ||||
4056 | // into ValueExprMap before inserting S->{V, 0} into ExprValueMap. | ||||
4057 | std::pair<ValueExprMapType::iterator, bool> Pair = | ||||
4058 | ValueExprMap.insert({SCEVCallbackVH(V, this), S}); | ||||
4059 | if (Pair.second && !SCEVLostPoisonFlags(S, V)) { | ||||
4060 | ExprValueMap[S].insert({V, nullptr}); | ||||
4061 | |||||
4062 | // If S == Stripped + Offset, add Stripped -> {V, Offset} into | ||||
4063 | // ExprValueMap. | ||||
4064 | const SCEV *Stripped = S; | ||||
4065 | ConstantInt *Offset = nullptr; | ||||
4066 | std::tie(Stripped, Offset) = splitAddExpr(S); | ||||
4067 | // If Stripped is a SCEVUnknown, don't bother to save | ||||
4068 | // Stripped -> {V, offset}. It doesn't simplify and sometimes even | ||||
4069 | // increases the complexity of the expansion code. | ||||
4070 | // If V is GetElementPtrInst, don't save Stripped -> {V, offset} | ||||
4071 | // because it may generate add/sub instead of GEP in SCEV expansion. | ||||
4072 | if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && | ||||
4073 | !isa<GetElementPtrInst>(V)) | ||||
4074 | ExprValueMap[Stripped].insert({V, Offset}); | ||||
4075 | } | ||||
4076 | } | ||||
4077 | return S; | ||||
4078 | } | ||||
4079 | |||||
4080 | const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { | ||||
4081 | assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); | ||||
4082 | |||||
4083 | ValueExprMapType::iterator I = ValueExprMap.find_as(V); | ||||
4084 | if (I != ValueExprMap.end()) { | ||||
4085 | const SCEV *S = I->second; | ||||
4086 | if (checkValidity(S)) | ||||
4087 | return S; | ||||
4088 | eraseValueFromMap(V); | ||||
4089 | forgetMemoizedResults(S); | ||||
4090 | } | ||||
4091 | return nullptr; | ||||
4092 | } | ||||
4093 | |||||
4094 | /// Return a SCEV corresponding to -V = -1*V | ||||
4095 | const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, | ||||
4096 | SCEV::NoWrapFlags Flags) { | ||||
4097 | if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) | ||||
4098 | return getConstant( | ||||
4099 | cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); | ||||
4100 | |||||
4101 | Type *Ty = V->getType(); | ||||
4102 | Ty = getEffectiveSCEVType(Ty); | ||||
4103 | return getMulExpr(V, getMinusOne(Ty), Flags); | ||||
4104 | } | ||||
4105 | |||||
4106 | /// If Expr computes ~A, return A else return nullptr | ||||
4107 | static const SCEV *MatchNotExpr(const SCEV *Expr) { | ||||
4108 | const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); | ||||
4109 | if (!Add || Add->getNumOperands() != 2 || | ||||
4110 | !Add->getOperand(0)->isAllOnesValue()) | ||||
4111 | return nullptr; | ||||
4112 | |||||
4113 | const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); | ||||
4114 | if (!AddRHS || AddRHS->getNumOperands() != 2 || | ||||
4115 | !AddRHS->getOperand(0)->isAllOnesValue()) | ||||
4116 | return nullptr; | ||||
4117 | |||||
4118 | return AddRHS->getOperand(1); | ||||
4119 | } | ||||
4120 | |||||
4121 | /// Return a SCEV corresponding to ~V = -1-V | ||||
4122 | const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { | ||||
4123 | if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) | ||||
4124 | return getConstant( | ||||
4125 | cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); | ||||
4126 | |||||
4127 | // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) | ||||
4128 | if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { | ||||
4129 | auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { | ||||
4130 | SmallVector<const SCEV *, 2> MatchedOperands; | ||||
4131 | for (const SCEV *Operand : MME->operands()) { | ||||
4132 | const SCEV *Matched = MatchNotExpr(Operand); | ||||
4133 | if (!Matched) | ||||
4134 | return (const SCEV *)nullptr; | ||||
4135 | MatchedOperands.push_back(Matched); | ||||
4136 | } | ||||
4137 | return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()), | ||||
4138 | MatchedOperands); | ||||
4139 | }; | ||||
4140 | if (const SCEV *Replaced = MatchMinMaxNegation(MME)) | ||||
4141 | return Replaced; | ||||
4142 | } | ||||
4143 | |||||
4144 | Type *Ty = V->getType(); | ||||
4145 | Ty = getEffectiveSCEVType(Ty); | ||||
4146 | return getMinusSCEV(getMinusOne(Ty), V); | ||||
4147 | } | ||||
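// Example of the min/max fold above (illustrative): with
// V = ((-1 + (-1 * %a)) smin (-1 + (-1 * %b))), i.e. (~%a smin ~%b), each
// operand matches MatchNotExpr, so the result is (%a smax %b) rather than
// the generic (-1 - V) form.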
4148 | |||||
4149 | /// Compute an expression equivalent to S - getPointerBase(S). | ||||
4150 | static const SCEV *removePointerBase(ScalarEvolution *SE, const SCEV *P) { | ||||
4151 | assert(P->getType()->isPointerTy()); | ||||
4152 | |||||
4153 | if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) { | ||||
4154 | // The base of an AddRec is the first operand. | ||||
4155 | SmallVector<const SCEV *> Ops{AddRec->operands()}; | ||||
4156 | Ops[0] = removePointerBase(SE, Ops[0]); | ||||
4157 | // Don't try to transfer nowrap flags for now. We could in some cases | ||||
4158 | // (for example, if the pointer operand of the AddRec is a SCEVUnknown). | ||||
4159 | return SE->getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap); | ||||
4160 | } | ||||
4161 | if (auto *Add = dyn_cast<SCEVAddExpr>(P)) { | ||||
4162 | // The base of an Add is the pointer operand. | ||||
4163 | SmallVector<const SCEV *> Ops{Add->operands()}; | ||||
4164 | const SCEV **PtrOp = nullptr; | ||||
4165 | for (const SCEV *&AddOp : Ops) { | ||||
4166 | if (AddOp->getType()->isPointerTy()) { | ||||
4167 | // If we find an Add with multiple pointer operands, treat it as a | ||||
4168 | // pointer base to be consistent with getPointerBase. Eventually | ||||
4169 | // we should be able to assert this is impossible. | ||||
4170 | if (PtrOp) | ||||
4171 | return SE->getZero(P->getType()); | ||||
4172 | PtrOp = &AddOp; | ||||
4173 | } | ||||
4174 | } | ||||
4175 | *PtrOp = removePointerBase(SE, *PtrOp); | ||||
4176 | // Don't try to transfer nowrap flags for now. We could in some cases | ||||
4177 | // (for example, if the pointer operand of the Add is a SCEVUnknown). | ||||
4178 | return SE->getAddExpr(Ops); | ||||
4179 | } | ||||
4180 | // Any other expression must be a pointer base. | ||||
4181 | return SE->getZero(P->getType()); | ||||
4182 | } | ||||
4183 | |||||
4184 | const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, | ||||
4185 | SCEV::NoWrapFlags Flags, | ||||
4186 | unsigned Depth) { | ||||
4187 | // Fast path: X - X --> 0. | ||||
4188 | if (LHS == RHS) | ||||
4189 | return getZero(LHS->getType()); | ||||
4190 | |||||
4191 | // If we subtract two pointers with different pointer bases, bail. | ||||
4192 | // Eventually, we're going to add an assertion to getMulExpr that we | ||||
4193 | // can't multiply by a pointer. | ||||
4194 | if (RHS->getType()->isPointerTy()) { | ||||
4195 | if (!LHS->getType()->isPointerTy() || | ||||
4196 | getPointerBase(LHS) != getPointerBase(RHS)) | ||||
4197 | return getCouldNotCompute(); | ||||
4198 | LHS = removePointerBase(this, LHS); | ||||
4199 | RHS = removePointerBase(this, RHS); | ||||
4200 | } | ||||
4201 | |||||
4202 | // We represent LHS - RHS as LHS + (-1)*RHS. This transformation | ||||
4203 | // makes it so that we cannot make much use of NUW. | ||||
4204 | auto AddFlags = SCEV::FlagAnyWrap; | ||||
4205 | const bool RHSIsNotMinSigned = | ||||
4206 | !getSignedRangeMin(RHS).isMinSignedValue(); | ||||
4207 | if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { | ||||
4208 | // Let M be the minimum representable signed value. Then (-1)*RHS | ||||
4209 | // signed-wraps if and only if RHS is M. That can happen even for | ||||
4210 | // a NSW subtraction because e.g. (-1)*M signed-wraps even though | ||||
4211 | // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + | ||||
4212 | // (-1)*RHS, we need to prove that RHS != M. | ||||
4213 | // | ||||
4214 | // If LHS is non-negative and we know that LHS - RHS does not | ||||
4215 | // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap | ||||
4216 | // either by proving that RHS > M or that LHS >= 0. | ||||
4217 | if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { | ||||
4218 | AddFlags = SCEV::FlagNSW; | ||||
4219 | } | ||||
4220 | } | ||||
4221 | |||||
4222 | // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - | ||||
4223 | // RHS is NSW and LHS >= 0. | ||||
4224 | // | ||||
4225 | // The difficulty here is that the NSW flag may have been proven | ||||
4226 | // relative to a loop that is to be found in a recurrence in LHS and | ||||
4227 | // not in RHS. Applying NSW to (-1)*M may then let the NSW have a | ||||
4228 | // larger scope than intended. | ||||
4229 | auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap; | ||||
4230 | |||||
4231 | return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth); | ||||
4232 | } | ||||
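// Illustrative cases (assumed expressions): getMinusSCEV((%p + 8), %p)
// strips the common pointer base via removePointerBase and yields 8,
// whereas subtracting pointers with different bases -- getMinusSCEV(%p, %q)
// -- returns SCEVCouldNotCompute instead of a meaningless mixed-pointer add.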
4233 | |||||
4234 | const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty, | ||||
4235 | unsigned Depth) { | ||||
4236 | Type *SrcTy = V->getType(); | ||||
4237 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | ||||
4238 | "Cannot truncate or zero extend with non-integer arguments!"); | ||||
4239 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) | ||||
4240 | return V; // No conversion | ||||
4241 | if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) | ||||
4242 | return getTruncateExpr(V, Ty, Depth); | ||||
4243 | return getZeroExtendExpr(V, Ty, Depth); | ||||
4244 | } | ||||
4245 | |||||
4246 | const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty, | ||||
4247 | unsigned Depth) { | ||||
4248 | Type *SrcTy = V->getType(); | ||||
4249 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | ||||
4250 | "Cannot truncate or sign extend with non-integer arguments!"); | ||||
4251 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) | ||||
4252 | return V; // No conversion | ||||
4253 | if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) | ||||
4254 | return getTruncateExpr(V, Ty, Depth); | ||||
4255 | return getSignExtendExpr(V, Ty, Depth); | ||||
4256 | } | ||||
4257 | |||||
4258 | const SCEV * | ||||
4259 | ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) { | ||||
4260 | Type *SrcTy = V->getType(); | ||||
4261 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | ||||
4262 | "Cannot noop or zero extend with non-integer arguments!"); | ||||
4263 | assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && | ||||
4264 | "getNoopOrZeroExtend cannot truncate!"); | ||||
4265 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) | ||||
4266 | return V; // No conversion | ||||
4267 | return getZeroExtendExpr(V, Ty); | ||||
4268 | } | ||||
4269 | |||||
4270 | const SCEV * | ||||
4271 | ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) { | ||||
4272 | Type *SrcTy = V->getType(); | ||||
4273 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | ||||
4274 | "Cannot noop or sign extend with non-integer arguments!"); | ||||
4275 | assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && | ||||
4276 | "getNoopOrSignExtend cannot truncate!"); | ||||
4277 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) | ||||
4278 | return V; // No conversion | ||||
4279 | return getSignExtendExpr(V, Ty); | ||||
4280 | } | ||||
4281 | |||||
4282 | const SCEV * | ||||
4283 | ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) { | ||||
4284 | Type *SrcTy = V->getType(); | ||||
4285 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | ||||
4286 | "Cannot noop or any extend with non-integer arguments!"); | ||||
4287 | assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && | ||||
4288 | "getNoopOrAnyExtend cannot truncate!"); | ||||
4289 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) | ||||
4290 | return V; // No conversion | ||||
4291 | return getAnyExtendExpr(V, Ty); | ||||
4292 | } | ||||
4293 | |||||
4294 | const SCEV * | ||||
4295 | ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) { | ||||
4296 | Type *SrcTy = V->getType(); | ||||
4297 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | ||||
4298 | "Cannot truncate or noop with non-integer arguments!"); | ||||
4299 | assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && | ||||
4300 | "getTruncateOrNoop cannot extend!"); | ||||
4301 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) | ||||
4302 | return V; // No conversion | ||||
4303 | return getTruncateExpr(V, Ty); | ||||
4304 | } | ||||
4305 | |||||
4306 | const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, | ||||
4307 | const SCEV *RHS) { | ||||
4308 | const SCEV *PromotedLHS = LHS; | ||||
4309 | const SCEV *PromotedRHS = RHS; | ||||
4310 | |||||
4311 | if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) | ||||
4312 | PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); | ||||
4313 | else | ||||
4314 | PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); | ||||
4315 | |||||
4316 | return getUMaxExpr(PromotedLHS, PromotedRHS); | ||||
4317 | } | ||||
4318 | |||||
4319 | const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, | ||||
4320 | const SCEV *RHS) { | ||||
4321 | SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; | ||||
4322 | return getUMinFromMismatchedTypes(Ops); | ||||
4323 | } | ||||
4324 | |||||
4325 | const SCEV *ScalarEvolution::getUMinFromMismatchedTypes( | ||||
4326 | SmallVectorImpl<const SCEV *> &Ops) { | ||||
4327 | assert(!Ops.empty() && "At least one operand must be!"); | ||||
4328 | // Trivial case. | ||||
4329 | if (Ops.size() == 1) | ||||
4330 | return Ops[0]; | ||||
4331 | |||||
4332 | // Find the max type first. | ||||
4333 | Type *MaxType = nullptr; | ||||
4334 | for (auto *S : Ops) | ||||
4335 | if (MaxType) | ||||
4336 | MaxType = getWiderType(MaxType, S->getType()); | ||||
4337 | else | ||||
4338 | MaxType = S->getType(); | ||||
4339 | assert(MaxType && "Failed to find maximum type!"); | ||||
4340 | |||||
4341 | // Extend all ops to max type. | ||||
4342 | SmallVector<const SCEV *, 2> PromotedOps; | ||||
4343 | for (auto *S : Ops) | ||||
4344 | PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); | ||||
4345 | |||||
4346 | // Generate umin. | ||||
4347 | return getUMinExpr(PromotedOps); | ||||
4348 | } | ||||
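// For example (illustrative types): umin of an i32 %a and an i64 %b picks
// i64 as the widest effective type, widens %a with getNoopOrZeroExtend, and
// returns ((zext i32 %a to i64) umin %b).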
4349 | |||||
4350 | const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { | ||||
4351 | // A pointer operand may evaluate to a nonpointer expression, such as null. | ||||
4352 | if (!V->getType()->isPointerTy()) | ||||
4353 | return V; | ||||
4354 | |||||
4355 | while (true) { | ||||
4356 | if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { | ||||
4357 | V = AddRec->getStart(); | ||||
4358 | } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) { | ||||
4359 | const SCEV *PtrOp = nullptr; | ||||
4360 | for (const SCEV *AddOp : Add->operands()) { | ||||
4361 | if (AddOp->getType()->isPointerTy()) { | ||||
4362 | // Cannot find the base of an expression with multiple pointer ops. | ||||
4363 | if (PtrOp) | ||||
4364 | return V; | ||||
4365 | PtrOp = AddOp; | ||||
4366 | } | ||||
4367 | } | ||||
4368 | if (!PtrOp) // All operands were non-pointer. | ||||
4369 | return V; | ||||
4370 | V = PtrOp; | ||||
4371 | } else // Not something we can look further into. | ||||
4372 | return V; | ||||
4373 | } | ||||
4374 | } | ||||
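// Illustrative walk (assumed expression): for {(%p + 8),+,4}<%L> the loop
// first takes the addrec's start (%p + 8), then picks its single pointer
// operand, so getPointerBase returns %p; an add with two pointer operands
// stops the walk and is returned as-is.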
4375 | |||||
4376 | /// Push users of the given Instruction onto the given Worklist. | ||||
4377 | static void | ||||
4378 | PushDefUseChildren(Instruction *I, | ||||
4379 | SmallVectorImpl<Instruction *> &Worklist) { | ||||
4380 | // Push the def-use children onto the Worklist stack. | ||||
4381 | for (User *U : I->users()) | ||||
4382 | Worklist.push_back(cast<Instruction>(U)); | ||||
4383 | } | ||||
4384 | |||||
4385 | void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { | ||||
4386 | SmallVector<Instruction *, 16> Worklist; | ||||
4387 | PushDefUseChildren(PN, Worklist); | ||||
4388 | |||||
4389 | SmallPtrSet<Instruction *, 8> Visited; | ||||
4390 | Visited.insert(PN); | ||||
4391 | while (!Worklist.empty()) { | ||||
4392 | Instruction *I = Worklist.pop_back_val(); | ||||
4393 | if (!Visited.insert(I).second) | ||||
4394 | continue; | ||||
4395 | |||||
4396 | auto It = ValueExprMap.find_as(static_cast<Value *>(I)); | ||||
4397 | if (It != ValueExprMap.end()) { | ||||
4398 | const SCEV *Old = It->second; | ||||
4399 | |||||
4400 | // Short-circuit the def-use traversal if the symbolic name | ||||
4401 | // ceases to appear in expressions. | ||||
4402 | if (Old != SymName && !hasOperand(Old, SymName)) | ||||
4403 | continue; | ||||
4404 | |||||
4405 | // SCEVUnknown for a PHI either means that it has an unrecognized | ||||
4406 | // structure, it's a PHI that's in the process of being computed | ||||
4407 | // by createNodeForPHI, or it's a single-value PHI. In the first case, | ||||
4408 | // additional loop trip count information isn't going to change anything. | ||||
4409 | // In the second case, createNodeForPHI will perform the necessary | ||||
4410 | // updates on its own when it gets to that point. In the third, we do | ||||
4411 | // want to forget the SCEVUnknown. | ||||
4412 | if (!isa<PHINode>(I) || | ||||
4413 | !isa<SCEVUnknown>(Old) || | ||||
4414 | (I != PN && Old == SymName)) { | ||||
4415 | eraseValueFromMap(It->first); | ||||
4416 | forgetMemoizedResults(Old); | ||||
4417 | } | ||||
4418 | } | ||||
4419 | |||||
4420 | PushDefUseChildren(I, Worklist); | ||||
4421 | } | ||||
4422 | } | ||||
4423 | |||||
4424 | namespace { | ||||
4425 | |||||
4426 | /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start | ||||
4427 | /// expression if its loop is L. If the loop is not L, use the AddRec itself | ||||
4428 | /// when IgnoreOtherLoops is true; otherwise the rewrite cannot be done. | ||||
4429 | /// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be | ||||
4430 | /// done either. | ||||
4431 | class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> { | ||||
4432 | public: | ||||
4433 | static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, | ||||
4434 | bool IgnoreOtherLoops = true) { | ||||
4435 | SCEVInitRewriter Rewriter(L, SE); | ||||
4436 | const SCEV *Result = Rewriter.visit(S); | ||||
4437 | if (Rewriter.hasSeenLoopVariantSCEVUnknown()) | ||||
4438 | return SE.getCouldNotCompute(); | ||||
4439 | return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops | ||||
4440 | ? SE.getCouldNotCompute() | ||||
4441 | : Result; | ||||
4442 | } | ||||
4443 | |||||
4444 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { | ||||
4445 | if (!SE.isLoopInvariant(Expr, L)) | ||||
4446 | SeenLoopVariantSCEVUnknown = true; | ||||
4447 | return Expr; | ||||
4448 | } | ||||
4449 | |||||
4450 | const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { | ||||
4451 | // Only re-write AddRecExprs for this loop. | ||||
4452 | if (Expr->getLoop() == L) | ||||
4453 | return Expr->getStart(); | ||||
4454 | SeenOtherLoops = true; | ||||
4455 | return Expr; | ||||
4456 | } | ||||
4457 | |||||
4458 | bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } | ||||
4459 | |||||
4460 | bool hasSeenOtherLoops() { return SeenOtherLoops; } | ||||
4461 | |||||
4462 | private: | ||||
4463 | explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) | ||||
4464 | : SCEVRewriteVisitor(SE), L(L) {} | ||||
4465 | |||||
4466 | const Loop *L; | ||||
4467 | bool SeenLoopVariantSCEVUnknown = false; | ||||
4468 | bool SeenOtherLoops = false; | ||||
4469 | }; | ||||
4470 | |||||
4471 | /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its | ||||
4472 | /// post-increment expression if its loop is L; otherwise use the AddRec | ||||
4473 | /// itself. | ||||
4474 | /// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be done. | ||||
4475 | class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> { | ||||
4476 | public: | ||||
4477 | static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { | ||||
4478 | SCEVPostIncRewriter Rewriter(L, SE); | ||||
4479 | const SCEV *Result = Rewriter.visit(S); | ||||
4480 | return Rewriter.hasSeenLoopVariantSCEVUnknown() | ||||
4481 | ? SE.getCouldNotCompute() | ||||
4482 | : Result; | ||||
4483 | } | ||||
4484 | |||||
4485 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { | ||||
4486 | if (!SE.isLoopInvariant(Expr, L)) | ||||
4487 | SeenLoopVariantSCEVUnknown = true; | ||||
4488 | return Expr; | ||||
4489 | } | ||||
4490 | |||||
4491 | const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { | ||||
4492 | // Only re-write AddRecExprs for this loop. | ||||
4493 | if (Expr->getLoop() == L) | ||||
4494 | return Expr->getPostIncExpr(SE); | ||||
4495 | SeenOtherLoops = true; | ||||
4496 | return Expr; | ||||
4497 | } | ||||
4498 | |||||
4499 | bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } | ||||
4500 | |||||
4501 | bool hasSeenOtherLoops() { return SeenOtherLoops; } | ||||
4502 | |||||
4503 | private: | ||||
4504 | explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) | ||||
4505 | : SCEVRewriteVisitor(SE), L(L) {} | ||||
4506 | |||||
4507 | const Loop *L; | ||||
4508 | bool SeenLoopVariantSCEVUnknown = false; | ||||
4509 | bool SeenOtherLoops = false; | ||||
4510 | }; | ||||
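// Illustrative rewrites for the two visitors above (assumed loop %L):
// SCEVInitRewriter maps {5,+,1}<%L> to its start 5, while
// SCEVPostIncRewriter maps it to the post-increment value {6,+,1}<%L>;
// a loop-variant SCEVUnknown in either case forces SCEVCouldNotCompute.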
4511 | |||||
4512 | /// This class evaluates the compare condition by matching it against the | ||||
4513 | /// condition of the loop latch. If there is a match, we assume a true | ||||
4514 | /// value for the condition while building SCEV nodes. | ||||
4515 | class SCEVBackedgeConditionFolder | ||||
4516 | : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { | ||||
4517 | public: | ||||
4518 | static const SCEV *rewrite(const SCEV *S, const Loop *L, | ||||
4519 | ScalarEvolution &SE) { | ||||
4520 | bool IsPosBECond = false; | ||||
4521 | Value *BECond = nullptr; | ||||
4522 | if (BasicBlock *Latch = L->getLoopLatch()) { | ||||
4523 | BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); | ||||
4524 | if (BI && BI->isConditional()) { | ||||
4525 | assert(BI->getSuccessor(0) != BI->getSuccessor(1) && | ||||
4526 | "Both outgoing branches should not target same header!"); | ||||
4527 | BECond = BI->getCondition(); | ||||
4528 | IsPosBECond = BI->getSuccessor(0) == L->getHeader(); | ||||
4529 | } else { | ||||
4530 | return S; | ||||
4531 | } | ||||
4532 | } | ||||
4533 | SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); | ||||
4534 | return Rewriter.visit(S); | ||||
4535 | } | ||||
4536 | |||||
4537 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { | ||||
4538 | const SCEV *Result = Expr; | ||||
4539 | bool InvariantF = SE.isLoopInvariant(Expr, L); | ||||
4540 | |||||
4541 | if (!InvariantF) { | ||||
4542 | Instruction *I = cast<Instruction>(Expr->getValue()); | ||||
4543 | switch (I->getOpcode()) { | ||||
4544 | case Instruction::Select: { | ||||
4545 | SelectInst *SI = cast<SelectInst>(I); | ||||
4546 | Optional<const SCEV *> Res = | ||||
4547 | compareWithBackedgeCondition(SI->getCondition()); | ||||
4548 | if (Res.hasValue()) { | ||||
4549 | bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); | ||||
4550 | Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); | ||||
4551 | } | ||||
4552 | break; | ||||
4553 | } | ||||
4554 | default: { | ||||
4555 | Optional<const SCEV *> Res = compareWithBackedgeCondition(I); | ||||
4556 | if (Res.hasValue()) | ||||
4557 | Result = Res.getValue(); | ||||
4558 | break; | ||||
4559 | } | ||||
4560 | } | ||||
4561 | } | ||||
4562 | return Result; | ||||
4563 | } | ||||
4564 | |||||
4565 | private: | ||||
4566 | explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, | ||||
4567 | bool IsPosBECond, ScalarEvolution &SE) | ||||
4568 | : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), | ||||
4569 | IsPositiveBECond(IsPosBECond) {} | ||||
4570 | |||||
4571 | Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); | ||||
4572 | |||||
4573 | const Loop *L; | ||||
4574 | /// Loop back condition. | ||||
4575 | Value *BackedgeCond = nullptr; | ||||
4576 | /// Set to true if loop back is on positive branch condition. | ||||
4577 | bool IsPositiveBECond; | ||||
4578 | }; | ||||
4579 | |||||
4580 | Optional<const SCEV *> | ||||
4581 | SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { | ||||
4582 | |||||
4583 | // If the value matches the backedge condition of the loop latch, | ||||
4584 | // return a constant evolution node based on whether the loopback | ||||
4585 | // branch is taken. | ||||
4586 | if (BackedgeCond == IC) | ||||
4587 | return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext())) | ||||
4588 | : SE.getZero(Type::getInt1Ty(SE.getContext())); | ||||
4589 | return None; | ||||
4590 | } | ||||
4591 | |||||
4592 | class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { | ||||
4593 | public: | ||||
4594 | static const SCEV *rewrite(const SCEV *S, const Loop *L, | ||||
4595 | ScalarEvolution &SE) { | ||||
4596 | SCEVShiftRewriter Rewriter(L, SE); | ||||
4597 | const SCEV *Result = Rewriter.visit(S); | ||||
4598 | return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); | ||||
4599 | } | ||||
4600 | |||||
4601 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { | ||||
4602 | // Only allow AddRecExprs for this loop. | ||||
4603 | if (!SE.isLoopInvariant(Expr, L)) | ||||
4604 | Valid = false; | ||||
4605 | return Expr; | ||||
4606 | } | ||||
4607 | |||||
4608 | const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { | ||||
4609 | if (Expr->getLoop() == L && Expr->isAffine()) | ||||
4610 | return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); | ||||
4611 | Valid = false; | ||||
4612 | return Expr; | ||||
4613 | } | ||||
4614 | |||||
4615 | bool isValid() { return Valid; } | ||||
4616 | |||||
4617 | private: | ||||
4618 | explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) | ||||
4619 | : SCEVRewriteVisitor(SE), L(L) {} | ||||
4620 | |||||
4621 | const Loop *L; | ||||
4622 | bool Valid = true; | ||||
4623 | }; | ||||
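// Illustrative effect (assumed loop %L): SCEVShiftRewriter turns the affine
// {8,+,4}<%L> into {4,+,4}<%L>, the same evolution shifted back one
// iteration; any loop-variant expression that is not such an addrec marks
// the rewrite invalid.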
4624 | |||||
4625 | } // end anonymous namespace | ||||
4626 | |||||
4627 | SCEV::NoWrapFlags | ||||
4628 | ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { | ||||
4629 | if (!AR->isAffine()) | ||||
4630 | return SCEV::FlagAnyWrap; | ||||
4631 | |||||
4632 | using OBO = OverflowingBinaryOperator; | ||||
4633 | |||||
4634 | SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; | ||||
4635 | |||||
4636 | if (!AR->hasNoSignedWrap()) { | ||||
4637 | ConstantRange AddRecRange = getSignedRange(AR); | ||||
4638 | ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); | ||||
4639 | |||||
4640 | auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( | ||||
4641 | Instruction::Add, IncRange, OBO::NoSignedWrap); | ||||
4642 | if (NSWRegion.contains(AddRecRange)) | ||||
4643 | Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); | ||||
4644 | } | ||||
4645 | |||||
4646 | if (!AR->hasNoUnsignedWrap()) { | ||||
4647 | ConstantRange AddRecRange = getUnsignedRange(AR); | ||||
4648 | ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); | ||||
4649 | |||||
4650 | auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( | ||||
4651 | Instruction::Add, IncRange, OBO::NoUnsignedWrap); | ||||
4652 | if (NUWRegion.contains(AddRecRange)) | ||||
4653 | Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); | ||||
4654 | } | ||||
4655 | |||||
4656 | return Result; | ||||
4657 | } | ||||
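// Numeric sketch of the range reasoning above (illustrative ranges): if the
// addrec's unsigned range is [0, 100) and the step's range is [1, 2), then
// makeGuaranteedNoWrapRegion(Add, [1,2), NoUnsignedWrap) covers every x for
// which x + 1 does not wrap, i.e. [0, UINT_MAX); that contains [0, 100), so
// FlagNUW is proven without ever computing a trip count.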
4658 | |||||
4659 | SCEV::NoWrapFlags | ||||
4660 | ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) { | ||||
4661 | SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); | ||||
4662 | |||||
4663 | if (AR->hasNoSignedWrap()) | ||||
4664 | return Result; | ||||
4665 | |||||
4666 | if (!AR->isAffine()) | ||||
4667 | return Result; | ||||
4668 | |||||
4669 | const SCEV *Step = AR->getStepRecurrence(*this); | ||||
4670 | const Loop *L = AR->getLoop(); | ||||
4671 | |||||
4672 | // Check whether the backedge-taken count is SCEVCouldNotCompute. | ||||
4673 | // Note that this serves two purposes: It filters out loops that are | ||||
4674 | // simply not analyzable, and it covers the case where this code is | ||||
4675 | // being called from within backedge-taken count analysis, such that | ||||
4676 | // attempting to ask for the backedge-taken count would likely result | ||||
4677 | // in infinite recursion. In the latter case, the analysis code will | ||||
4678 | // cope with a conservative value, and it will take care to purge | ||||
4679 | // that value once it has finished. | ||||
4680 | const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); | ||||
4681 | |||||
4682 | // Normally, in the cases we can prove no-overflow via a | ||||
4683 | // backedge guarding condition, we can also compute a backedge | ||||
4684 | // taken count for the loop. The exceptions are assumptions and | ||||
4685 | // guards present in the loop -- SCEV is not great at exploiting | ||||
4686 | // these to compute max backedge taken counts, but can still use | ||||
4687 | // these to prove lack of overflow. Use this fact to avoid | ||||
4688 | // doing extra work that may not pay off. | ||||
4689 | |||||
4690 | if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards && | ||||
4691 | AC.assumptions().empty()) | ||||
4692 | return Result; | ||||
4693 | |||||
4694 | // If the backedge is guarded by a comparison with the pre-inc value the | ||||
4695 | // addrec is safe. Also, if the entry is guarded by a comparison with the | ||||
4696 | // start value and the backedge is guarded by a comparison with the post-inc | ||||
4697 | // value, the addrec is safe. | ||||
4698 | ICmpInst::Predicate Pred; | ||||
4699 | const SCEV *OverflowLimit = | ||||
4700 | getSignedOverflowLimitForStep(Step, &Pred, this); | ||||
4701 | if (OverflowLimit && | ||||
4702 | (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || | ||||
4703 | isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { | ||||
4704 | Result = setFlags(Result, SCEV::FlagNSW); | ||||
4705 | } | ||||
4706 | return Result; | ||||
4707 | } | ||||
| |||||
4708 | SCEV::NoWrapFlags | ||||
4709 | ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) { | ||||
4710 | SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); | ||||
4711 | |||||
4712 | if (AR->hasNoUnsignedWrap()) | ||||
4713 | return Result; | ||||
4714 | |||||
4715 | if (!AR->isAffine()) | ||||
4716 | return Result; | ||||
4717 | |||||
4718 | const SCEV *Step = AR->getStepRecurrence(*this); | ||||
4719 | unsigned BitWidth = getTypeSizeInBits(AR->getType()); | ||||
4720 | const Loop *L = AR->getLoop(); | ||||
4721 | |||||
4722 | // Check whether the backedge-taken count is SCEVCouldNotCompute. | ||||
4723 | // Note that this serves two purposes: It filters out loops that are | ||||
4724 | // simply not analyzable, and it covers the case where this code is | ||||
4725 | // being called from within backedge-taken count analysis, such that | ||||
4726 | // attempting to ask for the backedge-taken count would likely result | ||||
4727 | // in infinite recursion. In the latter case, the analysis code will | ||||
4728 | // cope with a conservative value, and it will take care to purge | ||||
4729 | // that value once it has finished. | ||||
4730 | const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); | ||||
4731 | |||||
4732 | // Normally, in the cases we can prove no-overflow via a | ||||
4733 | // backedge guarding condition, we can also compute a backedge | ||||
4734 | // taken count for the loop. The exceptions are assumptions and | ||||
4735 | // guards present in the loop -- SCEV is not great at exploiting | ||||
4736 | // these to compute max backedge taken counts, but can still use | ||||
4737 | // these to prove lack of overflow. Use this fact to avoid | ||||
4738 | // doing extra work that may not pay off. | ||||
4739 | |||||
4740 | if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards && | ||||
4741 | AC.assumptions().empty()) | ||||
4742 | return Result; | ||||
4743 | |||||
4744 | // If the backedge is guarded by a comparison with the pre-inc value the | ||||
4745 | // addrec is safe. Also, if the entry is guarded by a comparison with the | ||||
4746 | // start value and the backedge is guarded by a comparison with the post-inc | ||||
4747 | // value, the addrec is safe. | ||||
4748 | if (isKnownPositive(Step)) { | ||||
4749 | const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - | ||||
4750 | getUnsignedRangeMax(Step)); | ||||
4751 | if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || | ||||
4752 | isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { | ||||
4753 | Result = setFlags(Result, SCEV::FlagNUW); | ||||
4754 | } | ||||
4755 | } | ||||
4756 | |||||
4757 | return Result; | ||||
4758 | } | ||||
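// A worked instance of the bound N above (a sketch, assuming i8): with
// umax(Step) = 3, N = 0 - 3 = 253 (mod 256). If AR <u 253 on every
// iteration, then AR + Step <= 252 + 3 = 255, which never wraps, so
// FlagNUW is justified.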
4759 | |||||
4760 | namespace { | ||||
4761 | |||||
4762 | /// Represents an abstract binary operation. This may exist as a | ||||
4763 | /// normal instruction or constant expression, or may have been | ||||
4764 | /// derived from an expression tree. | ||||
4765 | struct BinaryOp { | ||||
4766 | unsigned Opcode; | ||||
4767 | Value *LHS; | ||||
4768 | Value *RHS; | ||||
4769 | bool IsNSW = false; | ||||
4770 | bool IsNUW = false; | ||||
4771 | |||||
4772 | /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or | ||||
4773 | /// constant expression. | ||||
4774 | Operator *Op = nullptr; | ||||
4775 | |||||
4776 | explicit BinaryOp(Operator *Op) | ||||
4777 | : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)), | ||||
4778 | Op(Op) { | ||||
4779 | if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) { | ||||
4780 | IsNSW = OBO->hasNoSignedWrap(); | ||||
4781 | IsNUW = OBO->hasNoUnsignedWrap(); | ||||
4782 | } | ||||
4783 | } | ||||
4784 | |||||
4785 | explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false, | ||||
4786 | bool IsNUW = false) | ||||
4787 | : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {} | ||||
4788 | }; | ||||
4789 | |||||
4790 | } // end anonymous namespace | ||||
4791 | |||||
4792 | /// Try to map \p V into a BinaryOp, and return \c None on failure. | ||||
4793 | static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) { | ||||
4794 | auto *Op = dyn_cast<Operator>(V); | ||||
4795 | if (!Op) | ||||
4796 | return None; | ||||
4797 | |||||
4798 | // Implementation detail: all the cleverness here should happen without | ||||
4799 | // creating new SCEV expressions -- our caller knows tricks to avoid creating | ||||
4800 | // SCEV expressions when possible, and we should not break that. | ||||
4801 | |||||
4802 | switch (Op->getOpcode()) { | ||||
4803 | case Instruction::Add: | ||||
4804 | case Instruction::Sub: | ||||
4805 | case Instruction::Mul: | ||||
4806 | case Instruction::UDiv: | ||||
4807 | case Instruction::URem: | ||||
4808 | case Instruction::And: | ||||
4809 | case Instruction::Or: | ||||
4810 | case Instruction::AShr: | ||||
4811 | case Instruction::Shl: | ||||
4812 | return BinaryOp(Op); | ||||
4813 | |||||
4814 | case Instruction::Xor: | ||||
4815 | if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1))) | ||||
4816 | // If the RHS of the xor is a signmask, then this is just an add. | ||||
4817 | // Instcombine turns add of signmask into xor as a strength reduction step. | ||||
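// For example, in i32, (xor %x, 0x80000000) is equivalent to
// (add %x, 0x80000000): flipping the sign bit adds 2^31 modulo 2^32.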
4818 | if (RHSC->getValue().isSignMask()) | ||||
4819 | return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); | ||||
4820 | return BinaryOp(Op); | ||||
4821 | |||||
4822 | case Instruction::LShr: | ||||
4823 | // Turn a logical shift right by a constant into an unsigned divide. | ||||
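// For example, (lshr i32 %x, 3) is rewritten as (udiv i32 %x, 8).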
4824 | if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) { | ||||
4825 | uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth(); | ||||
4826 | |||||
4827 | // If the shift count is not less than the bitwidth, the result of | ||||
4828 | // the shift is undefined. Don't try to analyze it, because the | ||||
4829 | // resolution chosen here may differ from the resolution chosen in | ||||
4830 | // other parts of the compiler. | ||||
4831 | if (SA->getValue().ult(BitWidth)) { | ||||
4832 | Constant *X = | ||||
4833 | ConstantInt::get(SA->getContext(), | ||||
4834 | APInt::getOneBitSet(BitWidth, SA->getZExtValue())); | ||||
4835 | return BinaryOp(Instruction::UDiv, Op->getOperand(0), X); | ||||
4836 | } | ||||
4837 | } | ||||
4838 | return BinaryOp(Op); | ||||
4839 | |||||
4840 | case Instruction::ExtractValue: { | ||||
4841 | auto *EVI = cast<ExtractValueInst>(Op); | ||||
4842 | if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0) | ||||
4843 | break; | ||||
4844 | |||||
4845 | auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()); | ||||
4846 | if (!WO) | ||||
4847 | break; | ||||
4848 | |||||
4849 | Instruction::BinaryOps BinOp = WO->getBinaryOp(); | ||||
4850 | bool Signed = WO->isSigned(); | ||||
4851 | // TODO: Should add nuw/nsw flags for mul as well. | ||||
4852 | if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT)) | ||||
4853 | return BinaryOp(BinOp, WO->getLHS(), WO->getRHS()); | ||||
4854 | |||||
4855 | // Now that we know that all uses of the arithmetic-result component of | ||||
4856 | // WO are guarded by the overflow check, we can go ahead and pretend | ||||
4857 | // that the arithmetic is non-overflowing. | ||||
4858 | return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(), | ||||
4859 | /* IsNSW = */ Signed, /* IsNUW = */ !Signed); | ||||
4860 | } | ||||
4861 | |||||
4862 | default: | ||||
4863 | break; | ||||
4864 | } | ||||
4865 | |||||
4866 | // Recognise the loop.decrement.reg intrinsic: as it has exactly the same | ||||
4867 | // semantics as a Sub, return a binary sub expression. | ||||
4868 | if (auto *II = dyn_cast<IntrinsicInst>(V)) | ||||
4869 | if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg) | ||||
4870 | return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1)); | ||||
4871 | |||||
4872 | return None; | ||||
4873 | } | ||||
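// A sketch of how a caller typically uses this (illustrative; V and DT
// stand for the caller's value and dominator tree):
//   if (Optional<BinaryOp> BO = MatchBinaryOp(V, DT))
//     if (BO->Opcode == Instruction::Add && BO->IsNSW)
//       ; // V behaves as a non-wrapping signed add of BO->LHS and BO->RHS.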
4874 | |||||
4875 | /// Helper function to createAddRecFromPHIWithCasts. We have a phi | ||||
4876 | /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via | ||||
4877 | /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the | ||||
4878 | /// way. This function checks if \p Op, an operand of this SCEVAddExpr, | ||||
4879 | /// follows one of the following patterns: | ||||
4880 | /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) | ||||
4881 | /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) | ||||
4882 | /// If the SCEV expression of \p Op conforms with one of the expected patterns | ||||
4883 | /// we return the type of the truncation operation, and indicate whether the | ||||
4884 | /// truncated type should be treated as signed/unsigned by setting | ||||
4885 | /// \p Signed to true/false, respectively. | ||||
4886 | static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, | ||||
4887 | bool &Signed, ScalarEvolution &SE) { | ||||
4888 | // The case where Op == SymbolicPHI (that is, with no type conversions on | ||||
4889 | // the way) is handled by the regular add recurrence creating logic and | ||||
4890 | // would have already been triggered in createAddRecForPHI. Reaching it here | ||||
4891 | // means that createAddRecFromPHI had failed for this PHI before (e.g., | ||||
4892 | // because one of the other operands of the SCEVAddExpr updating this PHI is | ||||
4893 | // not invariant). | ||||
4894 | // | ||||
4895 | // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in | ||||
4896 | // this case predicates that allow us to prove that Op == SymbolicPHI will | ||||
4897 | // be added. | ||||
4898 | if (Op == SymbolicPHI) | ||||
4899 | return nullptr; | ||||
4900 | |||||
4901 | unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); | ||||
4902 | unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); | ||||
4903 | if (SourceBits != NewBits) | ||||
4904 | return nullptr; | ||||
4905 | |||||
4906 | const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); | ||||
4907 | const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); | ||||
4908 | if (!SExt && !ZExt) | ||||
4909 | return nullptr; | ||||
4910 | const SCEVTruncateExpr *Trunc = | ||||
4911 | SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) | ||||
4912 | : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); | ||||
4913 | if (!Trunc) | ||||
4914 | return nullptr; | ||||
4915 | const SCEV *X = Trunc->getOperand(); | ||||
4916 | if (X != SymbolicPHI) | ||||
4917 | return nullptr; | ||||
4918 | Signed = SExt != nullptr; | ||||
4919 | return Trunc->getType(); | ||||
4920 | } | ||||
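// For example, for a 64-bit %SymbolicPHI the operand
//   (sext i32 (trunc i64 %SymbolicPHI to i32) to i64)
// matches, returning TruncTy = i32 with Signed = true; the zext form
// returns the same type with Signed = false.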
4921 | |||||
4922 | static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { | ||||
4923 | if (!PN->getType()->isIntegerTy()) | ||||
4924 | return nullptr; | ||||
4925 | const Loop *L = LI.getLoopFor(PN->getParent()); | ||||
4926 | if (!L || L->getHeader() != PN->getParent()) | ||||
4927 | return nullptr; | ||||
4928 | return L; | ||||
4929 | } | ||||
4930 | |||||
4931 | // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the | ||||
4932 | // computation that updates the phi matches the following pattern: | ||||
4933 | // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum | ||||
4934 | // which corresponds to a phi->trunc->sext/zext->add->phi update chain. | ||||
4935 | // If so, try to see if it can be rewritten as an AddRecExpr under some | ||||
4936 | // Predicates. If successful, return them as a pair. Also cache the results | ||||
4937 | // of the analysis. | ||||
4938 | // | ||||
4939 | // Example usage scenario: | ||||
4940 | // Say the Rewriter is called for the following SCEV: | ||||
4941 | // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) | ||||
4942 | // where: | ||||
4943 | // %X = phi i64 (%Start, %BEValue) | ||||
4944 | // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), | ||||
4945 | // and call this function with %SymbolicPHI = %X. | ||||
4946 | // | ||||
4947 | // The analysis will find that the value coming around the backedge has | ||||
4948 | // the following SCEV: | ||||
4949 | // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step) | ||||
4950 | // Upon concluding that this matches the desired pattern, the function | ||||
4951 | // will return the pair {NewAddRec, SmallPredsVec} where: | ||||
4952 | // NewAddRec = {%Start,+,%Step} | ||||
4953 | // SmallPredsVec = {P1, P2, P3} as follows: | ||||
4954 | // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw> | ||||
4955 | // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64) | ||||
4956 | // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64) | ||||
4957 | // The returned pair means that SymbolicPHI can be rewritten into NewAddRec | ||||
4958 | // under the predicates {P1,P2,P3}. | ||||
4959 | // This predicated rewrite will be cached in PredicatedSCEVRewrites: | ||||
4960 | // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}} | ||||
4961 | // | ||||
4962 | // TODO's: | ||||
4963 | // | ||||
4964 | // 1) Extend the Induction descriptor to also support inductions that involve | ||||
4965 | // casts: When needed (namely, when we are called in the context of the | ||||
4966 | // vectorizer induction analysis), a Set of cast instructions will be | ||||
4967 | // populated by this method, and provided back to isInductionPHI. This is | ||||
4968 | // needed to allow the vectorizer to properly record them to be ignored by | ||||
4969 | // the cost model and to avoid vectorizing them (otherwise these casts, | ||||
4970 | // which are redundant under the runtime overflow checks, will be | ||||
4971 | // vectorized, which can be costly). | ||||
4972 | // | ||||
4973 | // 2) Support additional induction/PHISCEV patterns: We also want to support | ||||
4974 | // inductions where the sext-trunc / zext-trunc operations (partly) occur | ||||
4975 | // after the induction update operation (the induction increment): | ||||
4976 | // | ||||
4977 | // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) | ||||
4978 | // which corresponds to a phi->add->trunc->sext/zext->phi update chain. | ||||
4979 | // | ||||
4980 | // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix) | ||||
4981 | // which corresponds to a phi->trunc->add->sext/zext->phi update chain. | ||||
4982 | // | ||||
4983 | // 3) Outline common code with createAddRecFromPHI to avoid duplication. | ||||
4984 | Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> | ||||
4985 | ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { | ||||
4986 | SmallVector<const SCEVPredicate *, 3> Predicates; | ||||
4987 | |||||
4988 | // *** Part1: Analyze if we have a phi-with-cast pattern for which we can | ||||
4989 | // return an AddRec expression under some predicate. | ||||
4990 | |||||
4991 | auto *PN = cast<PHINode>(SymbolicPHI->getValue()); | ||||
4992 | const Loop *L = isIntegerLoopHeaderPHI(PN, LI); | ||||
4993 | assert(L && "Expecting an integer loop header phi"); | ||||
4994 | |||||
4995 | // The loop may have multiple entrances or multiple exits; we can analyze | ||||
4996 | // this phi as an addrec if it has a unique entry value and a unique | ||||
4997 | // backedge value. | ||||
4998 | Value *BEValueV = nullptr, *StartValueV = nullptr; | ||||
4999 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { | ||||
5000 | Value *V = PN->getIncomingValue(i); | ||||
5001 | if (L->contains(PN->getIncomingBlock(i))) { | ||||
5002 | if (!BEValueV) { | ||||
5003 | BEValueV = V; | ||||
5004 | } else if (BEValueV != V) { | ||||
5005 | BEValueV = nullptr; | ||||
5006 | break; | ||||
5007 | } | ||||
5008 | } else if (!StartValueV) { | ||||
5009 | StartValueV = V; | ||||
5010 | } else if (StartValueV != V) { | ||||
5011 | StartValueV = nullptr; | ||||
5012 | break; | ||||
5013 | } | ||||
5014 | } | ||||
5015 | if (!BEValueV || !StartValueV) | ||||
5016 | return None; | ||||
5017 | |||||
5018 | const SCEV *BEValue = getSCEV(BEValueV); | ||||
5019 | |||||
5020 | // If the value coming around the backedge is an add with the symbolic | ||||
5021 | // value we just inserted, possibly with casts that we can ignore under | ||||
5022 | // an appropriate runtime guard, then we found a simple induction variable! | ||||
5023 | const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); | ||||
5024 | if (!Add) | ||||
5025 | return None; | ||||
5026 | |||||
5027 | // If there is a single occurrence of the symbolic value, possibly | ||||
5028 | // casted, replace it with a recurrence. | ||||
5029 | unsigned FoundIndex = Add->getNumOperands(); | ||||
5030 | Type *TruncTy = nullptr; | ||||
5031 | bool Signed; | ||||
5032 | for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) | ||||
5033 | if ((TruncTy = | ||||
5034 | isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) | ||||
5035 | if (FoundIndex == e) { | ||||
5036 | FoundIndex = i; | ||||
5037 | break; | ||||
5038 | } | ||||
5039 | |||||
5040 | if (FoundIndex == Add->getNumOperands()) | ||||
5041 | return None; | ||||
5042 | |||||
5043 | // Create an add with everything but the specified operand. | ||||
5044 | SmallVector<const SCEV *, 8> Ops; | ||||
5045 | for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) | ||||
5046 | if (i != FoundIndex) | ||||
5047 | Ops.push_back(Add->getOperand(i)); | ||||
5048 | const SCEV *Accum = getAddExpr(Ops); | ||||
5049 | |||||
5050 | // The runtime checks will not be valid if the step amount is | ||||
5051 | // varying inside the loop. | ||||
5052 | if (!isLoopInvariant(Accum, L)) | ||||
5053 | return None; | ||||
5054 | |||||
5055 | // *** Part2: Create the predicates | ||||
5056 | |||||
5057 | // Analysis was successful: we have a phi-with-cast pattern for which we | ||||
5058 | // can return an AddRec expression under the following predicates: | ||||
5059 | // | ||||
5060 | // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) | ||||
5061 | // fits within the truncated type (does not overflow) for i = 0 to n-1. | ||||
5062 | // P2: An Equal predicate that guarantees that | ||||
5063 | // Start = (Ext ix (Trunc iy (Start) to ix) to iy) | ||||
5064 | // P3: An Equal predicate that guarantees that | ||||
5065 | // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) | ||||
5066 | // | ||||
5067 | // As we next prove, the above predicates guarantee that: | ||||
5068 | // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) | ||||
5069 | // | ||||
5070 | // | ||||
5071 | // More formally, we want to prove that: | ||||
5072 | // Expr(i+1) = Start + (i+1) * Accum | ||||
5073 | // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum | ||||
5074 | // | ||||
5075 | // Given that: | ||||
5076 | // 1) Expr(0) = Start | ||||
5077 | // 2) Expr(1) = Start + Accum | ||||
5078 | // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 | ||||
5079 | // 3) Induction hypothesis (step i): | ||||
5080 | // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum | ||||
5081 | // | ||||
5082 | // Proof: | ||||
5083 | // Expr(i+1) = | ||||
5084 | // = Start + (i+1)*Accum | ||||
5085 | // = (Start + i*Accum) + Accum | ||||
5086 | // = Expr(i) + Accum | ||||
5087 | // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum | ||||
5088 | // :: from step i | ||||
5089 | // | ||||
5090 | // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum | ||||
5091 | // | ||||
5092 | // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) | ||||
5093 | // + (Ext ix (Trunc iy (Accum) to ix) to iy) | ||||
5094 | // + Accum :: from P3 | ||||
5095 | // | ||||
5096 | // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) | ||||
5097 | // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) | ||||
5098 | // | ||||
5099 | // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum | ||||
5100 | // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum | ||||
5101 | // | ||||
5102 | // By induction, the same applies to all iterations 1<=i<n. | ||||
5103 | // | ||||
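// A small concrete instance (illustrative only): for Start = 0 and
// Accum = 1 (both i64) with TruncTy = i8, P2 and P3 hold trivially at
// compile time, and P1 reduces to the runtime condition that {0,+,1}<i8>
// never sign-wraps, i.e. that the loop runs at most 128 iterations.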
5104 | |||||
5105 | // Create a truncated addrec for which we will add a no overflow check (P1). | ||||
5106 | const SCEV *StartVal = getSCEV(StartValueV); | ||||
5107 | const SCEV *PHISCEV = | ||||
5108 | getAddRecExpr(getTruncateExpr(StartVal, TruncTy), | ||||
5109 | getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); | ||||
5110 | |||||
5111 | // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. | ||||
5112 | // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV | ||||
5113 | // will be constant. | ||||
5114 | // | ||||
5115 | // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't | ||||
5116 | // add P1. | ||||
5117 | if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { | ||||
5118 | SCEVWrapPredicate::IncrementWrapFlags AddedFlags = | ||||
5119 | Signed ? SCEVWrapPredicate::IncrementNSSW | ||||
5120 | : SCEVWrapPredicate::IncrementNUSW; | ||||
5121 | const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); | ||||
5122 | Predicates.push_back(AddRecPred); | ||||
5123 | } | ||||
5124 | |||||
5125 | // Create the Equal Predicates P2,P3: | ||||
5126 | |||||
5127 | // It is possible that the predicates P2 and/or P3 are computable at | ||||
5128 | // compile time due to StartVal and/or Accum being constants. | ||||
5129 | // If either one is, then we can check that now and escape if either P2 | ||||
5130 | // or P3 is false. | ||||
5131 | |||||
5132 | // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) | ||||
5133 | // for each of StartVal and Accum | ||||
5134 | auto getExtendedExpr = [&](const SCEV *Expr, | ||||
5135 | bool CreateSignExtend) -> const SCEV * { | ||||
5136 | assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); | ||||
5137 | const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); | ||||
5138 | const SCEV *ExtendedExpr = | ||||
5139 | CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType()) | ||||
5140 | : getZeroExtendExpr(TruncatedExpr, Expr->getType()); | ||||
5141 | return ExtendedExpr; | ||||
5142 | }; | ||||
5143 | |||||
5144 | // Given: | ||||
5145 | // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy) | ||||
5146 | // = getExtendedExpr(Expr) | ||||
5147 | // Determine whether the predicate P: Expr == ExtendedExpr | ||||
5148 | // is known to be false at compile time | ||||
5149 | auto PredIsKnownFalse = [&](const SCEV *Expr, | ||||
5150 | const SCEV *ExtendedExpr) -> bool { | ||||
5151 | return Expr != ExtendedExpr && | ||||
5152 | isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr); | ||||
5153 | }; | ||||
5154 | |||||
5155 | const SCEV *StartExtended = getExtendedExpr(StartVal, Signed); | ||||
5156 | if (PredIsKnownFalse(StartVal, StartExtended)) { | ||||
5157 | LLVM_DEBUG(dbgs() << "P2 is compile-time false\n"); | ||||
5158 | return None; | ||||
5159 | } | ||||
5160 | |||||
5161 | // The Step is always Signed (because the overflow checks are either | ||||
5162 | // NSSW or NUSW) | ||||
5163 | const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true); | ||||
5164 | if (PredIsKnownFalse(Accum, AccumExtended)) { | ||||
5165 | LLVM_DEBUG(dbgs() << "P3 is compile-time false\n"); | ||||
5166 | return None; | ||||
5167 | } | ||||
5168 | |||||
5169 | auto AppendPredicate = [&](const SCEV *Expr, | ||||
5170 | const SCEV *ExtendedExpr) -> void { | ||||
5171 | if (Expr != ExtendedExpr && | ||||
5172 | !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) { | ||||
5173 | const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr); | ||||
5174 | LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred); | ||||
5175 | Predicates.push_back(Pred); | ||||
5176 | } | ||||
5177 | }; | ||||
5178 | |||||
5179 | AppendPredicate(StartVal, StartExtended); | ||||
5180 | AppendPredicate(Accum, AccumExtended); | ||||
5181 | |||||
5182 | // *** Part3: Predicates are ready. Now go ahead and create the new addrec in | ||||
5183 | // which the casts had been folded away. The caller can rewrite SymbolicPHI | ||||
5184 | // into NewAR if it will also add the runtime overflow checks specified in | ||||
5185 | // Predicates. | ||||
5186 | auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap); | ||||
5187 | |||||
5188 | std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite = | ||||
5189 | std::make_pair(NewAR, Predicates); | ||||
5190 | // Remember the result of the analysis for this SCEV at this location. | ||||
5191 | PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite; | ||||
5192 | return PredRewrite; | ||||
5193 | } | ||||
5194 | |||||
5195 | Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> | ||||
5196 | ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) { | ||||
5197 | auto *PN = cast<PHINode>(SymbolicPHI->getValue()); | ||||
5198 | const Loop *L = isIntegerLoopHeaderPHI(PN, LI); | ||||
5199 | if (!L) | ||||
5200 | return None; | ||||
5201 | |||||
5202 | // Check to see if we already analyzed this PHI. | ||||
5203 | auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L}); | ||||
5204 | if (I != PredicatedSCEVRewrites.end()) { | ||||
5205 | std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite = | ||||
5206 | I->second; | ||||
5207 | // Analysis was done before and failed to create an AddRec: | ||||
5208 | if (Rewrite.first == SymbolicPHI) | ||||
5209 | return None; | ||||
5210 | // Analysis was done before and succeeded in creating an AddRec under | ||||
5211 | // a predicate: | ||||
5212 | assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec"); | ||||
5213 | assert(!(Rewrite.second).empty() && "Expected to find Predicates"); | ||||
5214 | return Rewrite; | ||||
5215 | } | ||||
5216 | |||||
5217 | Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> | ||||
5218 | Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI); | ||||
5219 | |||||
5220 | // Record in the cache that the analysis failed | ||||
5221 | if (!Rewrite) { | ||||
5222 | SmallVector<const SCEVPredicate *, 3> Predicates; | ||||
5223 | PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates}; | ||||
5224 | return None; | ||||
5225 | } | ||||
5226 | |||||
5227 | return Rewrite; | ||||
5228 | } | ||||
5229 | |||||
5230 | // FIXME: This utility is currently required because the Rewriter currently | ||||
5231 | // does not rewrite this expression: | ||||
5232 | // {0, +, (sext ix (trunc iy to ix) to iy)} | ||||
5233 | // into {0, +, %step}, | ||||
5234 | // even when the following Equal predicate exists: | ||||
5235 | // "%step == (sext ix (trunc iy to ix) to iy)". | ||||
5236 | bool PredicatedScalarEvolution::areAddRecsEqualWithPreds( | ||||
5237 | const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const { | ||||
5238 | if (AR1 == AR2) | ||||
5239 | return true; | ||||
5240 | |||||
5241 | auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool { | ||||
5242 | if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) && | ||||
5243 | !Preds.implies(SE.getEqualPredicate(Expr2, Expr1))) | ||||
5244 | return false; | ||||
5245 | return true; | ||||
5246 | }; | ||||
5247 | |||||
5248 | if (!areExprsEqual(AR1->getStart(), AR2->getStart()) || | ||||
5249 | !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE))) | ||||
5250 | return false; | ||||
5251 | return true; | ||||
5252 | } | ||||
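// For example, AR1 = {%start,+,%step} and
// AR2 = {%start,+,(sext i32 (trunc i64 %step to i32) to i64)} compare
// equal here whenever the predicate set implies
// "%step == (sext i32 (trunc i64 %step to i32) to i64)".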
5253 | |||||
5254 | /// A helper function for createAddRecFromPHI to handle simple cases. | ||||
5255 | /// | ||||
5256 | /// This function tries to find an AddRec expression for the simplest (yet most | ||||
5257 | /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). | ||||
5258 | /// If it fails, createAddRecFromPHI will use a more general, but slow, | ||||
5259 | /// technique for finding the AddRec expression. | ||||
5260 | const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, | ||||
5261 | Value *BEValueV, | ||||
5262 | Value *StartValueV) { | ||||
5263 | const Loop *L = LI.getLoopFor(PN->getParent()); | ||||
5264 | assert(L && L->getHeader() == PN->getParent()); | ||||
5265 | assert(BEValueV && StartValueV); | ||||
5266 | |||||
5267 | auto BO = MatchBinaryOp(BEValueV, DT); | ||||
5268 | if (!BO) | ||||
5269 | return nullptr; | ||||
5270 | |||||
5271 | if (BO->Opcode != Instruction::Add) | ||||
5272 | return nullptr; | ||||
5273 | |||||
5274 | const SCEV *Accum = nullptr; | ||||
5275 | if (BO->LHS == PN && L->isLoopInvariant(BO->RHS)) | ||||
5276 | Accum = getSCEV(BO->RHS); | ||||
5277 | else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS)) | ||||
5278 | Accum = getSCEV(BO->LHS); | ||||
5279 | |||||
5280 | if (!Accum) | ||||
5281 | return nullptr; | ||||
5282 | |||||
5283 | SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; | ||||
5284 | if (BO->IsNUW) | ||||
5285 | Flags = setFlags(Flags, SCEV::FlagNUW); | ||||
5286 | if (BO->IsNSW) | ||||
5287 | Flags = setFlags(Flags, SCEV::FlagNSW); | ||||
5288 | |||||
5289 | const SCEV *StartVal = getSCEV(StartValueV); | ||||
5290 | const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); | ||||
5291 | |||||
5292 | ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; | ||||
5293 | |||||
5294 | // We can add Flags to the post-inc expression only if we | ||||
5295 | // know that it is *undefined behavior* for BEValueV to | ||||
5296 | // overflow. | ||||
5297 | if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) | ||||
5298 | if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) | ||||
5299 | (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); | ||||
5300 | |||||
5301 | return PHISCEV; | ||||
5302 | } | ||||
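// For example, given
//   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add nuw nsw i64 %iv, 4
// this yields {0,+,4}<nuw><nsw> for %iv.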
5303 | |||||
5304 | const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) { | ||||
5305 | const Loop *L = LI.getLoopFor(PN->getParent()); | ||||
5306 | if (!L || L->getHeader() != PN->getParent()) | ||||
5307 | return nullptr; | ||||
5308 | |||||
5309 | // The loop may have multiple entrances or multiple exits; we can analyze | ||||
5310 | // this phi as an addrec if it has a unique entry value and a unique | ||||
5311 | // backedge value. | ||||
5312 | Value *BEValueV = nullptr, *StartValueV = nullptr; | ||||
5313 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { | ||||
5314 | Value *V = PN->getIncomingValue(i); | ||||
5315 | if (L->contains(PN->getIncomingBlock(i))) { | ||||
5316 | if (!BEValueV) { | ||||
5317 | BEValueV = V; | ||||
5318 | } else if (BEValueV != V) { | ||||
5319 | BEValueV = nullptr; | ||||
5320 | break; | ||||
5321 | } | ||||
5322 | } else if (!StartValueV) { | ||||
5323 | StartValueV = V; | ||||
5324 | } else if (StartValueV != V) { | ||||
5325 | StartValueV = nullptr; | ||||
5326 | break; | ||||
5327 | } | ||||
5328 | } | ||||
5329 | if (!BEValueV || !StartValueV) | ||||
5330 | return nullptr; | ||||
5331 | |||||
5332 | assert(ValueExprMap.find_as(PN) == ValueExprMap.end() && | ||||
5333 | "PHI node already processed?"); | ||||
5334 | |||||
5335 | // First, try to find an AddRec expression without creating a fictitious symbolic | ||||
5336 | // value for PN. | ||||
5337 | if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV)) | ||||
5338 | return S; | ||||
5339 | |||||
5340 | // Handle PHI node value symbolically. | ||||
5341 | const SCEV *SymbolicName = getUnknown(PN); | ||||
5342 | ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName}); | ||||
5343 | |||||
5344 | // Using this symbolic name for the PHI, analyze the value coming around | ||||
5345 | // the back-edge. | ||||
5346 | const SCEV *BEValue = getSCEV(BEValueV); | ||||
5347 | |||||
5348 | // NOTE: If BEValue is loop invariant, we know that the PHI node just | ||||
5349 | // has a special value for the first iteration of the loop. | ||||
5350 | |||||
5351 | // If the value coming around the backedge is an add with the symbolic | ||||
5352 | // value we just inserted, then we found a simple induction variable! | ||||
5353 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { | ||||
5354 | // If there is a single occurrence of the symbolic value, replace it | ||||
5355 | // with a recurrence. | ||||
5356 | unsigned FoundIndex = Add->getNumOperands(); | ||||
5357 | for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) | ||||
5358 | if (Add->getOperand(i) == SymbolicName) | ||||
5359 | if (FoundIndex == e) { | ||||
5360 | FoundIndex = i; | ||||
5361 | break; | ||||
5362 | } | ||||
5363 | |||||
5364 | if (FoundIndex != Add->getNumOperands()) { | ||||
5365 | // Create an add with everything but the specified operand. | ||||
5366 | SmallVector<const SCEV *, 8> Ops; | ||||
5367 | for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) | ||||
5368 | if (i != FoundIndex) | ||||
5369 | Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i), | ||||
5370 | L, *this)); | ||||
5371 | const SCEV *Accum = getAddExpr(Ops); | ||||
5372 | |||||
5373 | // This is not a valid addrec if the step amount is varying each | ||||
5374 | // loop iteration, but is not itself an addrec in this loop. | ||||
5375 | if (isLoopInvariant(Accum, L) || | ||||
5376 | (isa<SCEVAddRecExpr>(Accum) && | ||||
5377 | cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { | ||||
5378 | SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; | ||||
5379 | |||||
5380 | if (auto BO = MatchBinaryOp(BEValueV, DT)) { | ||||
5381 | if (BO->Opcode == Instruction::Add && BO->LHS == PN) { | ||||
5382 | if (BO->IsNUW) | ||||
5383 | Flags = setFlags(Flags, SCEV::FlagNUW); | ||||
5384 | if (BO->IsNSW) | ||||
5385 | Flags = setFlags(Flags, SCEV::FlagNSW); | ||||
5386 | } | ||||
5387 | } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) { | ||||
5388 | // If the increment is an inbounds GEP, then we know the address | ||||
5389 | // space cannot be wrapped around. We cannot make any guarantee | ||||
5390 | // about signed or unsigned overflow because pointers are | ||||
5391 | // unsigned but we may have a negative index from the base | ||||
5392 | // pointer. We can guarantee that no unsigned wrap occurs if the | ||||
5393 | // indices form a positive value. | ||||
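// For example, %p.next = getelementptr inbounds i32, i32* %p, i64 1
// advances the phi %p by a known-positive offset, so both FlagNW and
// FlagNUW apply.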
5394 | if (GEP->isInBounds() && GEP->getOperand(0) == PN) { | ||||
5395 | Flags = setFlags(Flags, SCEV::FlagNW); | ||||
5396 | |||||
5397 | const SCEV *Ptr = getSCEV(GEP->getPointerOperand()); | ||||
5398 | if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr))) | ||||
5399 | Flags = setFlags(Flags, SCEV::FlagNUW); | ||||
5400 | } | ||||
5401 | |||||
5402 | // We cannot transfer nuw and nsw flags from subtraction | ||||
5403 | // operations -- sub nuw X, Y is not the same as add nuw X, -Y | ||||
5404 | // for instance. | ||||
5405 | } | ||||
5406 | |||||
5407 | const SCEV *StartVal = getSCEV(StartValueV); | ||||
5408 | const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); | ||||
5409 | |||||
5410 | // Okay, for the entire analysis of this edge we assumed the PHI | ||||
5411 | // to be symbolic. We now need to go back and purge all of the | ||||
5412 | // entries for the scalars that use the symbolic expression. | ||||
5413 | forgetSymbolicName(PN, SymbolicName); | ||||
5414 | ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; | ||||
5415 | |||||
5416 | // We can add Flags to the post-inc expression only if we | ||||
5417 | // know that it is *undefined behavior* for BEValueV to | ||||
5418 | // overflow. | ||||
5419 | if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) | ||||
5420 | if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) | ||||
5421 | (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); | ||||
5422 | |||||
5423 | return PHISCEV; | ||||
5424 | } | ||||
5425 | } | ||||
5426 | } else { | ||||
5427 | // Otherwise, this could be a loop like this: | ||||
5428 | // i = 0; for (j = 1; ..; ++j) { .... i = j; } | ||||
5429 | // In this case, j = {1,+,1} and BEValue is j. | ||||
5430 | // Because the other in-value of i (0) fits the evolution of BEValue, | ||||
5431 | // i really is an addrec evolution. | ||||
5432 | // | ||||
5433 | // We can generalize this saying that i is the shifted value of BEValue | ||||
5434 | // by one iteration: | ||||
5435 | // PHI(f(0), f({1,+,1})) --> f({0,+,1}) | ||||
5436 | const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this); | ||||
5437 | const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false); | ||||
5438 | if (Shifted != getCouldNotCompute() && | ||||
5439 | Start != getCouldNotCompute()) { | ||||
5440 | const SCEV *StartVal = getSCEV(StartValueV); | ||||
5441 | if (Start == StartVal) { | ||||
5442 | // Okay, for the entire analysis of this edge we assumed the PHI | ||||
5443 | // to be symbolic. We now need to go back and purge all of the | ||||
5444 | // entries for the scalars that use the symbolic expression. | ||||
5445 | forgetSymbolicName(PN, SymbolicName); | ||||
5446 | ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted; | ||||
5447 | return Shifted; | ||||
5448 | } | ||||
5449 | } | ||||
5450 | } | ||||
5451 | |||||
5452 | // Remove the temporary PHI node SCEV that has been inserted while intending | ||||
5453 | // to create an AddRecExpr for this PHI node. We cannot keep this temporary, | ||||
5454 | // as it would prevent later (possibly simpler) SCEV expressions from being | ||||
5455 | // added to the ValueExprMap. | ||||
5456 | eraseValueFromMap(PN); | ||||
5457 | |||||
5458 | return nullptr; | ||||
5459 | } | ||||
5460 | |||||
5461 | // Checks if the SCEV S is available at BB. S is considered available at BB | ||||
5462 | // if S can be materialized at BB without introducing a fault. | ||||
5463 | static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S, | ||||
5464 | BasicBlock *BB) { | ||||
5465 | struct CheckAvailable { | ||||
5466 | bool TraversalDone = false; | ||||
5467 | bool Available = true; | ||||
5468 | |||||
5469 | const Loop *L = nullptr; // The loop BB is in (can be nullptr) | ||||
5470 | BasicBlock *BB = nullptr; | ||||
5471 | DominatorTree &DT; | ||||
5472 | |||||
5473 | CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT) | ||||
5474 | : L(L), BB(BB), DT(DT) {} | ||||
5475 | |||||
5476 | bool setUnavailable() { | ||||
5477 | TraversalDone = true; | ||||
5478 | Available = false; | ||||
5479 | return false; | ||||
5480 | } | ||||
5481 | |||||
5482 | bool follow(const SCEV *S) { | ||||
5483 | switch (S->getSCEVType()) { | ||||
5484 | case scConstant: | ||||
5485 | case scPtrToInt: | ||||
5486 | case scTruncate: | ||||
5487 | case scZeroExtend: | ||||
5488 | case scSignExtend: | ||||
5489 | case scAddExpr: | ||||
5490 | case scMulExpr: | ||||
5491 | case scUMaxExpr: | ||||
5492 | case scSMaxExpr: | ||||
5493 | case scUMinExpr: | ||||
5494 | case scSMinExpr: | ||||
5495 | // These expressions are available if their operand(s) is/are. | ||||
5496 | return true; | ||||
5497 | |||||
5498 | case scAddRecExpr: { | ||||
5499 | // We allow add recurrences that are in the loop BB is in, or in some | ||||
5500 | // outer loop. This guarantees availability because the value of the | ||||
5501 | // add recurrence at BB is simply the "current" value of the induction | ||||
5502 | // variable. We can relax this in the future; for instance an add | ||||
5503 | // recurrence on a sibling dominating loop is also available at BB. | ||||
5504 | const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop(); | ||||
5505 | if (L && (ARLoop == L || ARLoop->contains(L))) | ||||
5506 | return true; | ||||
5507 | |||||
5508 | return setUnavailable(); | ||||
5509 | } | ||||
5510 | |||||
5511 | case scUnknown: { | ||||
5512 | // For SCEVUnknown, we check for simple dominance. | ||||
5513 | const auto *SU = cast<SCEVUnknown>(S); | ||||
5514 | Value *V = SU->getValue(); | ||||
5515 | |||||
5516 | if (isa<Argument>(V)) | ||||
5517 | return false; | ||||
5518 | |||||
5519 | if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB)) | ||||
5520 | return false; | ||||
5521 | |||||
5522 | return setUnavailable(); | ||||
5523 | } | ||||
5524 | |||||
5525 | case scUDivExpr: | ||||
5526 | case scCouldNotCompute: | ||||
5527 | // We do not try to be smart about these at all. | ||||
5528 | return setUnavailable(); | ||||
5529 | } | ||||
5530 | llvm_unreachable("Unknown SCEV kind!"); | ||||
5531 | } | ||||
5532 | |||||
5533 | bool isDone() { return TraversalDone; } | ||||
5534 | }; | ||||
5535 | |||||
5536 | CheckAvailable CA(L, BB, DT); | ||||
5537 | SCEVTraversal<CheckAvailable> ST(CA); | ||||
5538 | |||||
5539 | ST.visitAll(S); | ||||
5540 | return CA.Available; | ||||
5541 | } | ||||
5542 | |||||
5543 | // Try to match a control flow sequence that branches out at BI and merges back | ||||
5544 | // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful | ||||
5545 | // match. | ||||
5546 | static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, | ||||
5547 | Value *&C, Value *&LHS, Value *&RHS) { | ||||
5548 | C = BI->getCondition(); | ||||
5549 | |||||
5550 | BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); | ||||
5551 | BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); | ||||
5552 | |||||
5553 | if (!LeftEdge.isSingleEdge()) | ||||
5554 | return false; | ||||
5555 | |||||
5556 | assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); | ||||
5557 | |||||
5558 | Use &LeftUse = Merge->getOperandUse(0); | ||||
5559 | Use &RightUse = Merge->getOperandUse(1); | ||||
5560 | |||||
5561 | if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { | ||||
5562 | LHS = LeftUse; | ||||
5563 | RHS = RightUse; | ||||
5564 | return true; | ||||
5565 | } | ||||
5566 | |||||
5567 | if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { | ||||
5568 | LHS = RightUse; | ||||
5569 | RHS = LeftUse; | ||||
5570 | return true; | ||||
5571 | } | ||||
5572 | |||||
5573 | return false; | ||||
5574 | } | ||||
5575 | |||||
5576 | const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { | ||||
5577 | auto IsReachable = | ||||
5578 | [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; | ||||
5579 | if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { | ||||
5580 | const Loop *L = LI.getLoopFor(PN->getParent()); | ||||
5581 | |||||
5582 | // We don't want to break LCSSA, even in a SCEV expression tree. | ||||
5583 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) | ||||
5584 | if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) | ||||
5585 | return nullptr; | ||||
5586 | |||||
5587 | // Try to match | ||||
5588 | // | ||||
5589 | // br %cond, label %left, label %right | ||||
5590 | // left: | ||||
5591 | // br label %merge | ||||
5592 | // right: | ||||
5593 | // br label %merge | ||||
5594 | // merge: | ||||
5595 | // V = phi [ %x, %left ], [ %y, %right ] | ||||
5596 | // | ||||
5597 | // as "select %cond, %x, %y" | ||||
5598 | |||||
5599 | BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); | ||||
5600 | assert(IDom && "At least the entry block should dominate PN"); | ||||
5601 | |||||
5602 | auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); | ||||
5603 | Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; | ||||
5604 | |||||
5605 | if (BI && BI->isConditional() && | ||||
5606 | BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && | ||||
5607 | IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && | ||||
5608 | IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) | ||||
5609 | return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); | ||||
5610 | } | ||||
5611 | |||||
5612 | return nullptr; | ||||
5613 | } | ||||
5614 | |||||
5615 | const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { | ||||
5616 | if (const SCEV *S = createAddRecFromPHI(PN)) | ||||
5617 | return S; | ||||
5618 | |||||
5619 | if (const SCEV *S = createNodeFromSelectLikePHI(PN)) | ||||
5620 | return S; | ||||
5621 | |||||
5622 | // If the PHI has a single incoming value, follow that value, unless the | ||||
5623 | // PHI's incoming blocks are in a different loop, in which case doing so | ||||
5624 | // risks breaking LCSSA form. Instcombine would normally zap these, but | ||||
5625 | // it doesn't have DominatorTree information, so it may miss cases. | ||||
5626 | if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) | ||||
5627 | if (LI.replacementPreservesLCSSAForm(PN, V)) | ||||
5628 | return getSCEV(V); | ||||
5629 | |||||
5630 | // If it's not a loop phi, we can't handle it yet. | ||||
5631 | return getUnknown(PN); | ||||
5632 | } | ||||
5633 | |||||
5634 | const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, | ||||
5635 | Value *Cond, | ||||
5636 | Value *TrueVal, | ||||
5637 | Value *FalseVal) { | ||||
5638 | // Handle "constant" branch or select. This can occur for instance when a | ||||
5639 | // loop pass transforms an inner loop and moves on to process the outer loop. | ||||
5640 | if (auto *CI = dyn_cast<ConstantInt>(Cond)) | ||||
5641 | return getSCEV(CI->isOne() ? TrueVal : FalseVal); | ||||
5642 | |||||
5643 | // Try to match some simple smax or umax patterns. | ||||
5644 | auto *ICI = dyn_cast<ICmpInst>(Cond); | ||||
5645 | if (!ICI) | ||||
5646 | return getUnknown(I); | ||||
5647 | |||||
5648 | Value *LHS = ICI->getOperand(0); | ||||
5649 | Value *RHS = ICI->getOperand(1); | ||||
5650 | |||||
5651 | switch (ICI->getPredicate()) { | ||||
5652 | case ICmpInst::ICMP_SLT: | ||||
5653 | case ICmpInst::ICMP_SLE: | ||||
5654 | case ICmpInst::ICMP_ULT: | ||||
5655 | case ICmpInst::ICMP_ULE: | ||||
5656 | std::swap(LHS, RHS); | ||||
5657 | LLVM_FALLTHROUGH; | ||||
5658 | case ICmpInst::ICMP_SGT: | ||||
5659 | case ICmpInst::ICMP_SGE: | ||||
5660 | case ICmpInst::ICMP_UGT: | ||||
5661 | case ICmpInst::ICMP_UGE: | ||||
5662 | // a > b ? a+x : b+x -> max(a, b)+x | ||||
5663 | // a > b ? b+x : a+x -> min(a, b)+x | ||||
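// For example, (%a > %b ? %a + 8 : %b + 8) gives LDiff = RDiff = 8 below
// and folds to (smax %a, %b) + 8.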
5664 | if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { | ||||
5665 | bool Signed = ICI->isSigned(); | ||||
5666 | const SCEV *LA = getSCEV(TrueVal); | ||||
5667 | const SCEV *RA = getSCEV(FalseVal); | ||||
5668 | const SCEV *LS = getSCEV(LHS); | ||||
5669 | const SCEV *RS = getSCEV(RHS); | ||||
5670 | if (LA->getType()->isPointerTy()) { | ||||
5671 | // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA. | ||||
5672 | // Need to make sure we can't produce weird expressions involving | ||||
5673 | // negated pointers. | ||||
5674 | if (LA == LS && RA == RS) | ||||
5675 | return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS); | ||||
5676 | if (LA == RS && RA == LS) | ||||
5677 | return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS); | ||||
5678 | } | ||||
5679 | auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * { | ||||
5680 | if (Op->getType()->isPointerTy()) { | ||||
5681 | Op = getLosslessPtrToIntExpr(Op); | ||||
5682 | if (isa<SCEVCouldNotCompute>(Op)) | ||||
5683 | return Op; | ||||
5684 | } | ||||
5685 | if (Signed) | ||||
5686 | Op = getNoopOrSignExtend(Op, I->getType()); | ||||
5687 | else | ||||
5688 | Op = getNoopOrZeroExtend(Op, I->getType()); | ||||
5689 | return Op; | ||||
5690 | }; | ||||
5691 | LS = CoerceOperand(LS); | ||||
5692 | RS = CoerceOperand(RS); | ||||
5693 | if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS)) | ||||
5694 | break; | ||||
5695 | const SCEV *LDiff = getMinusSCEV(LA, LS); | ||||
5696 | const SCEV *RDiff = getMinusSCEV(RA, RS); | ||||
5697 | if (LDiff == RDiff) | ||||
5698 | return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS), | ||||
5699 | LDiff); | ||||
5700 | LDiff = getMinusSCEV(LA, RS); | ||||
5701 | RDiff = getMinusSCEV(RA, LS); | ||||
5702 | if (LDiff == RDiff) | ||||
5703 | return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS), | ||||
5704 | LDiff); | ||||
5705 | } | ||||
5706 | break; | ||||
5707 | case ICmpInst::ICMP_NE: | ||||
5708 | // n != 0 ? n+x : 1+x -> umax(n, 1)+x | ||||
5709 | if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && | ||||
5710 | isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { | ||||
5711 | const SCEV *One = getOne(I->getType()); | ||||
5712 | const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); | ||||
5713 | const SCEV *LA = getSCEV(TrueVal); | ||||
5714 | const SCEV *RA = getSCEV(FalseVal); | ||||
5715 | const SCEV *LDiff = getMinusSCEV(LA, LS); | ||||
5716 | const SCEV *RDiff = getMinusSCEV(RA, One); | ||||
5717 | if (LDiff == RDiff) | ||||
5718 | return getAddExpr(getUMaxExpr(One, LS), LDiff); | ||||
5719 | } | ||||
5720 | break; | ||||
5721 | case ICmpInst::ICMP_EQ: | ||||
5722 | // n == 0 ? 1+x : n+x -> umax(n, 1)+x | ||||
5723 | if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && | ||||
5724 | isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { | ||||
5725 | const SCEV *One = getOne(I->getType()); | ||||
5726 | const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); | ||||
5727 | const SCEV *LA = getSCEV(TrueVal); | ||||
5728 | const SCEV *RA = getSCEV(FalseVal); | ||||
5729 | const SCEV *LDiff = getMinusSCEV(LA, One); | ||||
5730 | const SCEV *RDiff = getMinusSCEV(RA, LS); | ||||
5731 | if (LDiff == RDiff) | ||||
5732 | return getAddExpr(getUMaxExpr(One, LS), LDiff); | ||||
5733 | } | ||||
5734 | break; | ||||
5735 | default: | ||||
5736 | break; | ||||
5737 | } | ||||
5738 | |||||
5739 | return getUnknown(I); | ||||
5740 | } | ||||
5741 | |||||
5742 | /// Expand GEP instructions into add and multiply operations. This allows them | ||||
5743 | /// to be analyzed by regular SCEV code. | ||||
5744 | const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { | ||||
5745 | // Don't attempt to analyze GEPs over unsized objects. | ||||
5746 | if (!GEP->getSourceElementType()->isSized()) | ||||
5747 | return getUnknown(GEP); | ||||
5748 | |||||
5749 | SmallVector<const SCEV *, 4> IndexExprs; | ||||
5750 | for (Value *Index : GEP->indices()) | ||||
5751 | IndexExprs.push_back(getSCEV(Index)); | ||||
5752 | return getGEPExpr(GEP, IndexExprs); | ||||
5753 | } | ||||
5754 | |||||
5755 | uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { | ||||
5756 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) | ||||
5757 | return C->getAPInt().countTrailingZeros(); | ||||
5758 | |||||
5759 | if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S)) | ||||
5760 | return GetMinTrailingZeros(I->getOperand()); | ||||
5761 | |||||
5762 | if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) | ||||
5763 | return std::min(GetMinTrailingZeros(T->getOperand()), | ||||
5764 | (uint32_t)getTypeSizeInBits(T->getType())); | ||||
5765 | |||||
5766 | if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { | ||||
5767 | uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); | ||||
5768 | return OpRes == getTypeSizeInBits(E->getOperand()->getType()) | ||||
5769 | ? getTypeSizeInBits(E->getType()) | ||||
5770 | : OpRes; | ||||
5771 | } | ||||
5772 | |||||
5773 | if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { | ||||
5774 | uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); | ||||
5775 | return OpRes == getTypeSizeInBits(E->getOperand()->getType()) | ||||
5776 | ? getTypeSizeInBits(E->getType()) | ||||
5777 | : OpRes; | ||||
5778 | } | ||||
5779 | |||||
5780 | if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { | ||||
5781 | // The result is the min of all operands results. | ||||
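// e.g. if one operand is a multiple of 8 and another only a multiple of
// 4, the sum is known to be a multiple of 4 but not necessarily of 8.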
5782 | uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); | ||||
5783 | for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) | ||||
5784 | MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); | ||||
5785 | return MinOpRes; | ||||
5786 | } | ||||
5787 | |||||
5788 | if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { | ||||
5789 | // The result is the sum of all operands results. | ||||
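// e.g. (4 * %x) has at least 2 + GetMinTrailingZeros(%x) trailing zeros,
// capped at the bit width.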
5790 | uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); | ||||
5791 | uint32_t BitWidth = getTypeSizeInBits(M->getType()); | ||||
5792 | for (unsigned i = 1, e = M->getNumOperands(); | ||||
5793 | SumOpRes != BitWidth && i != e; ++i) | ||||
5794 | SumOpRes = | ||||
5795 | std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); | ||||
5796 | return SumOpRes; | ||||
5797 | } | ||||
5798 | |||||
5799 | if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { | ||||
5800 | // The result is the min of all operands results. | ||||
5801 | uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); | ||||
5802 | for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) | ||||
5803 | MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); | ||||
5804 | return MinOpRes; | ||||
5805 | } | ||||
5806 | |||||
5807 | if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { | ||||
5808 | // The result is the min of all operands results. | ||||
5809 | uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); | ||||
5810 | for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) | ||||
5811 | MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); | ||||
5812 | return MinOpRes; | ||||
5813 | } | ||||
5814 | |||||
5815 | if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { | ||||
5816 | // The result is the min of all operands results. | ||||
5817 | uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); | ||||
5818 | for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) | ||||
5819 | MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); | ||||
5820 | return MinOpRes; | ||||
5821 | } | ||||
5822 | |||||
5823 | if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { | ||||
5824 | // For a SCEVUnknown, ask ValueTracking. | ||||
5825 | KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); | ||||
5826 | return Known.countMinTrailingZeros(); | ||||
5827 | } | ||||
5828 | |||||
5829 | // SCEVUDivExpr | ||||
5830 | return 0; | ||||
5831 | } | ||||
5832 | |||||
5833 | uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { | ||||
5834 | auto I = MinTrailingZerosCache.find(S); | ||||
5835 | if (I != MinTrailingZerosCache.end()) | ||||
5836 | return I->second; | ||||
5837 | |||||
5838 | uint32_t Result = GetMinTrailingZerosImpl(S); | ||||
5839 | auto InsertPair = MinTrailingZerosCache.insert({S, Result}); | ||||
5840 | assert(InsertPair.second && "Should insert a new key"); | ||||
5841 | return InsertPair.first->second; | ||||
5842 | } | ||||
5843 | |||||
5844 | /// Helper method to assign a range to V from metadata present in the IR. | ||||
5845 | static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { | ||||
5846 | if (Instruction *I = dyn_cast<Instruction>(V)) | ||||
5847 | if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) | ||||
5848 | return getConstantRangeFromMetadata(*MD); | ||||
5849 | |||||
5850 | return None; | ||||
5851 | } | ||||
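// For example, an instruction annotated with !range !{i64 0, i64 100}
// yields the ConstantRange [0,100).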
5852 | |||||
5853 | void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec, | ||||
5854 | SCEV::NoWrapFlags Flags) { | ||||
5855 | if (AddRec->getNoWrapFlags(Flags) != Flags) { | ||||
5856 | AddRec->setNoWrapFlags(Flags); | ||||
5857 | UnsignedRanges.erase(AddRec); | ||||
5858 | SignedRanges.erase(AddRec); | ||||
5859 | } | ||||
5860 | } | ||||
5861 | |||||
5862 | ConstantRange ScalarEvolution:: | ||||
5863 | getRangeForUnknownRecurrence(const SCEVUnknown *U) { | ||||
5864 | const DataLayout &DL = getDataLayout(); | ||||
5865 | |||||
5866 | unsigned BitWidth = getTypeSizeInBits(U->getType()); | ||||
5867 | const ConstantRange FullSet(BitWidth, /*isFullSet=*/true); | ||||
5868 | |||||
5869 | // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then | ||||
5870 | // use information about the trip count to improve our available range. Note | ||||
5871 | // that the trip-count-independent cases are already handled by known bits. | ||||
5872 | // WARNING: The definition of recurrence used here is subtly different from | ||||
5873 | // the one used by AddRec (and thus most of this file). Step is allowed to | ||||
5874 | // be arbitrarily loop varying here, where AddRec allows only loop invariant | ||||
5875 | // and other addrecs in the same loop (for non-affine addrecs). The code | ||||
5876 | // below intentionally handles the case where step is not loop invariant. | ||||
5877 | auto *P = dyn_cast<PHINode>(U->getValue()); | ||||
5878 | if (!P) | ||||
5879 | return FullSet; | ||||
5880 | |||||
5881 | // Make sure that no Phi input comes from an unreachable block. Otherwise, | ||||
5882 | // even the values that are not available in these blocks may come from them, | ||||
5883 | // and this leads to a false-positive recurrence test. | ||||
5884 | for (auto *Pred : predecessors(P->getParent())) | ||||
5885 | if (!DT.isReachableFromEntry(Pred)) | ||||
5886 | return FullSet; | ||||
5887 | |||||
5888 | BinaryOperator *BO; | ||||
5889 | Value *Start, *Step; | ||||
5890 | if (!matchSimpleRecurrence(P, BO, Start, Step)) | ||||
5891 | return FullSet; | ||||
5892 | |||||
5893 | // If we found a recurrence in reachable code, we must be in a loop. Note | ||||
5894 | // that BO might be in some subloop of L, and that's completely okay. | ||||
5895 | auto *L = LI.getLoopFor(P->getParent()); | ||||
5896 | assert(L && L->getHeader() == P->getParent()); | ||||
5897 | if (!L->contains(BO->getParent())) | ||||
5898 | // NOTE: This bailout should be an assert instead. However, asserting | ||||
5899 | // the condition here exposes a case where LoopFusion is querying SCEV | ||||
5900 | // with malformed loop information in the midst of the transform. | ||||
5901 | // There doesn't appear to be an obvious fix, so for the moment bail out | ||||
5902 | // until the caller's issue can be fixed. PR49566 tracks the bug. | ||||
5903 | return FullSet; | ||||
5904 | |||||
5905 | // TODO: Extend to other opcodes such as mul and div. | ||||
5906 | switch (BO->getOpcode()) { | ||||
5907 | default: | ||||
5908 | return FullSet; | ||||
5909 | case Instruction::AShr: | ||||
5910 | case Instruction::LShr: | ||||
5911 | case Instruction::Shl: | ||||
5912 | break; | ||||
5913 | } | ||||
5914 | |||||
5915 | if (BO->getOperand(0) != P) | ||||
5916 | // TODO: Handle the power function forms some day. | ||||
5917 | return FullSet; | ||||
5918 | |||||
5919 | unsigned TC = getSmallConstantMaxTripCount(L); | ||||
5920 | if (!TC || TC >= BitWidth) | ||||
5921 | return FullSet; | ||||
5922 | |||||
5923 | auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT); | ||||
5924 | auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT); | ||||
5925 | assert(KnownStart.getBitWidth() == BitWidth && | ||||
5926 | KnownStep.getBitWidth() == BitWidth); | ||||
5927 | |||||
5928 | // Compute total shift amount, being careful of overflow and bitwidths. | ||||
5929 | auto MaxShiftAmt = KnownStep.getMaxValue(); | ||||
5930 | APInt TCAP(BitWidth, TC-1); | ||||
5931 | bool Overflow = false; | ||||
5932 | auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow); | ||||
5933 | if (Overflow) | ||||
5934 | return FullSet; | ||||
5935 | |||||
5936 | switch (BO->getOpcode()) { | ||||
5937 | default: | ||||
5938 | llvm_unreachable("filtered out above"); | ||||
5939 | case Instruction::AShr: { | ||||
5940 | // For each ashr, three cases: | ||||
5941 | // shift = 0 => unchanged value | ||||
5942 | // saturation => 0 or -1 | ||||
5943 | // other => a value closer to zero (of the same sign) | ||||
5944 | // Thus, the end value is closer to zero than the start. | ||||
5945 | auto KnownEnd = KnownBits::ashr(KnownStart, | ||||
5946 | KnownBits::makeConstant(TotalShift)); | ||||
5947 | if (KnownStart.isNonNegative()) | ||||
5948 | // Analogous to lshr (simply not yet canonicalized) | ||||
5949 | return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), | ||||
5950 | KnownStart.getMaxValue() + 1); | ||||
5951 | if (KnownStart.isNegative()) | ||||
5952 | // End >=u Start && End <=s Start | ||||
5953 | return ConstantRange::getNonEmpty(KnownStart.getMinValue(), | ||||
5954 | KnownEnd.getMaxValue() + 1); | ||||
5955 | break; | ||||
5956 | } | ||||
5957 | case Instruction::LShr: { | ||||
5958 | // For each lshr, three cases: | ||||
5959 | // shift = 0 => unchanged value | ||||
5960 | // saturation => 0 | ||||
5961 | // other => a smaller positive number | ||||
5962 | // Thus, the low end of the unsigned range is the last value produced. | ||||
5963 | auto KnownEnd = KnownBits::lshr(KnownStart, | ||||
5964 | KnownBits::makeConstant(TotalShift)); | ||||
5965 | return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), | ||||
5966 | KnownStart.getMaxValue() + 1); | ||||
5967 | } | ||||
5968 | case Instruction::Shl: { | ||||
5969 | // Iff no bits are shifted out, the value increases on every shift. | ||||
5970 | auto KnownEnd = KnownBits::shl(KnownStart, | ||||
5971 | KnownBits::makeConstant(TotalShift)); | ||||
5972 | if (TotalShift.ult(KnownStart.countMinLeadingZeros())) | ||||
5973 | return ConstantRange(KnownStart.getMinValue(), | ||||
5974 | KnownEnd.getMaxValue() + 1); | ||||
5975 | break; | ||||
5976 | } | ||||
5977 | } | ||||
5978 | return FullSet; | ||||
5979 | } | ||||
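// A worked example of the lshr case above, with hypothetical values: for a
// header phi %p with %p.next = lshr i8 %p, 1, known start bits giving the
// range [16, 32), and a small constant max trip count of 3, we get
// TotalShift = 1 * (3 - 1) = 2, so the end value is at least 16 >> 2 = 4 and
// the returned range is [4, 32).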
5980 | |||||
5981 | /// Determine the range for a particular SCEV. If SignHint is | ||||
5982 | /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges | ||||
5983 | /// with a "cleaner" unsigned (resp. signed) representation. | ||||
5984 | const ConstantRange & | ||||
5985 | ScalarEvolution::getRangeRef(const SCEV *S, | ||||
5986 | ScalarEvolution::RangeSignHint SignHint) { | ||||
5987 | DenseMap<const SCEV *, ConstantRange> &Cache = | ||||
5988 | SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges | ||||
5989 | : SignedRanges; | ||||
5990 | ConstantRange::PreferredRangeType RangeType = | ||||
5991 | SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED | ||||
5992 | ? ConstantRange::Unsigned : ConstantRange::Signed; | ||||
5993 | |||||
5994 | // See if we've computed this range already. | ||||
5995 | DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); | ||||
5996 | if (I != Cache.end()) | ||||
5997 | return I->second; | ||||
5998 | |||||
5999 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) | ||||
6000 | return setRange(C, SignHint, ConstantRange(C->getAPInt())); | ||||
6001 | |||||
6002 | unsigned BitWidth = getTypeSizeInBits(S->getType()); | ||||
6003 | ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); | ||||
6004 | using OBO = OverflowingBinaryOperator; | ||||
6005 | |||||
6006 | // If the value has known trailing zeros, the maximum value will have those | ||||
6007 | // trailing zeros as well. | ||||
6008 | uint32_t TZ = GetMinTrailingZeros(S); | ||||
6009 | if (TZ != 0) { | ||||
6010 | if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) | ||||
6011 | ConservativeResult = | ||||
6012 | ConstantRange(APInt::getMinValue(BitWidth), | ||||
6013 | APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); | ||||
6014 | else | ||||
6015 | ConservativeResult = ConstantRange( | ||||
6016 | APInt::getSignedMinValue(BitWidth), | ||||
6017 | APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); | ||||
6018 | } | ||||
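// For illustration: with BitWidth = 8 and TZ = 2, every value is a multiple
// of 4, so the unsigned branch above yields [0, 253), whose largest member
// is 252 = 0b11111100.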
6019 | |||||
6020 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { | ||||
6021 | ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); | ||||
6022 | unsigned WrapType = OBO::AnyWrap; | ||||
6023 | if (Add->hasNoSignedWrap()) | ||||
6024 | WrapType |= OBO::NoSignedWrap; | ||||
6025 | if (Add->hasNoUnsignedWrap()) | ||||
6026 | WrapType |= OBO::NoUnsignedWrap; | ||||
6027 | for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) | ||||
6028 | X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint), | ||||
6029 | WrapType, RangeType); | ||||
6030 | return setRange(Add, SignHint, | ||||
6031 | ConservativeResult.intersectWith(X, RangeType)); | ||||
6032 | } | ||||
6033 | |||||
6034 | if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { | ||||
6035 | ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); | ||||
6036 | for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) | ||||
6037 | X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); | ||||
6038 | return setRange(Mul, SignHint, | ||||
6039 | ConservativeResult.intersectWith(X, RangeType)); | ||||
6040 | } | ||||
6041 | |||||
6042 | if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { | ||||
6043 | ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); | ||||
6044 | for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) | ||||
6045 | X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); | ||||
6046 | return setRange(SMax, SignHint, | ||||
6047 | ConservativeResult.intersectWith(X, RangeType)); | ||||
6048 | } | ||||
6049 | |||||
6050 | if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { | ||||
6051 | ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); | ||||
6052 | for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) | ||||
6053 | X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); | ||||
6054 | return setRange(UMax, SignHint, | ||||
6055 | ConservativeResult.intersectWith(X, RangeType)); | ||||
6056 | } | ||||
6057 | |||||
6058 | if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) { | ||||
6059 | ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint); | ||||
6060 | for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i) | ||||
6061 | X = X.smin(getRangeRef(SMin->getOperand(i), SignHint)); | ||||
6062 | return setRange(SMin, SignHint, | ||||
6063 | ConservativeResult.intersectWith(X, RangeType)); | ||||
6064 | } | ||||
6065 | |||||
6066 | if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) { | ||||
6067 | ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint); | ||||
6068 | for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i) | ||||
6069 | X = X.umin(getRangeRef(UMin->getOperand(i), SignHint)); | ||||
6070 | return setRange(UMin, SignHint, | ||||
6071 | ConservativeResult.intersectWith(X, RangeType)); | ||||
6072 | } | ||||
6073 | |||||
6074 | if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { | ||||
6075 | ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); | ||||
6076 | ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); | ||||
6077 | return setRange(UDiv, SignHint, | ||||
6078 | ConservativeResult.intersectWith(X.udiv(Y), RangeType)); | ||||
6079 | } | ||||
6080 | |||||
6081 | if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { | ||||
6082 | ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); | ||||
6083 | return setRange(ZExt, SignHint, | ||||
6084 | ConservativeResult.intersectWith(X.zeroExtend(BitWidth), | ||||
6085 | RangeType)); | ||||
6086 | } | ||||
6087 | |||||
6088 | if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { | ||||
6089 | ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); | ||||
6090 | return setRange(SExt, SignHint, | ||||
6091 | ConservativeResult.intersectWith(X.signExtend(BitWidth), | ||||
6092 | RangeType)); | ||||
6093 | } | ||||
6094 | |||||
6095 | if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) { | ||||
6096 | ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint); | ||||
6097 | return setRange(PtrToInt, SignHint, X); | ||||
6098 | } | ||||
6099 | |||||
6100 | if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { | ||||
6101 | ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); | ||||
6102 | return setRange(Trunc, SignHint, | ||||
6103 | ConservativeResult.intersectWith(X.truncate(BitWidth), | ||||
6104 | RangeType)); | ||||
6105 | } | ||||
6106 | |||||
6107 | if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { | ||||
6108 | // If there's no unsigned wrap, the value will never be less than its | ||||
6109 | // initial value. | ||||
6110 | if (AddRec->hasNoUnsignedWrap()) { | ||||
6111 | APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart()); | ||||
6112 | if (!UnsignedMinValue.isNullValue()) | ||||
6113 | ConservativeResult = ConservativeResult.intersectWith( | ||||
6114 | ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType); | ||||
6115 | } | ||||
6116 | |||||
6117 | // If there's no signed wrap, and all the operands except the initial value | ||||
6118 | // have the same sign or zero, the value won't ever be: | ||||
6119 | // 1: smaller than the initial value if the operands are non-negative, | ||||
6120 | // 2: bigger than the initial value if the operands are non-positive. | ||||
6121 | // In both cases, the value cannot cross the signed min/max boundary. | ||||
6122 | if (AddRec->hasNoSignedWrap()) { | ||||
6123 | bool AllNonNeg = true; | ||||
6124 | bool AllNonPos = true; | ||||
6125 | for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) { | ||||
6126 | if (!isKnownNonNegative(AddRec->getOperand(i))) | ||||
6127 | AllNonNeg = false; | ||||
6128 | if (!isKnownNonPositive(AddRec->getOperand(i))) | ||||
6129 | AllNonPos = false; | ||||
6130 | } | ||||
6131 | if (AllNonNeg) | ||||
6132 | ConservativeResult = ConservativeResult.intersectWith( | ||||
6133 | ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()), | ||||
6134 | APInt::getSignedMinValue(BitWidth)), | ||||
6135 | RangeType); | ||||
6136 | else if (AllNonPos) | ||||
6137 | ConservativeResult = ConservativeResult.intersectWith( | ||||
6138 | ConstantRange::getNonEmpty( | ||||
6139 | APInt::getSignedMinValue(BitWidth), | ||||
6140 | getSignedRangeMax(AddRec->getStart()) + 1), | ||||
6141 | RangeType); | ||||
6142 | } | ||||
6143 | |||||
6144 | // TODO: non-affine addrec | ||||
6145 | if (AddRec->isAffine()) { | ||||
6146 | const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop()); | ||||
6147 | if (!isa<SCEVCouldNotCompute>(MaxBECount) && | ||||
6148 | getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { | ||||
6149 | auto RangeFromAffine = getRangeForAffineAR( | ||||
6150 | AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, | ||||
6151 | BitWidth); | ||||
6152 | ConservativeResult = | ||||
6153 | ConservativeResult.intersectWith(RangeFromAffine, RangeType); | ||||
6154 | |||||
6155 | auto RangeFromFactoring = getRangeViaFactoring( | ||||
6156 | AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, | ||||
6157 | BitWidth); | ||||
6158 | ConservativeResult = | ||||
6159 | ConservativeResult.intersectWith(RangeFromFactoring, RangeType); | ||||
6160 | } | ||||
6161 | |||||
6162 | // Now try symbolic BE count and more powerful methods. | ||||
6163 | if (UseExpensiveRangeSharpening) { | ||||
6164 | const SCEV *SymbolicMaxBECount = | ||||
6165 | getSymbolicMaxBackedgeTakenCount(AddRec->getLoop()); | ||||
6166 | if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) && | ||||
6167 | getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && | ||||
6168 | AddRec->hasNoSelfWrap()) { | ||||
6169 | auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR( | ||||
6170 | AddRec, SymbolicMaxBECount, BitWidth, SignHint); | ||||
6171 | ConservativeResult = | ||||
6172 | ConservativeResult.intersectWith(RangeFromAffineNew, RangeType); | ||||
6173 | } | ||||
6174 | } | ||||
6175 | } | ||||
6176 | |||||
6177 | return setRange(AddRec, SignHint, std::move(ConservativeResult)); | ||||
6178 | } | ||||
6179 | |||||
6180 | if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { | ||||
6181 | |||||
6182 | // Check if the IR explicitly contains !range metadata. | ||||
6183 | Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); | ||||
6184 | if (MDRange.hasValue()) | ||||
6185 | ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(), | ||||
6186 | RangeType); | ||||
6187 | |||||
6188 | // Use facts about recurrences in the underlying IR. Note that add | ||||
6189 | // recurrences are AddRecExprs and thus don't hit this path. This | ||||
6190 | // primarily handles shift recurrences. | ||||
6191 | auto CR = getRangeForUnknownRecurrence(U); | ||||
6192 | ConservativeResult = ConservativeResult.intersectWith(CR); | ||||
6193 | |||||
6194 | // See if ValueTracking can give us a useful range. | ||||
6195 | const DataLayout &DL = getDataLayout(); | ||||
6196 | KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); | ||||
6197 | if (Known.getBitWidth() != BitWidth) | ||||
6198 | Known = Known.zextOrTrunc(BitWidth); | ||||
6199 | |||||
6200 | // ValueTracking may be able to compute a tighter result for the number of | ||||
6201 | // sign bits than for the value of those sign bits. | ||||
6202 | unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); | ||||
6203 | if (U->getType()->isPointerTy()) { | ||||
6204 | // If the pointer size is larger than the index size, this can cause NS | ||||
6205 | // to be larger than BitWidth, so compensate for this. | ||||
6206 | unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType()); | ||||
6207 | int ptrIdxDiff = ptrSize - BitWidth; | ||||
6208 | if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff) | ||||
6209 | NS -= ptrIdxDiff; | ||||
6210 | } | ||||
6211 | |||||
6212 | if (NS > 1) { | ||||
6213 | // If we know any of the sign bits, we know all of the sign bits. | ||||
6214 | if (!Known.Zero.getHiBits(NS).isNullValue()) | ||||
6215 | Known.Zero.setHighBits(NS); | ||||
6216 | if (!Known.One.getHiBits(NS).isNullValue()) | ||||
6217 | Known.One.setHighBits(NS); | ||||
6218 | } | ||||
6219 | |||||
6220 | if (Known.getMinValue() != Known.getMaxValue() + 1) | ||||
6221 | ConservativeResult = ConservativeResult.intersectWith( | ||||
6222 | ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1), | ||||
6223 | RangeType); | ||||
6224 | if (NS > 1) | ||||
6225 | ConservativeResult = ConservativeResult.intersectWith( | ||||
6226 | ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), | ||||
6227 | APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1), | ||||
6228 | RangeType); | ||||
6229 | |||||
6230 | // The range of a Phi is a subset of the union of the ranges of its inputs. | ||||
6231 | if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) { | ||||
6232 | // Make sure that we do not recurse infinitely into cyclic Phis. | ||||
6233 | if (PendingPhiRanges.insert(Phi).second) { | ||||
6234 | ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false); | ||||
6235 | for (auto &Op : Phi->operands()) { | ||||
6236 | auto OpRange = getRangeRef(getSCEV(Op), SignHint); | ||||
6237 | RangeFromOps = RangeFromOps.unionWith(OpRange); | ||||
6238 | // No point in continuing if we already have a full set. | ||||
6239 | if (RangeFromOps.isFullSet()) | ||||
6240 | break; | ||||
6241 | } | ||||
6242 | ConservativeResult = | ||||
6243 | ConservativeResult.intersectWith(RangeFromOps, RangeType); | ||||
6244 | bool Erased = PendingPhiRanges.erase(Phi); | ||||
6245 | assert(Erased && "Failed to erase Phi properly?"); | ||||
6246 | (void) Erased; | ||||
6247 | } | ||||
6248 | } | ||||
6249 | |||||
6250 | return setRange(U, SignHint, std::move(ConservativeResult)); | ||||
6251 | } | ||||
6252 | |||||
6253 | return setRange(S, SignHint, std::move(ConservativeResult)); | ||||
6254 | } | ||||
6255 | |||||
6256 | // Given a StartRange, Step and MaxBECount for an expression, compute a range | ||||
6257 | // of values that the expression can take. Initially, the expression has a | ||||
6258 | // value from StartRange and is then changed by Step up to MaxBECount times. | ||||
6259 | // The Signed argument defines whether we treat Step as signed or unsigned. | ||||
6260 | static ConstantRange getRangeForAffineARHelper(APInt Step, | ||||
6261 | const ConstantRange &StartRange, | ||||
6262 | const APInt &MaxBECount, | ||||
6263 | unsigned BitWidth, bool Signed) { | ||||
6264 | // If either Step or MaxBECount is 0, then the expression won't change, and we | ||||
6265 | // just need to return the initial range. | ||||
6266 | if (Step == 0 || MaxBECount == 0) | ||||
6267 | return StartRange; | ||||
6268 | |||||
6269 | // If we don't know anything about the initial value (i.e. StartRange is | ||||
6270 | // FullRange), then we don't know anything about the final range either. | ||||
6271 | // Return FullRange. | ||||
6272 | if (StartRange.isFullSet()) | ||||
6273 | return ConstantRange::getFull(BitWidth); | ||||
6274 | |||||
6275 | // If Step is signed and negative, then we use its absolute value, but we also | ||||
6276 | // note that we're moving in the opposite direction. | ||||
6277 | bool Descending = Signed && Step.isNegative(); | ||||
6278 | |||||
6279 | if (Signed) | ||||
6280 | // This is correct even for INT_SMIN. Let's look at i8 to illustrate this: | ||||
6281 | // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128. | ||||
6282 | // These equations hold true due to the well-defined wrap-around behavior of | ||||
6283 | // APInt. | ||||
6284 | Step = Step.abs(); | ||||
6285 | |||||
6286 | // Check if Offset would be more than the full span of BitWidth. If it is, | ||||
6287 | // the expression is guaranteed to overflow. | ||||
6288 | if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount)) | ||||
6289 | return ConstantRange::getFull(BitWidth); | ||||
6290 | |||||
6291 | // Offset is by how much the expression can change. Checks above guarantee no | ||||
6292 | // overflow here. | ||||
6293 | APInt Offset = Step * MaxBECount; | ||||
6294 | |||||
6295 | // The minimum value of the final range will match the minimum of StartRange | ||||
6296 | // if the expression is increasing, and will be decreased by Offset otherwise. | ||||
6297 | // The maximum value of the final range will match the maximum of StartRange | ||||
6298 | // if the expression is decreasing, and will be increased by Offset otherwise. | ||||
6299 | APInt StartLower = StartRange.getLower(); | ||||
6300 | APInt StartUpper = StartRange.getUpper() - 1; | ||||
6301 | APInt MovedBoundary = Descending ? (StartLower - std::move(Offset)) | ||||
6302 | : (StartUpper + std::move(Offset)); | ||||
6303 | |||||
6304 | // It's possible that the new minimum/maximum value will fall into the initial | ||||
6305 | // range (due to wrap around). This means that the expression can take any | ||||
6306 | // value in this bitwidth, and we have to return the full range. | ||||
6307 | if (StartRange.contains(MovedBoundary)) | ||||
6308 | return ConstantRange::getFull(BitWidth); | ||||
6309 | |||||
6310 | APInt NewLower = | ||||
6311 | Descending ? std::move(MovedBoundary) : std::move(StartLower); | ||||
6312 | APInt NewUpper = | ||||
6313 | Descending ? std::move(StartUpper) : std::move(MovedBoundary); | ||||
6314 | NewUpper += 1; | ||||
6315 | |||||
6316 | // No overflow detected, return the computed [NewLower, NewUpper) range. | ||||
6317 | return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper)); | ||||
6318 | } | ||||
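// A worked example with hypothetical values: StartRange = [0, 10), Step = 3
// and MaxBECount = 5 give Offset = 15 and MovedBoundary = 9 + 15 = 24, which
// lies outside [0, 10), so the result is [0, 25). With Signed = true and
// Step = -3, the lower boundary moves instead and the result is [-15, 10).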
6319 | |||||
6320 | ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start, | ||||
6321 | const SCEV *Step, | ||||
6322 | const SCEV *MaxBECount, | ||||
6323 | unsigned BitWidth) { | ||||
6324 | assert(!isa<SCEVCouldNotCompute>(MaxBECount) && | ||||
6325 | getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && | ||||
6326 | "Precondition!"); | ||||
6327 | |||||
6328 | MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType()); | ||||
6329 | APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount); | ||||
6330 | |||||
6331 | // First, consider step signed. | ||||
6332 | ConstantRange StartSRange = getSignedRange(Start); | ||||
6333 | ConstantRange StepSRange = getSignedRange(Step); | ||||
6334 | |||||
6335 | // If Step can be both positive and negative, we need to find ranges for the | ||||
6336 | // maximum absolute step values in both directions and union them. | ||||
6337 | ConstantRange SR = | ||||
6338 | getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, | ||||
6339 | MaxBECountValue, BitWidth, /* Signed = */ true); | ||||
6340 | SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), | ||||
6341 | StartSRange, MaxBECountValue, | ||||
6342 | BitWidth, /* Signed = */ true)); | ||||
6343 | |||||
6344 | // Next, consider step unsigned. | ||||
6345 | ConstantRange UR = getRangeForAffineARHelper( | ||||
6346 | getUnsignedRangeMax(Step), getUnsignedRange(Start), | ||||
6347 | MaxBECountValue, BitWidth, /* Signed = */ false); | ||||
6348 | |||||
6349 | // Finally, intersect signed and unsigned ranges. | ||||
6350 | return SR.intersectWith(UR, ConstantRange::Smallest); | ||||
6351 | } | ||||
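// For illustration: if the signed analysis above yields [-10, 20) and the
// unsigned analysis yields [0, 100), the final result is [0, 20);
// ConstantRange::Smallest prefers the smaller candidate set when the
// intersection admits more than one representation.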
6352 | |||||
6353 | ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR( | ||||
6354 | const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth, | ||||
6355 | ScalarEvolution::RangeSignHint SignHint) { | ||||
6356 | assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!"); | ||||
6357 | assert(AddRec->hasNoSelfWrap() && | ||||
6358 | "This only works for non-self-wrapping AddRecs!"); | ||||
6359 | const bool IsSigned = SignHint == HINT_RANGE_SIGNED; | ||||
6360 | const SCEV *Step = AddRec->getStepRecurrence(*this); | ||||
6361 | // Only deal with constant step to save compile time. | ||||
6362 | if (!isa<SCEVConstant>(Step)) | ||||
6363 | return ConstantRange::getFull(BitWidth); | ||||
6364 | // Let's make sure that we can prove that we do not self-wrap during | ||||
6365 | // MaxBECount iterations. We need this because MaxBECount is a maximum | ||||
6366 | // iteration count estimate, and we might infer nw from some exit for which we | ||||
6367 | // do not know the max exit count (or from some other side reasoning). | ||||
6368 | // TODO: Turn into assert at some point. | ||||
6369 | if (getTypeSizeInBits(MaxBECount->getType()) > | ||||
6370 | getTypeSizeInBits(AddRec->getType())) | ||||
6371 | return ConstantRange::getFull(BitWidth); | ||||
6372 | MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType()); | ||||
6373 | const SCEV *RangeWidth = getMinusOne(AddRec->getType()); | ||||
6374 | const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step)); | ||||
6375 | const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs); | ||||
6376 | if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount, | ||||
6377 | MaxItersWithoutWrap)) | ||||
6378 | return ConstantRange::getFull(BitWidth); | ||||
6379 | |||||
6380 | ICmpInst::Predicate LEPred = | ||||
6381 | IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; | ||||
6382 | ICmpInst::Predicate GEPred = | ||||
6383 | IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; | ||||
6384 | const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this); | ||||
6385 | |||||
6386 | // We know that there is no self-wrap. Let's take Start and End values and | ||||
6387 | // look at all intermediate values V1, V2, ..., Vn that IndVar takes during | ||||
6388 | // the iteration. They either lie inside the range [Min(Start, End), | ||||
6389 | // Max(Start, End)] or outside it: | ||||
6390 | // | ||||
6391 | // Case 1: RangeMin ... Start V1 ... VN End ... RangeMax; | ||||
6392 | // Case 2: RangeMin Vk ... V1 Start ... End Vn ... Vk + 1 RangeMax; | ||||
6393 | // | ||||
6394 | // The no-self-wrap flag guarantees that the intermediate values cannot be both | ||||
6395 | // outside and inside the range [Min(Start, End), Max(Start, End)]. Using that | ||||
6396 | // knowledge, let's try to prove that we are dealing with Case 1. That holds if | ||||
6397 | // Start <= End and the step is positive, or Start >= End and the step is negative. | ||||
6398 | const SCEV *Start = AddRec->getStart(); | ||||
6399 | ConstantRange StartRange = getRangeRef(Start, SignHint); | ||||
6400 | ConstantRange EndRange = getRangeRef(End, SignHint); | ||||
6401 | ConstantRange RangeBetween = StartRange.unionWith(EndRange); | ||||
6402 | // If they already cover the full iteration space, we will know nothing useful | ||||
6403 | // even if we prove what we want to prove. | ||||
6404 | if (RangeBetween.isFullSet()) | ||||
6405 | return RangeBetween; | ||||
6406 | // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax). | ||||
6407 | bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet() | ||||
6408 | : RangeBetween.isWrappedSet(); | ||||
6409 | if (IsWrappedSet) | ||||
6410 | return ConstantRange::getFull(BitWidth); | ||||
6411 | |||||
6412 | if (isKnownPositive(Step) && | ||||
6413 | isKnownPredicateViaConstantRanges(LEPred, Start, End)) | ||||
6414 | return RangeBetween; | ||||
6415 | else if (isKnownNegative(Step) && | ||||
6416 | isKnownPredicateViaConstantRanges(GEPred, Start, End)) | ||||
6417 | return RangeBetween; | ||||
6418 | return ConstantRange::getFull(BitWidth); | ||||
6419 | } | ||||
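// A worked example with hypothetical values: for {100,+,-1}<nw> whose
// symbolic max backedge-taken count is provably <=u 100, End = 100 -
// MaxBECount, the step is negative and Start >= End, so Case 1 applies and
// the union of the start and end ranges is returned instead of the full set.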
6420 | |||||
6421 | ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, | ||||
6422 | const SCEV *Step, | ||||
6423 | const SCEV *MaxBECount, | ||||
6424 | unsigned BitWidth) { | ||||
6425 | // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) | ||||
6426 | // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) | ||||
6427 | |||||
6428 | struct SelectPattern { | ||||
6429 | Value *Condition = nullptr; | ||||
6430 | APInt TrueValue; | ||||
6431 | APInt FalseValue; | ||||
6432 | |||||
6433 | explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, | ||||
6434 | const SCEV *S) { | ||||
6435 | Optional<unsigned> CastOp; | ||||
6436 | APInt Offset(BitWidth, 0); | ||||
6437 | |||||
6438 | assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && | ||||
6439 | "Should be!"); | ||||
6440 | |||||
6441 | // Peel off a constant offset: | ||||
6442 | if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { | ||||
6443 | // In the future we could consider being smarter here and handle | ||||
6444 | // {Start+Step,+,Step} too. | ||||
6445 | if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) | ||||
6446 | return; | ||||
6447 | |||||
6448 | Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); | ||||
6449 | S = SA->getOperand(1); | ||||
6450 | } | ||||
6451 | |||||
6452 | // Peel off a cast operation | ||||
6453 | if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) { | ||||
6454 | CastOp = SCast->getSCEVType(); | ||||
6455 | S = SCast->getOperand(); | ||||
6456 | } | ||||
6457 | |||||
6458 | using namespace llvm::PatternMatch; | ||||
6459 | |||||
6460 | auto *SU = dyn_cast<SCEVUnknown>(S); | ||||
6461 | const APInt *TrueVal, *FalseVal; | ||||
6462 | if (!SU || | ||||
6463 | !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), | ||||
6464 | m_APInt(FalseVal)))) { | ||||
6465 | Condition = nullptr; | ||||
6466 | return; | ||||
6467 | } | ||||
6468 | |||||
6469 | TrueValue = *TrueVal; | ||||
6470 | FalseValue = *FalseVal; | ||||
6471 | |||||
6472 | // Re-apply the cast we peeled off earlier | ||||
6473 | if (CastOp.hasValue()) | ||||
6474 | switch (*CastOp) { | ||||
6475 | default: | ||||
6476 | llvm_unreachable("Unknown SCEV cast type!"); | ||||
6477 | |||||
6478 | case scTruncate: | ||||
6479 | TrueValue = TrueValue.trunc(BitWidth); | ||||
6480 | FalseValue = FalseValue.trunc(BitWidth); | ||||
6481 | break; | ||||
6482 | case scZeroExtend: | ||||
6483 | TrueValue = TrueValue.zext(BitWidth); | ||||
6484 | FalseValue = FalseValue.zext(BitWidth); | ||||
6485 | break; | ||||
6486 | case scSignExtend: | ||||
6487 | TrueValue = TrueValue.sext(BitWidth); | ||||
6488 | FalseValue = FalseValue.sext(BitWidth); | ||||
6489 | break; | ||||
6490 | } | ||||
6491 | |||||
6492 | // Re-apply the constant offset we peeled off earlier | ||||
6493 | TrueValue += Offset; | ||||
6494 | FalseValue += Offset; | ||||
6495 | } | ||||
6496 | |||||
6497 | bool isRecognized() { return Condition != nullptr; } | ||||
6498 | }; | ||||
6499 | |||||
6500 | SelectPattern StartPattern(*this, BitWidth, Start); | ||||
6501 | if (!StartPattern.isRecognized()) | ||||
6502 | return ConstantRange::getFull(BitWidth); | ||||
6503 | |||||
6504 | SelectPattern StepPattern(*this, BitWidth, Step); | ||||
6505 | if (!StepPattern.isRecognized()) | ||||
6506 | return ConstantRange::getFull(BitWidth); | ||||
6507 | |||||
6508 | if (StartPattern.Condition != StepPattern.Condition) { | ||||
6509 | // We don't handle this case today, but we could, by considering four | ||||
6510 | // possibilities below instead of two. I'm not sure if there are cases where | ||||
6511 | // that will help over what getRange already does, though. | ||||
6512 | return ConstantRange::getFull(BitWidth); | ||||
6513 | } | ||||
6514 | |||||
6515 | // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to | ||||
6516 | // construct arbitrary general SCEV expressions here. This function is called | ||||
6517 | // from deep in the call stack, and calling getSCEV (on a sext instruction, | ||||
6518 | // say) can end up caching a suboptimal value. | ||||
6519 | |||||
6520 | // FIXME: without the explicit `this` receiver below, MSVC errors out with | ||||
6521 | // C2352 and C2512 (otherwise it isn't needed). | ||||
6522 | |||||
6523 | const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); | ||||
6524 | const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); | ||||
6525 | const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); | ||||
6526 | const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); | ||||
6527 | |||||
6528 | ConstantRange TrueRange = | ||||
6529 | this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); | ||||
6530 | ConstantRange FalseRange = | ||||
6531 | this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); | ||||
6532 | |||||
6533 | return TrueRange.unionWith(FalseRange); | ||||
6534 | } | ||||
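// For illustration, with a hypothetical %flag selecting both components:
//   Start = select i1 %flag, i32 2, i32 10
//   Step  = select i1 %flag, i32 1, i32 -1
// the recurrence factors into {2,+,1} and {10,+,-1}, and the returned range
// is RangeOf({2,+,1}) unioned with RangeOf({10,+,-1}).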
6535 | |||||
6536 | SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { | ||||
6537 | if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; | ||||
6538 | const BinaryOperator *BinOp = cast<BinaryOperator>(V); | ||||
6539 | |||||
6540 | // Return early if there are no flags to propagate to the SCEV. | ||||
6541 | SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; | ||||
6542 | if (BinOp->hasNoUnsignedWrap()) | ||||
6543 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); | ||||
6544 | if (BinOp->hasNoSignedWrap()) | ||||
6545 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); | ||||
6546 | if (Flags == SCEV::FlagAnyWrap) | ||||
6547 | return SCEV::FlagAnyWrap; | ||||
6548 | |||||
6549 | return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; | ||||
6550 | } | ||||
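// For illustration: for %a = add nuw nsw i32 %x, %y this returns
// FlagNUW | FlagNSW, but only if isSCEVExprNeverPoison proves that a
// poisoned %a would make the program undefined; otherwise the flags are
// dropped back to FlagAnyWrap.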
6551 | |||||
6552 | bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { | ||||
6553 | // Here we check that I is in the header of the innermost loop containing I, | ||||
6554 | // since we only deal with instructions in the loop header. The actual loop we | ||||
6555 | // need to check later will come from an add recurrence, but getting that | ||||
6556 | // requires computing the SCEV of the operands, which can be expensive. This | ||||
6557 | // check can be done cheaply to rule out some cases early. | ||||
6558 | Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); | ||||
6559 | if (InnermostContainingLoop == nullptr || | ||||
6560 | InnermostContainingLoop->getHeader() != I->getParent()) | ||||
6561 | return false; | ||||
6562 | |||||
6563 | // Only proceed if we can prove that I does not yield poison. | ||||
6564 | if (!programUndefinedIfPoison(I)) | ||||
6565 | return false; | ||||
6566 | |||||
6567 | // At this point we know that if I is executed, then it does not wrap | ||||
6568 | // according to at least one of NSW or NUW. If I is not executed, then we do | ||||
6569 | // not know if the calculation that I represents would wrap. Multiple | ||||
6570 | // instructions can map to the same SCEV. If we apply NSW or NUW from I to | ||||
6571 | // the SCEV, we must guarantee no wrapping for that SCEV also when it is | ||||
6572 | // derived from other instructions that map to the same SCEV. We cannot make | ||||
6573 | // that guarantee for cases where I is not executed. So we need to find the | ||||
6574 | // loop that I is considered in relation to and prove that I is executed for | ||||
6575 | // every iteration of that loop. That implies that the value that I | ||||
6576 | // calculates does not wrap anywhere in the loop, so then we can apply the | ||||
6577 | // flags to the SCEV. | ||||
6578 | // | ||||
6579 | // We check isLoopInvariant to disambiguate in case we are adding recurrences | ||||
6580 | // from different loops, so that we know which loop to prove that I is | ||||
6581 | // executed in. | ||||
6582 | for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { | ||||
6583 | // I could be an extractvalue from a call to an overflow intrinsic. | ||||
6584 | // TODO: We can do better here in some cases. | ||||
6585 | if (!isSCEVable(I->getOperand(OpIndex)->getType())) | ||||
6586 | return false; | ||||
6587 | const SCEV *Op = getSCEV(I->getOperand(OpIndex)); | ||||
6588 | if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { | ||||
6589 | bool AllOtherOpsLoopInvariant = true; | ||||
6590 | for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands(); | ||||
6591 | ++OtherOpIndex) { | ||||
6592 | if (OtherOpIndex != OpIndex) { | ||||
6593 | const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex)); | ||||
6594 | if (!isLoopInvariant(OtherOp, AddRec->getLoop())) { | ||||
6595 | AllOtherOpsLoopInvariant = false; | ||||
6596 | break; | ||||
6597 | } | ||||
6598 | } | ||||
6599 | } | ||||
6600 | if (AllOtherOpsLoopInvariant && | ||||
6601 | isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop())) | ||||
6602 | return true; | ||||
6603 | } | ||||
6604 | } | ||||
6605 | return false; | ||||
6606 | } | ||||
6607 | |||||
6608 | bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) { | ||||
6609 | // If we know that \c I can never be poison, period, then that's enough. | ||||
6610 | if (isSCEVExprNeverPoison(I)) | ||||
6611 | return true; | ||||
6612 | |||||
6613 | // For an add recurrence specifically, we assume that infinite loops without | ||||
6614 | // side effects are undefined behavior, and then reason as follows: | ||||
6615 | // | ||||
6616 | // If the add recurrence is poison in any iteration, it is poison on all | ||||
6617 | // future iterations (since incrementing poison yields poison). If the result | ||||
6618 | // of the add recurrence is fed into the loop latch condition and the loop | ||||
6619 | // does not contain any throws or exiting blocks other than the latch, we now | ||||
6620 | // have the ability to "choose" whether the backedge is taken or not (by | ||||
6621 | // choosing a sufficiently evil value for the poison feeding into the branch) | ||||
6622 | // for every iteration including and after the one in which \p I first became | ||||
6623 | // poison. There are two possibilities (let's call the iteration in which \p | ||||
6624 | // I first became poison K): | ||||
6625 | // | ||||
6626 | // 1. In the set of iterations including and after K, the loop body executes | ||||
6627 | // no side effects. In this case, executing the backedge an infinite number | ||||
6628 | // of times will yield undefined behavior. | ||||
6629 | // | ||||
6630 | // 2. In the set of iterations including and after K, the loop body executes | ||||
6631 | // at least one side effect. In this case, that specific instance of side | ||||
6632 | // effect is control dependent on poison, which also yields undefined | ||||
6633 | // behavior. | ||||
6634 | |||||
6635 | auto *ExitingBB = L->getExitingBlock(); | ||||
6636 | auto *LatchBB = L->getLoopLatch(); | ||||
6637 | if (!ExitingBB || !LatchBB || ExitingBB != LatchBB) | ||||
6638 | return false; | ||||
6639 | |||||
6640 | SmallPtrSet<const Instruction *, 16> Pushed; | ||||
6641 | SmallVector<const Instruction *, 8> PoisonStack; | ||||
6642 | |||||
6643 | // We start by assuming \c I, the post-inc add recurrence, is poison. Only | ||||
6644 | // things that are known to be poison under that assumption go on the | ||||
6645 | // PoisonStack. | ||||
6646 | Pushed.insert(I); | ||||
6647 | PoisonStack.push_back(I); | ||||
6648 | |||||
6649 | bool LatchControlDependentOnPoison = false; | ||||
6650 | while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { | ||||
6651 | const Instruction *Poison = PoisonStack.pop_back_val(); | ||||
6652 | |||||
6653 | for (auto *PoisonUser : Poison->users()) { | ||||
6654 | if (propagatesPoison(cast<Operator>(PoisonUser))) { | ||||
6655 | if (Pushed.insert(cast<Instruction>(PoisonUser)).second) | ||||
6656 | PoisonStack.push_back(cast<Instruction>(PoisonUser)); | ||||
6657 | } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) { | ||||
6658 | assert(BI->isConditional() && "Only possibility!"); | ||||
6659 | if (BI->getParent() == LatchBB) { | ||||
6660 | LatchControlDependentOnPoison = true; | ||||
6661 | break; | ||||
6662 | } | ||||
6663 | } | ||||
6664 | } | ||||
6665 | } | ||||
6666 | |||||
6667 | return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); | ||||
6668 | } | ||||
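// For illustration: in a loop whose latch is its only exiting block,
//   %iv.next = add i32 %iv, 1
//   %cmp = icmp ult i32 %iv.next, %n
//   br i1 %cmp, label %loop, label %exit
// a poisoned %iv.next propagates through %cmp into the latch branch, so
// LatchControlDependentOnPoison becomes true and, absent abnormal exits,
// the recurrence is treated as never-poison.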
6669 | |||||
6670 | ScalarEvolution::LoopProperties | ||||
6671 | ScalarEvolution::getLoopProperties(const Loop *L) { | ||||
6672 | using LoopProperties = ScalarEvolution::LoopProperties; | ||||
6673 | |||||
6674 | auto Itr = LoopPropertiesCache.find(L); | ||||
6675 | if (Itr == LoopPropertiesCache.end()) { | ||||
6676 | auto HasSideEffects = [](Instruction *I) { | ||||
6677 | if (auto *SI = dyn_cast<StoreInst>(I)) | ||||
6678 | return !SI->isSimple(); | ||||
6679 | |||||
6680 | return I->mayThrow() || I->mayWriteToMemory(); | ||||
6681 | }; | ||||
6682 | |||||
6683 | LoopProperties LP = {/* HasNoAbnormalExits */ true, | ||||
6684 | /*HasNoSideEffects*/ true}; | ||||
6685 | |||||
6686 | for (auto *BB : L->getBlocks()) | ||||
6687 | for (auto &I : *BB) { | ||||
6688 | if (!isGuaranteedToTransferExecutionToSuccessor(&I)) | ||||
6689 | LP.HasNoAbnormalExits = false; | ||||
6690 | if (HasSideEffects(&I)) | ||||
6691 | LP.HasNoSideEffects = false; | ||||
6692 | if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) | ||||
6693 | break; // We're already as pessimistic as we can get. | ||||
6694 | } | ||||
6695 | |||||
6696 | auto InsertPair = LoopPropertiesCache.insert({L, LP}); | ||||
6697 | assert(InsertPair.second && "We just checked!"); | ||||
6698 | Itr = InsertPair.first; | ||||
6699 | } | ||||
6700 | |||||
6701 | return Itr->second; | ||||
6702 | } | ||||
6703 | |||||
6704 | bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) { | ||||
6705 | // A mustprogress loop without side effects must be finite. | ||||
6706 | // TODO: The check used here is very conservative. It's only *specific* | ||||
6707 | // side effects which are well defined in infinite loops. | ||||
6708 | return isMustProgress(L) && loopHasNoSideEffects(L); | ||||
6709 | } | ||||
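// For illustration: a loop inside a hypothetical
//   define void @f(i32 %n) mustprogress { ... }
// whose blocks contain no stores, throws or other writes satisfies both
// checks and is therefore assumed to be finite.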
6710 | |||||
6711 | const SCEV *ScalarEvolution::createSCEV(Value *V) { | ||||
6712 | if (!isSCEVable(V->getType())) | ||||
6713 | return getUnknown(V); | ||||
6714 | |||||
6715 | if (Instruction *I = dyn_cast<Instruction>(V)) { | ||||
6716 | // Don't attempt to analyze instructions in blocks that aren't | ||||
6717 | // reachable. Such instructions don't matter, and they aren't required | ||||
6718 | // to obey the basic rules for definitions dominating uses on which this | ||||
6719 | // analysis depends. | ||||
6720 | if (!DT.isReachableFromEntry(I->getParent())) | ||||
6721 | return getUnknown(UndefValue::get(V->getType())); | ||||
6722 | } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) | ||||
6723 | return getConstant(CI); | ||||
6724 | else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) | ||||
6725 | return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); | ||||
6726 | else if (!isa<ConstantExpr>(V)) | ||||
6727 | return getUnknown(V); | ||||
6728 | |||||
6729 | Operator *U = cast<Operator>(V); | ||||
6730 | if (auto BO = MatchBinaryOp(U, DT)) { | ||||
6731 | switch (BO->Opcode) { | ||||
6732 | case Instruction::Add: { | ||||
6733 | // The simple thing to do would be to just call getSCEV on both operands | ||||
6734 | // and call getAddExpr with the result. However if we're looking at a | ||||
6735 | // bunch of things all added together, this can be quite inefficient, | ||||
6736 | // because it leads to N-1 getAddExpr calls for N ultimate operands. | ||||
6737 | // Instead, gather up all the operands and make a single getAddExpr call. | ||||
6738 | // LLVM IR canonical form means we need only traverse the left operands. | ||||
6739 | SmallVector<const SCEV *, 4> AddOps; | ||||
6740 | do { | ||||
6741 | if (BO->Op) { | ||||
6742 | if (auto *OpSCEV = getExistingSCEV(BO->Op)) { | ||||
6743 | AddOps.push_back(OpSCEV); | ||||
6744 | break; | ||||
6745 | } | ||||
6746 | |||||
6747 | // If a NUW or NSW flag can be applied to the SCEV for this | ||||
6748 | // addition, then compute the SCEV for this addition by itself | ||||
6749 | // with a separate call to getAddExpr. We need to do that | ||||
6750 | // instead of pushing the operands of the addition onto AddOps, | ||||
6751 | // since the flags are only known to apply to this particular | ||||
6752 | // addition - they may not apply to other additions that can be | ||||
6753 | // formed with operands from AddOps. | ||||
6754 | const SCEV *RHS = getSCEV(BO->RHS); | ||||
6755 | SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); | ||||
6756 | if (Flags != SCEV::FlagAnyWrap) { | ||||
6757 | const SCEV *LHS = getSCEV(BO->LHS); | ||||
6758 | if (BO->Opcode == Instruction::Sub) | ||||
6759 | AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); | ||||
6760 | else | ||||
6761 | AddOps.push_back(getAddExpr(LHS, RHS, Flags)); | ||||
6762 | break; | ||||
6763 | } | ||||
6764 | } | ||||
6765 | |||||
6766 | if (BO->Opcode == Instruction::Sub) | ||||
6767 | AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); | ||||
6768 | else | ||||
6769 | AddOps.push_back(getSCEV(BO->RHS)); | ||||
6770 | |||||
6771 | auto NewBO = MatchBinaryOp(BO->LHS, DT); | ||||
6772 | if (!NewBO || (NewBO->Opcode != Instruction::Add && | ||||
6773 | NewBO->Opcode != Instruction::Sub)) { | ||||
6774 | AddOps.push_back(getSCEV(BO->LHS)); | ||||
6775 | break; | ||||
6776 | } | ||||
6777 | BO = NewBO; | ||||
6778 | } while (true); | ||||
6779 | |||||
6780 | return getAddExpr(AddOps); | ||||
6781 | } | ||||
6782 | |||||
6783 | case Instruction::Mul: { | ||||
6784 | SmallVector<const SCEV *, 4> MulOps; | ||||
6785 | do { | ||||
6786 | if (BO->Op) { | ||||
6787 | if (auto *OpSCEV = getExistingSCEV(BO->Op)) { | ||||
6788 | MulOps.push_back(OpSCEV); | ||||
6789 | break; | ||||
6790 | } | ||||
6791 | |||||
6792 | SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); | ||||
6793 | if (Flags != SCEV::FlagAnyWrap) { | ||||
6794 | MulOps.push_back( | ||||
6795 | getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); | ||||
6796 | break; | ||||
6797 | } | ||||
6798 | } | ||||
6799 | |||||
6800 | MulOps.push_back(getSCEV(BO->RHS)); | ||||
6801 | auto NewBO = MatchBinaryOp(BO->LHS, DT); | ||||
6802 | if (!NewBO || NewBO->Opcode != Instruction::Mul) { | ||||
6803 | MulOps.push_back(getSCEV(BO->LHS)); | ||||
6804 | break; | ||||
6805 | } | ||||
6806 | BO = NewBO; | ||||
6807 | } while (true); | ||||
6808 | |||||
6809 | return getMulExpr(MulOps); | ||||
6810 | } | ||||
6811 | case Instruction::UDiv: | ||||
6812 | return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); | ||||
6813 | case Instruction::URem: | ||||
6814 | return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); | ||||
6815 | case Instruction::Sub: { | ||||
6816 | SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; | ||||
6817 | if (BO->Op) | ||||
6818 | Flags = getNoWrapFlagsFromUB(BO->Op); | ||||
6819 | return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); | ||||
6820 | } | ||||
6821 | case Instruction::And: | ||||
6822 | // For an expression like x&255 that merely masks off the high bits, | ||||
6823 | // use zext(trunc(x)) as the SCEV expression. | ||||
6824 | if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { | ||||
6825 | if (CI->isZero()) | ||||
6826 | return getSCEV(BO->RHS); | ||||
6827 | if (CI->isMinusOne()) | ||||
6828 | return getSCEV(BO->LHS); | ||||
6829 | const APInt &A = CI->getValue(); | ||||
6830 | |||||
6831 | // Instcombine's ShrinkDemandedConstant may strip bits out of | ||||
6832 | // constants, obscuring what would otherwise be a low-bits mask. | ||||
6833 | // Use computeKnownBits to compute what ShrinkDemandedConstant | ||||
6834 | // knew about to reconstruct a low-bits mask value. | ||||
6835 | unsigned LZ = A.countLeadingZeros(); | ||||
6836 | unsigned TZ = A.countTrailingZeros(); | ||||
6837 | unsigned BitWidth = A.getBitWidth(); | ||||
6838 | KnownBits Known(BitWidth); | ||||
6839 | computeKnownBits(BO->LHS, Known, getDataLayout(), | ||||
6840 | 0, &AC, nullptr, &DT); | ||||
6841 | |||||
6842 | APInt EffectiveMask = | ||||
6843 | APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); | ||||
6844 | if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { | ||||
6845 | const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); | ||||
6846 | const SCEV *LHS = getSCEV(BO->LHS); | ||||
6847 | const SCEV *ShiftedLHS = nullptr; | ||||
6848 | if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { | ||||
6849 | if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { | ||||
6850 | // For an expression like (x * 8) & 8, simplify the multiply. | ||||
6851 | unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); | ||||
6852 | unsigned GCD = std::min(MulZeros, TZ); | ||||
6853 | APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); | ||||
6854 | SmallVector<const SCEV*, 4> MulOps; | ||||
6855 | MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); | ||||
6856 | MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); | ||||
6857 | auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); | ||||
6858 | ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); | ||||
6859 | } | ||||
6860 | } | ||||
6861 | if (!ShiftedLHS) | ||||
6862 | ShiftedLHS = getUDivExpr(LHS, MulCount); | ||||
6863 | return getMulExpr( | ||||
6864 | getZeroExtendExpr( | ||||
6865 | getTruncateExpr(ShiftedLHS, | ||||
6866 | IntegerType::get(getContext(), BitWidth - LZ - TZ)), | ||||
6867 | BO->LHS->getType()), | ||||
6868 | MulCount); | ||||
6869 | } | ||||
6870 | } | ||||
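// For illustration: %x & 255 becomes (zext i8 (trunc i32 %x to i8) to i32),
// and %x & 240 becomes (16 * (zext i4 (trunc i32 (%x /u 16) to i4) to i32)),
// reconstructing the shifted low-bits mask.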
6871 | break; | ||||
6872 | |||||
6873 | case Instruction::Or: | ||||
6874 | // If the RHS of the Or is a constant, we may have something like: | ||||
6875 | // X*4+1 which got turned into X*4|1. Handle this as an Add so loop | ||||
6876 | // optimizations will transparently handle this case. | ||||
6877 | // | ||||
6878 | // In order for this transformation to be safe, the LHS must be of the | ||||
6879 | // form X*(2^n) and the Or constant must be less than 2^n. | ||||
6880 | if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { | ||||
6881 | const SCEV *LHS = getSCEV(BO->LHS); | ||||
6882 | const APInt &CIVal = CI->getValue(); | ||||
6883 | if (GetMinTrailingZeros(LHS) >= | ||||
6884 | (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { | ||||
6885 | // Build a plain add SCEV. | ||||
6886 | return getAddExpr(LHS, getSCEV(CI), | ||||
6887 | (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); | ||||
6888 | } | ||||
6889 | } | ||||
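// For illustration: for %x * 4 | 1, the left-hand side has at least two
// known trailing zeros and 1 < 4, so the expression is modeled as the add
// (1 + 4 * %x)<nuw><nsw>.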
6890 | break; | ||||
6891 | |||||
6892 | case Instruction::Xor: | ||||
6893 | if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { | ||||
6894 | // If the RHS of xor is -1, then this is a not operation. | ||||
6895 | if (CI->isMinusOne()) | ||||
6896 | return getNotSCEV(getSCEV(BO->LHS)); | ||||
6897 | |||||
6898 | // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. | ||||
6899 | // This is a variant of the check for xor with -1, and it handles | ||||
6900 | // the case where instcombine has trimmed non-demanded bits out | ||||
6901 | // of an xor with -1. | ||||
6902 | if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) | ||||
6903 | if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) | ||||
6904 | if (LBO->getOpcode() == Instruction::And && | ||||
6905 | LCI->getValue() == CI->getValue()) | ||||
6906 | if (const SCEVZeroExtendExpr *Z = | ||||
6907 | dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { | ||||
6908 | Type *UTy = BO->LHS->getType(); | ||||
6909 | const SCEV *Z0 = Z->getOperand(); | ||||
6910 | Type *Z0Ty = Z0->getType(); | ||||
6911 | unsigned Z0TySize = getTypeSizeInBits(Z0Ty); | ||||
6912 | |||||
6913 | // If C is a low-bits mask, the zero extend is serving to | ||||
6914 | // mask off the high bits. Complement the operand and | ||||
6915 | // re-apply the zext. | ||||
6916 | if (CI->getValue().isMask(Z0TySize)) | ||||
6917 | return getZeroExtendExpr(getNotSCEV(Z0), UTy); | ||||
6918 | |||||
6919 | // If C is a single bit, it may be in the sign-bit position | ||||
6920 | // before the zero-extend. In this case, represent the xor | ||||
6921 | // using an add, which is equivalent, and re-apply the zext. | ||||
6922 | APInt Trunc = CI->getValue().trunc(Z0TySize); | ||||
6923 | if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && | ||||
6924 | Trunc.isSignMask()) | ||||
6925 | return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), | ||||
6926 | UTy); | ||||
6927 | } | ||||
6928 | } | ||||
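// For illustration: xor %x, -1 is modeled as getNotSCEV(%x), i.e. the SCEV
// (-1 + (-1 * %x)).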
6929 | break; | ||||
6930 | |||||
6931 | case Instruction::Shl: | ||||
6932 | // Turn shift left of a constant amount into a multiply. | ||||
6933 | if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { | ||||
6934 | uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); | ||||
6935 | |||||
6936 | // If the shift count is not less than the bitwidth, the result of | ||||
6937 | // the shift is undefined. Don't try to analyze it, because the | ||||
6938 | // resolution chosen here may differ from the resolution chosen in | ||||
6939 | // other parts of the compiler. | ||||
6940 | if (SA->getValue().uge(BitWidth)) | ||||
6941 | break; | ||||
6942 | |||||
6943 | // We can safely preserve the nuw flag in all cases. It's also safe to | ||||
6944 | // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation | ||||
6945 | // requires special handling. It can be preserved as long as we're not | ||||
6946 | // left shifting by bitwidth - 1. | ||||
6947 | auto Flags = SCEV::FlagAnyWrap; | ||||
6948 | if (BO->Op) { | ||||
6949 | auto MulFlags = getNoWrapFlagsFromUB(BO->Op); | ||||
6950 | if ((MulFlags & SCEV::FlagNSW) && | ||||
6951 | ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1))) | ||||
6952 | Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW); | ||||
6953 | if (MulFlags & SCEV::FlagNUW) | ||||
6954 | Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW); | ||||
6955 | } | ||||
6956 | |||||
6957 | Constant *X = ConstantInt::get( | ||||
6958 | getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue())); | ||||
6959 | return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); | ||||
6960 | } | ||||
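// For illustration: shl nuw nsw i32 %x, 3 becomes (8 * %x)<nuw><nsw>; with
// only nsw, the flag is still kept here because 3 < 31, i.e. we are not
// shifting by bitwidth - 1.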
6961 | break; | ||||
6962 | |||||
6963 | case Instruction::AShr: { | ||||
6964 | // AShr X, C, where C is a constant. | ||||
6965 | ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS); | ||||
6966 | if (!CI) | ||||
6967 | break; | ||||
6968 | |||||
6969 | Type *OuterTy = BO->LHS->getType(); | ||||
6970 | uint64_t BitWidth = getTypeSizeInBits(OuterTy); | ||||
6971 | // If the shift count is not less than the bitwidth, the result of | ||||
6972 | // the shift is undefined. Don't try to analyze it, because the | ||||
6973 | // resolution chosen here may differ from the resolution chosen in | ||||
6974 | // other parts of the compiler. | ||||
6975 | if (CI->getValue().uge(BitWidth)) | ||||
6976 | break; | ||||
6977 | |||||
6978 | if (CI->isZero()) | ||||
6979 | return getSCEV(BO->LHS); // shift by zero --> noop | ||||
6980 | |||||
6981 | uint64_t AShrAmt = CI->getZExtValue(); | ||||
6982 | Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); | ||||
6983 | |||||
6984 | Operator *L = dyn_cast<Operator>(BO->LHS); | ||||
6985 | if (L && L->getOpcode() == Instruction::Shl) { | ||||
6986 | // X = Shl A, n | ||||
6987 | // Y = AShr X, m | ||||
6988 | // Both n and m are constant. | ||||
6989 | |||||
6990 | const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0)); | ||||
6991 | if (L->getOperand(1) == BO->RHS) | ||||
6992 | // For a two-shift sext-inreg, i.e. n = m, | ||||
6993 | // use sext(trunc(x)) as the SCEV expression. | ||||
6994 | return getSignExtendExpr( | ||||
6995 | getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy); | ||||
6996 | |||||
6997 | ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1)); | ||||
6998 | if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) { | ||||
6999 | uint64_t ShlAmt = ShlAmtCI->getZExtValue(); | ||||
7000 | if (ShlAmt > AShrAmt) { | ||||
7005 7001 | // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV | ||||
7006 7002 | // expression. We already checked that ShlAmt < BitWidth, so | ||||
7007 7003 | // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as | ||||
7008 7004 | // ShlAmt - AShrAmt < BitWidth - AShrAmt, the width of TruncTy. | ||||
7005 | APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt, | ||||
7006 | ShlAmt - AShrAmt); | ||||
7007 | return getSignExtendExpr( | ||||
7008 | getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy), | ||||
7009 | getConstant(Mul)), OuterTy); | ||||
7010 | } | ||||
7011 | } | ||||
7012 | } | ||||
7013 | break; | ||||
7014 | } | ||||
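// Editor's sketch (illustrative pseudo-IR, not from the original source),
// for an i32 value:
//   ashr (shl %x, 24), 24   ; n == m == 24: the sext-inreg idiom, modeled
//                           ; as sext(trunc %x to i8) to i32.
//   ashr (shl %y, 6), 2     ; n = 6 > m = 2: TruncTy is i30 and the
//                           ; multiplier is 1 << (6 - 2) = 16, modeled as
//                           ; sext(trunc(%y) * 16) to i32.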
7015 | } | ||||
7016 | } | ||||
7017 | |||||
7018 | switch (U->getOpcode()) { | ||||
7019 | case Instruction::Trunc: | ||||
7020 | return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); | ||||
7021 | |||||
7022 | case Instruction::ZExt: | ||||
7023 | return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); | ||||
7024 | |||||
7025 | case Instruction::SExt: | ||||
7026 | if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) { | ||||
7027 | // The NSW flag of a subtract does not always survive the conversion to | ||||
7028 | // A + (-1)*B. By pushing sign extension onto its operands we are much | ||||
7029 | // more likely to preserve NSW and allow later AddRec optimisations. | ||||
7030 | // | ||||
7031 | // NOTE: This is effectively duplicating this logic from getSignExtend: | ||||
7032 | // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> | ||||
7033 | // but by that point the NSW information has potentially been lost. | ||||
7034 | if (BO->Opcode == Instruction::Sub && BO->IsNSW) { | ||||
7035 | Type *Ty = U->getType(); | ||||
7036 | auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty); | ||||
7037 | auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty); | ||||
7038 | return getMinusSCEV(V1, V2, SCEV::FlagNSW); | ||||
7039 | } | ||||
7040 | } | ||||
7041 | return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); | ||||
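// Editor's sketch (assumed IR, not from the original source):
//   %d = sub nsw i32 %a, %b
//   %s = sext i32 %d to i64
// Modeling %s as (sext %a) - (sext %b) keeps the nsw fact, whereas first
// rewriting the sub as %a + (-1 * %b) could lose it before the sext is
// processed.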
7042 | |||||
7043 | case Instruction::BitCast: | ||||
7044 | // BitCasts are no-op casts so we just eliminate the cast. | ||||
7045 | if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) | ||||
7046 | return getSCEV(U->getOperand(0)); | ||||
7047 | break; | ||||
7048 | |||||
7049 | case Instruction::PtrToInt: { | ||||
7054 7050 | // Pointer-to-integer casts are straightforward, so we do model them. | ||||
7051 | const SCEV *Op = getSCEV(U->getOperand(0)); | ||||
7052 | Type *DstIntTy = U->getType(); | ||||
7053 | // But only if effective SCEV (integer) type is wide enough to represent | ||||
7054 | // all possible pointer values. | ||||
7055 | const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy); | ||||
7056 | if (isa<SCEVCouldNotCompute>(IntOp)) | ||||
7057 | return getUnknown(V); | ||||
7058 | return IntOp; | ||||
7059 | } | ||||
7060 | case Instruction::IntToPtr: | ||||
7061 | // Just don't deal with inttoptr casts. | ||||
7062 | return getUnknown(V); | ||||
7063 | |||||
7064 | case Instruction::SDiv: | ||||
7065 | // If both operands are non-negative, this is just an udiv. | ||||
7066 | if (isKnownNonNegative(getSCEV(U->getOperand(0))) && | ||||
7067 | isKnownNonNegative(getSCEV(U->getOperand(1)))) | ||||
7068 | return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); | ||||
7069 | break; | ||||
7070 | |||||
7071 | case Instruction::SRem: | ||||
7072 | // If both operands are non-negative, this is just an urem. | ||||
7073 | if (isKnownNonNegative(getSCEV(U->getOperand(0))) && | ||||
7074 | isKnownNonNegative(getSCEV(U->getOperand(1)))) | ||||
7075 | return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); | ||||
7076 | break; | ||||
7077 | |||||
7078 | case Instruction::GetElementPtr: | ||||
7079 | return createNodeForGEP(cast<GEPOperator>(U)); | ||||
7080 | |||||
7081 | case Instruction::PHI: | ||||
7082 | return createNodeForPHI(cast<PHINode>(U)); | ||||
7083 | |||||
7084 | case Instruction::Select: | ||||
7089 7085 | // U can also be a select constant expr, which we let fall through. Since | ||||
7090 7086 | // createNodeForSelectOrPHI only works for a condition that is an `ICmpInst`, | ||||
7091 7087 | // and constant expressions cannot have instructions as operands, we'd have | ||||
7092 7088 | // returned getUnknown for a select constant expression anyway. | ||||
7089 | if (isa<Instruction>(U)) | ||||
7090 | return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0), | ||||
7091 | U->getOperand(1), U->getOperand(2)); | ||||
7092 | break; | ||||
7093 | |||||
7094 | case Instruction::Call: | ||||
7095 | case Instruction::Invoke: | ||||
7096 | if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) | ||||
7097 | return getSCEV(RV); | ||||
7098 | |||||
7099 | if (auto *II = dyn_cast<IntrinsicInst>(U)) { | ||||
7100 | switch (II->getIntrinsicID()) { | ||||
7101 | case Intrinsic::abs: | ||||
7102 | return getAbsExpr( | ||||
7103 | getSCEV(II->getArgOperand(0)), | ||||
7104 | /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne()); | ||||
7105 | case Intrinsic::umax: | ||||
7106 | return getUMaxExpr(getSCEV(II->getArgOperand(0)), | ||||
7107 | getSCEV(II->getArgOperand(1))); | ||||
7108 | case Intrinsic::umin: | ||||
7109 | return getUMinExpr(getSCEV(II->getArgOperand(0)), | ||||
7110 | getSCEV(II->getArgOperand(1))); | ||||
7111 | case Intrinsic::smax: | ||||
7112 | return getSMaxExpr(getSCEV(II->getArgOperand(0)), | ||||
7113 | getSCEV(II->getArgOperand(1))); | ||||
7114 | case Intrinsic::smin: | ||||
7115 | return getSMinExpr(getSCEV(II->getArgOperand(0)), | ||||
7116 | getSCEV(II->getArgOperand(1))); | ||||
7117 | case Intrinsic::usub_sat: { | ||||
7118 | const SCEV *X = getSCEV(II->getArgOperand(0)); | ||||
7119 | const SCEV *Y = getSCEV(II->getArgOperand(1)); | ||||
7120 | const SCEV *ClampedY = getUMinExpr(X, Y); | ||||
7121 | return getMinusSCEV(X, ClampedY, SCEV::FlagNUW); | ||||
7122 | } | ||||
7123 | case Intrinsic::uadd_sat: { | ||||
7124 | const SCEV *X = getSCEV(II->getArgOperand(0)); | ||||
7125 | const SCEV *Y = getSCEV(II->getArgOperand(1)); | ||||
7126 | const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y)); | ||||
7127 | return getAddExpr(ClampedX, Y, SCEV::FlagNUW); | ||||
7128 | } | ||||
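// Editor's worked example (not part of the source), in i8 (unsigned view):
//   usub_sat(5, 7)     = 5 - umin(5, 7)       = 5 - 5  = 0
//   usub_sat(7, 5)     = 7 - umin(7, 5)       = 7 - 5  = 2
//   uadd_sat(200, 100) = umin(200, ~100) + 100
//                      = umin(200, 155) + 100 = 255 (saturated)
// Both rewrites are exact and never wrap, hence the FlagNUW above.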
7129 | case Intrinsic::start_loop_iterations: | ||||
7130 | // A start_loop_iterations is just equivalent to the first operand for | ||||
7131 | // SCEV purposes. | ||||
7132 | return getSCEV(II->getArgOperand(0)); | ||||
7133 | default: | ||||
7134 | break; | ||||
7135 | } | ||||
7136 | } | ||||
7137 | break; | ||||
7138 | } | ||||
7139 | |||||
7140 | return getUnknown(V); | ||||
7141 | } | ||||
7142 | |||||
7143 | //===----------------------------------------------------------------------===// | ||||
7144 | // Iteration Count Computation Code | ||||
7145 | // | ||||
7146 | |||||
7147 | const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount) { | ||||
7152 7148 | // Get the trip count from the BE count by adding 1. Overflow results | ||||
7153 7149 | // in zero, which means "unknown". | ||||
7150 | return getAddExpr(ExitCount, getOne(ExitCount->getType())); | ||||
7151 | } | ||||
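// Editor's note (illustrative): for an i32 backedge-taken count of 0 the
// trip count is 1; for a backedge-taken count of UINT32_MAX the add wraps
// to 0, which callers interpret as "unknown", matching the comment above.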
7152 | |||||
7153 | static unsigned getConstantTripCount(const SCEVConstant *ExitCount) { | ||||
7154 | if (!ExitCount) | ||||
7155 | return 0; | ||||
7156 | |||||
7157 | ConstantInt *ExitConst = ExitCount->getValue(); | ||||
7158 | |||||
7159 | // Guard against huge trip counts. | ||||
7160 | if (ExitConst->getValue().getActiveBits() > 32) | ||||
7161 | return 0; | ||||
7162 | |||||
7163 | // In case of integer overflow, this returns 0, which is correct. | ||||
7164 | return ((unsigned)ExitConst->getZExtValue()) + 1; | ||||
7165 | } | ||||
7166 | |||||
7167 | unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) { | ||||
7168 | auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact)); | ||||
7169 | return getConstantTripCount(ExitCount); | ||||
7170 | } | ||||
7171 | |||||
7172 | unsigned | ||||
7173 | ScalarEvolution::getSmallConstantTripCount(const Loop *L, | ||||
7174 | const BasicBlock *ExitingBlock) { | ||||
7179 7175 | assert(ExitingBlock && "Must pass a non-null exiting block!"); | ||||
7180 7176 | assert(L->isLoopExiting(ExitingBlock) && | ||||
7181 7177 |        "Exiting block must actually branch out of the loop!"); | ||||
7178 | const SCEVConstant *ExitCount = | ||||
7179 | dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock)); | ||||
7180 | return getConstantTripCount(ExitCount); | ||||
7181 | } | ||||
7182 | |||||
7183 | unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) { | ||||
7184 | const auto *MaxExitCount = | ||||
7185 | dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L)); | ||||
7186 | return getConstantTripCount(MaxExitCount); | ||||
7187 | } | ||||
7188 | |||||
7189 | unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) { | ||||
7190 | SmallVector<BasicBlock *, 8> ExitingBlocks; | ||||
7191 | L->getExitingBlocks(ExitingBlocks); | ||||
7192 | |||||
7193 | Optional<unsigned> Res = None; | ||||
7194 | for (auto *ExitingBB : ExitingBlocks) { | ||||
7195 | unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB); | ||||
7196 | if (!Res) | ||||
7197 | Res = Multiple; | ||||
7198 | Res = (unsigned)GreatestCommonDivisor64(*Res, Multiple); | ||||
7199 | } | ||||
7200 | return Res.getValueOr(1); | ||||
7201 | } | ||||
7202 | |||||
7203 | unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L, | ||||
7204 | const SCEV *ExitCount) { | ||||
7205 | if (ExitCount == getCouldNotCompute()) | ||||
7206 | return 1; | ||||
7207 | |||||
7208 | // Get the trip count | ||||
7209 | const SCEV *TCExpr = getTripCountFromExitCount(ExitCount); | ||||
7210 | |||||
7211 | const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr); | ||||
7212 | if (!TC) | ||||
7213 | // Attempt to factor more general cases. Returns the greatest power of | ||||
7214 | // two divisor. If overflow happens, the trip count expression is still | ||||
7215 | // divisible by the greatest power of 2 divisor returned. | ||||
7216 | return 1U << std::min((uint32_t)31, | ||||
7217 | GetMinTrailingZeros(applyLoopGuards(TCExpr, L))); | ||||
7218 | |||||
7219 | ConstantInt *Result = TC->getValue(); | ||||
7220 | |||||
7221 | // Guard against huge trip counts (this requires checking | ||||
7222 | // for zero to handle the case where the trip count == -1 and the | ||||
7223 | // addition wraps). | ||||
7224 | if (!Result || Result->getValue().getActiveBits() > 32 || | ||||
7225 | Result->getValue().getActiveBits() == 0) | ||||
7226 | return 1; | ||||
7227 | |||||
7228 | return (unsigned)Result->getZExtValue(); | ||||
7229 | } | ||||
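// Editor's sketch (hypothetical values): if the trip count expression is
// (4 * %n), it is not a SCEVConstant, but GetMinTrailingZeros reports 2
// trailing zero bits, so the function returns 1 << 2 = 4 -- the trip count
// is always a multiple of 4. The shift amount is capped at 31 so the
// result fits in unsigned.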
7230 | |||||
7231 | /// Returns the largest constant divisor of the trip count of this loop as a | ||||
7232 | /// normal unsigned value, if possible. This means that the actual trip count is | ||||
7233 | /// always a multiple of the returned value (don't forget the trip count could | ||||
7234 | /// very well be zero as well!). | ||||
7235 | /// | ||||
7236 | /// Returns 1 if the trip count is unknown or not guaranteed to be the | ||||
7237 | /// multiple of a constant (which is also the case if the trip count is simply | ||||
7242 7238 | /// constant; use getSmallConstantTripCount for that case). It will also | ||||
7243 7239 | /// return 1 if the trip count is very large (>= 2^32). | ||||
7240 | /// | ||||
7241 | /// As explained in the comments for getSmallConstantTripCount, this assumes | ||||
7242 | /// that control exits the loop via ExitingBlock. | ||||
7243 | unsigned | ||||
7244 | ScalarEvolution::getSmallConstantTripMultiple(const Loop *L, | ||||
7245 | const BasicBlock *ExitingBlock) { | ||||
7250 7246 | assert(ExitingBlock && "Must pass a non-null exiting block!"); | ||||
7251 7247 | assert(L->isLoopExiting(ExitingBlock) && | ||||
7252 7248 |        "Exiting block must actually branch out of the loop!"); | ||||
7249 | const SCEV *ExitCount = getExitCount(L, ExitingBlock); | ||||
7250 | return getSmallConstantTripMultiple(L, ExitCount); | ||||
7251 | } | ||||
7252 | |||||
7253 | const SCEV *ScalarEvolution::getExitCount(const Loop *L, | ||||
7254 | const BasicBlock *ExitingBlock, | ||||
7255 | ExitCountKind Kind) { | ||||
7256 | switch (Kind) { | ||||
7257 | case Exact: | ||||
7258 | case SymbolicMaximum: | ||||
7259 | return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); | ||||
7260 | case ConstantMaximum: | ||||
7261 | return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this); | ||||
7262 | }; | ||||
7267 7263 | llvm_unreachable("Invalid ExitCountKind!"); | ||||
7264 | } | ||||
7265 | |||||
7266 | const SCEV * | ||||
7267 | ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, | ||||
7268 | SCEVUnionPredicate &Preds) { | ||||
7269 | return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds); | ||||
7270 | } | ||||
7271 | |||||
7272 | const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L, | ||||
7273 | ExitCountKind Kind) { | ||||
7274 | switch (Kind) { | ||||
7275 | case Exact: | ||||
7276 | return getBackedgeTakenInfo(L).getExact(L, this); | ||||
7277 | case ConstantMaximum: | ||||
7278 | return getBackedgeTakenInfo(L).getConstantMax(this); | ||||
7279 | case SymbolicMaximum: | ||||
7280 | return getBackedgeTakenInfo(L).getSymbolicMax(L, this); | ||||
7281 | }; | ||||
7286 7282 | llvm_unreachable("Invalid ExitCountKind!"); | ||||
7283 | } | ||||
7284 | |||||
7285 | bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { | ||||
7286 | return getBackedgeTakenInfo(L).isConstantMaxOrZero(this); | ||||
7287 | } | ||||
7288 | |||||
7289 | /// Push PHI nodes in the header of the given loop onto the given Worklist. | ||||
7290 | static void | ||||
7291 | PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { | ||||
7292 | BasicBlock *Header = L->getHeader(); | ||||
7293 | |||||
7294 | // Push all Loop-header PHIs onto the Worklist stack. | ||||
7295 | for (PHINode &PN : Header->phis()) | ||||
7296 | Worklist.push_back(&PN); | ||||
7297 | } | ||||
7298 | |||||
7299 | const ScalarEvolution::BackedgeTakenInfo & | ||||
7300 | ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { | ||||
7301 | auto &BTI = getBackedgeTakenInfo(L); | ||||
7302 | if (BTI.hasFullInfo()) | ||||
7303 | return BTI; | ||||
7304 | |||||
7305 | auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); | ||||
7306 | |||||
7307 | if (!Pair.second) | ||||
7308 | return Pair.first->second; | ||||
7309 | |||||
7310 | BackedgeTakenInfo Result = | ||||
7311 | computeBackedgeTakenCount(L, /*AllowPredicates=*/true); | ||||
7312 | |||||
7313 | return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); | ||||
7314 | } | ||||
7315 | |||||
7316 | ScalarEvolution::BackedgeTakenInfo & | ||||
7317 | ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { | ||||
7318 | // Initially insert an invalid entry for this loop. If the insertion | ||||
7319 | // succeeds, proceed to actually compute a backedge-taken count and | ||||
7320 | // update the value. The temporary CouldNotCompute value tells SCEV | ||||
7321 | // code elsewhere that it shouldn't attempt to request a new | ||||
7322 | // backedge-taken count, which could result in infinite recursion. | ||||
7323 | std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = | ||||
7324 | BackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); | ||||
7325 | if (!Pair.second) | ||||
7326 | return Pair.first->second; | ||||
7327 | |||||
7328 | // computeBackedgeTakenCount may allocate memory for its result. Inserting it | ||||
7329 | // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result | ||||
7330 | // must be cleared in this scope. | ||||
7331 | BackedgeTakenInfo Result = computeBackedgeTakenCount(L); | ||||
7332 | |||||
7337 7333 | // In a release build, the statistics are not used. | ||||
7334 | (void)NumTripCountsComputed; | ||||
7335 | (void)NumTripCountsNotComputed; | ||||
7340 7336 | #if LLVM_ENABLE_STATS || !defined(NDEBUG) | ||||
7337 | const SCEV *BEExact = Result.getExact(L, this); | ||||
7338 | if (BEExact != getCouldNotCompute()) { | ||||
7343 7339 | assert(isLoopInvariant(BEExact, L) && | ||||
7344 7340 |        isLoopInvariant(Result.getConstantMax(this), L) && | ||||
7345 7341 |        "Computed backedge-taken count isn't loop invariant for loop!"); | ||||
7342 | ++NumTripCountsComputed; | ||||
7343 | } else if (Result.getConstantMax(this) == getCouldNotCompute() && | ||||
7344 | isa<PHINode>(L->getHeader()->begin())) { | ||||
7345 | // Only count loops that have phi nodes as not being computable. | ||||
7346 | ++NumTripCountsNotComputed; | ||||
7347 | } | ||||
7348 | #endif // LLVM_ENABLE_STATS || !defined(NDEBUG) | ||||
7349 | |||||
7350 | // Now that we know more about the trip count for this loop, forget any | ||||
7351 | // existing SCEV values for PHI nodes in this loop since they are only | ||||
7352 | // conservative estimates made without the benefit of trip count | ||||
7353 | // information. This is similar to the code in forgetLoop, except that | ||||
7354 | // it handles SCEVUnknown PHI nodes specially. | ||||
7355 | if (Result.hasAnyInfo()) { | ||||
7356 | SmallVector<Instruction *, 16> Worklist; | ||||
7357 | PushLoopPHIs(L, Worklist); | ||||
7358 | |||||
7359 | SmallPtrSet<Instruction *, 8> Discovered; | ||||
7360 | while (!Worklist.empty()) { | ||||
7361 | Instruction *I = Worklist.pop_back_val(); | ||||
7362 | |||||
7363 | ValueExprMapType::iterator It = | ||||
7364 | ValueExprMap.find_as(static_cast<Value *>(I)); | ||||
7365 | if (It != ValueExprMap.end()) { | ||||
7366 | const SCEV *Old = It->second; | ||||
7367 | |||||
7368 | // SCEVUnknown for a PHI either means that it has an unrecognized | ||||
7373 7369 | // structure, or it's a PHI that's in the process of being computed | ||||
7374 7370 | // by createNodeForPHI. In the former case, additional loop trip | ||||
7375 7371 | // count information isn't going to change anything. In the latter | ||||
7376 7372 | // case, createNodeForPHI will perform the necessary updates on its | ||||
7373 | // own when it gets to that point. | ||||
7374 | if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) { | ||||
7375 | eraseValueFromMap(It->first); | ||||
7376 | forgetMemoizedResults(Old); | ||||
7377 | } | ||||
7378 | if (PHINode *PN = dyn_cast<PHINode>(I)) | ||||
7379 | ConstantEvolutionLoopExitValue.erase(PN); | ||||
7380 | } | ||||
7381 | |||||
7382 | // Since we don't need to invalidate anything for correctness and we're | ||||
7383 | // only invalidating to make SCEV's results more precise, we get to stop | ||||
7384 | // early to avoid invalidating too much. This is especially important in | ||||
7385 | // cases like: | ||||
7386 | // | ||||
7387 | // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node | ||||
7388 | // loop0: | ||||
7389 | // %pn0 = phi | ||||
7390 | // ... | ||||
7391 | // loop1: | ||||
7392 | // %pn1 = phi | ||||
7393 | // ... | ||||
7394 | // | ||||
7399 7395 | // where both loop0's and loop1's backedge-taken counts use the SCEV | ||||
7396 | // expression for %v. If we don't have the early stop below then in cases | ||||
7397 | // like the above, getBackedgeTakenInfo(loop1) will clear out the trip | ||||
7398 | // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip | ||||
7399 | // count for loop1, effectively nullifying SCEV's trip count cache. | ||||
7400 | for (auto *U : I->users()) | ||||
7401 | if (auto *I = dyn_cast<Instruction>(U)) { | ||||
7402 | auto *LoopForUser = LI.getLoopFor(I->getParent()); | ||||
7403 | if (LoopForUser && L->contains(LoopForUser) && | ||||
7404 | Discovered.insert(I).second) | ||||
7405 | Worklist.push_back(I); | ||||
7406 | } | ||||
7407 | } | ||||
7408 | } | ||||
7409 | |||||
7410 | // Re-lookup the insert position, since the call to | ||||
7411 | // computeBackedgeTakenCount above could result in a | ||||
7416 7412 | // recursive call to getBackedgeTakenInfo (on a different | ||||
7413 | // loop), which would invalidate the iterator computed | ||||
7414 | // earlier. | ||||
7415 | return BackedgeTakenCounts.find(L)->second = std::move(Result); | ||||
7416 | } | ||||
7417 | |||||
7418 | void ScalarEvolution::forgetAllLoops() { | ||||
7419 | // This method is intended to forget all info about loops. It should | ||||
7420 | // invalidate caches as if the following happened: | ||||
7421 | // - The trip counts of all loops have changed arbitrarily | ||||
7422 | // - Every llvm::Value has been updated in place to produce a different | ||||
7423 | // result. | ||||
7424 | BackedgeTakenCounts.clear(); | ||||
7425 | PredicatedBackedgeTakenCounts.clear(); | ||||
7426 | LoopPropertiesCache.clear(); | ||||
7427 | ConstantEvolutionLoopExitValue.clear(); | ||||
7428 | ValueExprMap.clear(); | ||||
7429 | ValuesAtScopes.clear(); | ||||
7430 | LoopDispositions.clear(); | ||||
7431 | BlockDispositions.clear(); | ||||
7432 | UnsignedRanges.clear(); | ||||
7433 | SignedRanges.clear(); | ||||
7434 | ExprValueMap.clear(); | ||||
7435 | HasRecMap.clear(); | ||||
7436 | MinTrailingZerosCache.clear(); | ||||
7437 | PredicatedSCEVRewrites.clear(); | ||||
7438 | } | ||||
7439 | |||||
7440 | void ScalarEvolution::forgetLoop(const Loop *L) { | ||||
7441 | SmallVector<const Loop *, 16> LoopWorklist(1, L); | ||||
7442 | SmallVector<Instruction *, 32> Worklist; | ||||
7443 | SmallPtrSet<Instruction *, 16> Visited; | ||||
7444 | |||||
7445 | // Iterate over all the loops and sub-loops to drop SCEV information. | ||||
7446 | while (!LoopWorklist.empty()) { | ||||
7447 | auto *CurrL = LoopWorklist.pop_back_val(); | ||||
7448 | |||||
7449 | // Drop any stored trip count value. | ||||
7450 | BackedgeTakenCounts.erase(CurrL); | ||||
7451 | PredicatedBackedgeTakenCounts.erase(CurrL); | ||||
7452 | |||||
7453 | // Drop information about predicated SCEV rewrites for this loop. | ||||
7454 | for (auto I = PredicatedSCEVRewrites.begin(); | ||||
7455 | I != PredicatedSCEVRewrites.end();) { | ||||
7456 | std::pair<const SCEV *, const Loop *> Entry = I->first; | ||||
7457 | if (Entry.second == CurrL) | ||||
7458 | PredicatedSCEVRewrites.erase(I++); | ||||
7459 | else | ||||
7460 | ++I; | ||||
7461 | } | ||||
7462 | |||||
7463 | auto LoopUsersItr = LoopUsers.find(CurrL); | ||||
7464 | if (LoopUsersItr != LoopUsers.end()) { | ||||
7465 | for (auto *S : LoopUsersItr->second) | ||||
7466 | forgetMemoizedResults(S); | ||||
7467 | LoopUsers.erase(LoopUsersItr); | ||||
7468 | } | ||||
7469 | |||||
7470 | // Drop information about expressions based on loop-header PHIs. | ||||
7471 | PushLoopPHIs(CurrL, Worklist); | ||||
7472 | |||||
7473 | while (!Worklist.empty()) { | ||||
7474 | Instruction *I = Worklist.pop_back_val(); | ||||
7475 | if (!Visited.insert(I).second) | ||||
7476 | continue; | ||||
7477 | |||||
7478 | ValueExprMapType::iterator It = | ||||
7479 | ValueExprMap.find_as(static_cast<Value *>(I)); | ||||
7480 | if (It != ValueExprMap.end()) { | ||||
7481 | eraseValueFromMap(It->first); | ||||
7482 | forgetMemoizedResults(It->second); | ||||
7483 | if (PHINode *PN = dyn_cast<PHINode>(I)) | ||||
7484 | ConstantEvolutionLoopExitValue.erase(PN); | ||||
7485 | } | ||||
7486 | |||||
7487 | PushDefUseChildren(I, Worklist); | ||||
7488 | } | ||||
7489 | |||||
7490 | LoopPropertiesCache.erase(CurrL); | ||||
7491 | // Forget all contained loops too, to avoid dangling entries in the | ||||
7492 | // ValuesAtScopes map. | ||||
7493 | LoopWorklist.append(CurrL->begin(), CurrL->end()); | ||||
7494 | } | ||||
7495 | } | ||||
7496 | |||||
7497 | void ScalarEvolution::forgetTopmostLoop(const Loop *L) { | ||||
7498 | while (Loop *Parent = L->getParentLoop()) | ||||
7499 | L = Parent; | ||||
7500 | forgetLoop(L); | ||||
7501 | } | ||||
7502 | |||||
7503 | void ScalarEvolution::forgetValue(Value *V) { | ||||
7504 | Instruction *I = dyn_cast<Instruction>(V); | ||||
7505 | if (!I) return; | ||||
7506 | |||||
7507 | // Drop information about expressions based on loop-header PHIs. | ||||
7508 | SmallVector<Instruction *, 16> Worklist; | ||||
7509 | Worklist.push_back(I); | ||||
7510 | |||||
7511 | SmallPtrSet<Instruction *, 8> Visited; | ||||
7512 | while (!Worklist.empty()) { | ||||
7513 | I = Worklist.pop_back_val(); | ||||
7514 | if (!Visited.insert(I).second) | ||||
7515 | continue; | ||||
7516 | |||||
7517 | ValueExprMapType::iterator It = | ||||
7518 | ValueExprMap.find_as(static_cast<Value *>(I)); | ||||
7519 | if (It != ValueExprMap.end()) { | ||||
7520 | eraseValueFromMap(It->first); | ||||
7521 | forgetMemoizedResults(It->second); | ||||
7522 | if (PHINode *PN = dyn_cast<PHINode>(I)) | ||||
7523 | ConstantEvolutionLoopExitValue.erase(PN); | ||||
7524 | } | ||||
7525 | |||||
7526 | PushDefUseChildren(I, Worklist); | ||||
7527 | } | ||||
7528 | } | ||||
7529 | |||||
7530 | void ScalarEvolution::forgetLoopDispositions(const Loop *L) { | ||||
7531 | LoopDispositions.clear(); | ||||
7532 | } | ||||
7533 | |||||
7534 | /// Get the exact loop backedge taken count considering all loop exits. A | ||||
7535 | /// computable result can only be returned for loops with all exiting blocks | ||||
7536 | /// dominating the latch. howFarToZero assumes that the limit of each loop test | ||||
7537 | /// is never skipped. This is a valid assumption as long as the loop exits via | ||||
7538 | /// that test. For precise results, it is the caller's responsibility to specify | ||||
7539 | /// the relevant loop exiting block using getExact(ExitingBlock, SE). | ||||
7540 | const SCEV * | ||||
7541 | ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, | ||||
7542 | SCEVUnionPredicate *Preds) const { | ||||
7543 | // If any exits were not computable, the loop is not computable. | ||||
7544 | if (!isComplete() || ExitNotTaken.empty()) | ||||
7545 | return SE->getCouldNotCompute(); | ||||
7546 | |||||
7547 | const BasicBlock *Latch = L->getLoopLatch(); | ||||
7548 | // All exiting blocks we have collected must dominate the only backedge. | ||||
7549 | if (!Latch) | ||||
7550 | return SE->getCouldNotCompute(); | ||||
7551 | |||||
7552 | // All exiting blocks we have gathered dominate loop's latch, so exact trip | ||||
7553 | // count is simply a minimum out of all these calculated exit counts. | ||||
7554 | SmallVector<const SCEV *, 2> Ops; | ||||
7555 | for (auto &ENT : ExitNotTaken) { | ||||
7556 | const SCEV *BECount = ENT.ExactNotTaken; | ||||
7561 7557 | assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); | ||||
7562 7558 | assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && | ||||
7563 7559 |        "We should only have known counts for exiting blocks that dominate " | ||||
7564 7560 |        "latch!"); | ||||
7561 | |||||
7562 | Ops.push_back(BECount); | ||||
7563 | |||||
7564 | if (Preds && !ENT.hasAlwaysTruePredicate()) | ||||
7565 | Preds->add(ENT.Predicate.get()); | ||||
7566 | |||||
7571 7567 | assert((Preds || ENT.hasAlwaysTruePredicate()) && | ||||
7572 7568 |        "Predicate should be always true!"); | ||||
7569 | } | ||||
7570 | |||||
7571 | return SE->getUMinFromMismatchedTypes(Ops); | ||||
7572 | } | ||||
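// Editor's sketch (hypothetical counts): for a loop with two computable
// exits that both dominate the latch, taking %n and %m iterations to
// trigger, the exact backedge-taken count is umin(%n, %m) -- whichever
// exit fires first ends the loop.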
7573 | |||||
7574 | /// Get the exact not taken count for this loop exit. | ||||
7575 | const SCEV * | ||||
7576 | ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock, | ||||
7577 | ScalarEvolution *SE) const { | ||||
7578 | for (auto &ENT : ExitNotTaken) | ||||
7579 | if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) | ||||
7580 | return ENT.ExactNotTaken; | ||||
7581 | |||||
7582 | return SE->getCouldNotCompute(); | ||||
7583 | } | ||||
7584 | |||||
7585 | const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax( | ||||
7586 | const BasicBlock *ExitingBlock, ScalarEvolution *SE) const { | ||||
7587 | for (auto &ENT : ExitNotTaken) | ||||
7588 | if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) | ||||
7589 | return ENT.MaxNotTaken; | ||||
7590 | |||||
7591 | return SE->getCouldNotCompute(); | ||||
7592 | } | ||||
7593 | |||||
7594 | /// getConstantMax - Get the constant max backedge taken count for the loop. | ||||
7595 | const SCEV * | ||||
7596 | ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const { | ||||
7597 | auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { | ||||
7598 | return !ENT.hasAlwaysTruePredicate(); | ||||
7599 | }; | ||||
7600 | |||||
7601 | if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getConstantMax()) | ||||
7602 | return SE->getCouldNotCompute(); | ||||
7603 | |||||
7608 7604 | assert((isa<SCEVCouldNotCompute>(getConstantMax()) || | ||||
7609 7605 |         isa<SCEVConstant>(getConstantMax())) && | ||||
7610 7606 |        "No point in having a non-constant max backedge taken count!"); | ||||
7607 | return getConstantMax(); | ||||
7608 | } | ||||
7609 | |||||
7610 | const SCEV * | ||||
7611 | ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L, | ||||
7612 | ScalarEvolution *SE) { | ||||
7613 | if (!SymbolicMax) | ||||
7614 | SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L); | ||||
7615 | return SymbolicMax; | ||||
7616 | } | ||||
7617 | |||||
7618 | bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero( | ||||
7619 | ScalarEvolution *SE) const { | ||||
7620 | auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { | ||||
7621 | return !ENT.hasAlwaysTruePredicate(); | ||||
7622 | }; | ||||
7623 | return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); | ||||
7624 | } | ||||
7625 | |||||
7626 | bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S) const { | ||||
7627 | return Operands.contains(S); | ||||
7628 | } | ||||
7629 | |||||
7630 | ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) | ||||
7631 | : ExitLimit(E, E, false, None) { | ||||
7632 | } | ||||
7633 | |||||
7634 | ScalarEvolution::ExitLimit::ExitLimit( | ||||
7635 | const SCEV *E, const SCEV *M, bool MaxOrZero, | ||||
7636 | ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) | ||||
7637 | : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { | ||||
7642 7638 | assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || | ||||
7643 7639 |         !isa<SCEVCouldNotCompute>(MaxNotTaken)) && | ||||
7644 7640 |        "Exact is not allowed to be less precise than Max"); | ||||
7645 7641 | assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || | ||||
7646 7642 |         isa<SCEVConstant>(MaxNotTaken)) && | ||||
7647 7643 |        "No point in having a non-constant max backedge taken count!"); | ||||
7644 | for (auto *PredSet : PredSetList) | ||||
7645 | for (auto *P : *PredSet) | ||||
7646 | addPredicate(P); | ||||
7651 7647 | assert((isa<SCEVCouldNotCompute>(E) || !E->getType()->isPointerTy()) && | ||||
7652 7648 |        "Backedge count should be int"); | ||||
7653 7649 | assert((isa<SCEVCouldNotCompute>(M) || !M->getType()->isPointerTy()) && | ||||
7654 7650 |        "Max backedge count should be int"); | ||||
7651 | } | ||||
7652 | |||||
7653 | ScalarEvolution::ExitLimit::ExitLimit( | ||||
7654 | const SCEV *E, const SCEV *M, bool MaxOrZero, | ||||
7655 | const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) | ||||
7656 | : ExitLimit(E, M, MaxOrZero, {&PredSet}) { | ||||
7657 | } | ||||
7658 | |||||
7659 | ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, | ||||
7660 | bool MaxOrZero) | ||||
7661 | : ExitLimit(E, M, MaxOrZero, None) { | ||||
7662 | } | ||||
7663 | |||||
7664 | class SCEVRecordOperands { | ||||
7665 | SmallPtrSetImpl<const SCEV *> &Operands; | ||||
7666 | |||||
7667 | public: | ||||
7668 | SCEVRecordOperands(SmallPtrSetImpl<const SCEV *> &Operands) | ||||
7669 | : Operands(Operands) {} | ||||
7670 | bool follow(const SCEV *S) { | ||||
7671 | Operands.insert(S); | ||||
7672 | return true; | ||||
7673 | } | ||||
7674 | bool isDone() { return false; } | ||||
7675 | }; | ||||
7676 | |||||
7677 | /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each | ||||
7678 | /// computable exit into a persistent ExitNotTakenInfo array. | ||||
7679 | ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( | ||||
7680 | ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts, | ||||
7681 | bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero) | ||||
7682 | : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) { | ||||
7683 | using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; | ||||
7684 | |||||
7685 | ExitNotTaken.reserve(ExitCounts.size()); | ||||
7686 | std::transform( | ||||
7687 | ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), | ||||
7688 | [&](const EdgeExitInfo &EEI) { | ||||
7689 | BasicBlock *ExitBB = EEI.first; | ||||
7690 | const ExitLimit &EL = EEI.second; | ||||
7691 | if (EL.Predicates.empty()) | ||||
7692 | return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken, | ||||
7693 | nullptr); | ||||
7694 | |||||
7695 | std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate); | ||||
7696 | for (auto *Pred : EL.Predicates) | ||||
7697 | Predicate->add(Pred); | ||||
7698 | |||||
7699 | return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken, | ||||
7700 | std::move(Predicate)); | ||||
7701 | }); | ||||
7706 7702 | assert((isa<SCEVCouldNotCompute>(ConstantMax) || | ||||
7707 7703 |         isa<SCEVConstant>(ConstantMax)) && | ||||
7708 7704 |        "No point in having a non-constant max backedge taken count!"); | ||||
7705 | |||||
7706 | SCEVRecordOperands RecordOperands(Operands); | ||||
7707 | SCEVTraversal<SCEVRecordOperands> ST(RecordOperands); | ||||
7708 | if (!isa<SCEVCouldNotCompute>(ConstantMax)) | ||||
7709 | ST.visitAll(ConstantMax); | ||||
7710 | for (auto &ENT : ExitNotTaken) | ||||
7711 | if (!isa<SCEVCouldNotCompute>(ENT.ExactNotTaken)) | ||||
7712 | ST.visitAll(ENT.ExactNotTaken); | ||||
7713 | } | ||||
7714 | |||||
7715 | /// Compute the number of times the backedge of the specified loop will execute. | ||||
7716 | ScalarEvolution::BackedgeTakenInfo | ||||
7717 | ScalarEvolution::computeBackedgeTakenCount(const Loop *L, | ||||
7718 | bool AllowPredicates) { | ||||
7719 | SmallVector<BasicBlock *, 8> ExitingBlocks; | ||||
7720 | L->getExitingBlocks(ExitingBlocks); | ||||
7721 | |||||
7722 | using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; | ||||
7723 | |||||
7724 | SmallVector<EdgeExitInfo, 4> ExitCounts; | ||||
7725 | bool CouldComputeBECount = true; | ||||
7726 | BasicBlock *Latch = L->getLoopLatch(); // may be NULL. | ||||
7727 | const SCEV *MustExitMaxBECount = nullptr; | ||||
7728 | const SCEV *MayExitMaxBECount = nullptr; | ||||
7729 | bool MustExitMaxOrZero = false; | ||||
7730 | |||||
7731 | // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts | ||||
7732 | // and compute maxBECount. | ||||
7733 | // Do a union of all the predicates here. | ||||
7734 | for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { | ||||
7735 | BasicBlock *ExitBB = ExitingBlocks[i]; | ||||
7736 | |||||
7741 7737 | // We canonicalize untaken exits to br (constant) and ignore them, so | ||||
7742 7738 | // that proving an exit untaken doesn't negatively impact our ability | ||||
7743 7739 | // to reason about the loop as a whole. | ||||
7740 | if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator())) | ||||
7741 | if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) { | ||||
7742 | bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); | ||||
7743 | if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne())) | ||||
7744 | continue; | ||||
7745 | } | ||||
7746 | |||||
7747 | ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); | ||||
7748 | |||||
7753 7749 | assert((AllowPredicates || EL.Predicates.empty()) && | ||||
7754 7750 |        "Predicated exit limit when predicates are not allowed!"); | ||||
7751 | |||||
7752 | // 1. For each exit that can be computed, add an entry to ExitCounts. | ||||
7753 | // CouldComputeBECount is true only if all exits can be computed. | ||||
7754 | if (EL.ExactNotTaken == getCouldNotCompute()) | ||||
7755 | // We couldn't compute an exact value for this exit, so | ||||
7756 | // we won't be able to compute an exact value for the loop. | ||||
7757 | CouldComputeBECount = false; | ||||
7758 | else | ||||
7759 | ExitCounts.emplace_back(ExitBB, EL); | ||||
7760 | |||||
7761 | // 2. Derive the loop's MaxBECount from each exit's max number of | ||||
7762 | // non-exiting iterations. Partition the loop exits into two kinds: | ||||
7763 | // LoopMustExits and LoopMayExits. | ||||
7764 | // | ||||
7765 | // If the exit dominates the loop latch, it is a LoopMustExit otherwise it | ||||
7766 | // is a LoopMayExit. If any computable LoopMustExit is found, then | ||||
7767 | // MaxBECount is the minimum EL.MaxNotTaken of computable | ||||
7768 | // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum | ||||
7769 | // EL.MaxNotTaken, where CouldNotCompute is considered greater than any | ||||
7770 | // computable EL.MaxNotTaken. | ||||
7771 | if (EL.MaxNotTaken != getCouldNotCompute() && Latch && | ||||
7772 | DT.dominates(ExitBB, Latch)) { | ||||
7773 | if (!MustExitMaxBECount) { | ||||
7774 | MustExitMaxBECount = EL.MaxNotTaken; | ||||
7775 | MustExitMaxOrZero = EL.MaxOrZero; | ||||
7776 | } else { | ||||
7777 | MustExitMaxBECount = | ||||
7778 | getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); | ||||
7779 | } | ||||
7780 | } else if (MayExitMaxBECount != getCouldNotCompute()) { | ||||
7781 | if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) | ||||
7782 | MayExitMaxBECount = EL.MaxNotTaken; | ||||
7783 | else { | ||||
7784 | MayExitMaxBECount = | ||||
7785 | getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); | ||||
7786 | } | ||||
7787 | } | ||||
7788 | } | ||||
7789 | const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : | ||||
7790 | (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); | ||||
7791 | // The loop backedge will be taken the maximum or zero times if there's | ||||
7792 | // a single exit that must be taken the maximum or zero times. | ||||
7793 | bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); | ||||
7794 | return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, | ||||
7795 | MaxBECount, MaxOrZero); | ||||
7796 | } | ||||
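// Editor's sketch (hypothetical counts): given one exit that dominates the
// latch with MaxNotTaken = 10 and one that does not with MaxNotTaken = 20,
// the must-exit bound wins and MaxBECount = 10; the may-exit maximum (20)
// is only used when no computable must-exit exists.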
7797 | |||||
7798 | ScalarEvolution::ExitLimit | ||||
7799 | ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, | ||||
7800 | bool AllowPredicates) { | ||||
7805 7801 | assert(L->contains(ExitingBlock) && "Exit count for non-loop block?"); | ||||
7802 | // If our exiting block does not dominate the latch, then its connection with | ||||
7803 | // loop's exit limit may be far from trivial. | ||||
7804 | const BasicBlock *Latch = L->getLoopLatch(); | ||||
7805 | if (!Latch || !DT.dominates(ExitingBlock, Latch)) | ||||
7806 | return getCouldNotCompute(); | ||||
7807 | |||||
7808 | bool IsOnlyExit = (L->getExitingBlock() != nullptr); | ||||
7809 | Instruction *Term = ExitingBlock->getTerminator(); | ||||
7810 | if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { | ||||
7815 7811 | assert(BI->isConditional() && "If unconditional, it can't be in loop!"); | ||||
7812 | bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); | ||||
7817 7813 | assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) && | ||||
7818 7814 |        "It should have one successor in loop and one exit block!"); | ||||
7815 | // Proceed to the next level to examine the exit condition expression. | ||||
7816 | return computeExitLimitFromCond( | ||||
7817 | L, BI->getCondition(), ExitIfTrue, | ||||
7818 | /*ControlsExit=*/IsOnlyExit, AllowPredicates); | ||||
7819 | } | ||||
7820 | |||||
7821 | if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) { | ||||
7822 | // For switch, make sure that there is a single exit from the loop. | ||||
7823 | BasicBlock *Exit = nullptr; | ||||
7824 | for (auto *SBB : successors(ExitingBlock)) | ||||
7825 | if (!L->contains(SBB)) { | ||||
7826 | if (Exit) // Multiple exit successors. | ||||
7827 | return getCouldNotCompute(); | ||||
7828 | Exit = SBB; | ||||
7829 | } | ||||
7834 7830 | assert(Exit && "Exiting block must have at least one exit"); | ||||
7831 | return computeExitLimitFromSingleExitSwitch(L, SI, Exit, | ||||
7832 | /*ControlsExit=*/IsOnlyExit); | ||||
7833 | } | ||||
7834 | |||||
7835 | return getCouldNotCompute(); | ||||
7836 | } | ||||
7837 | |||||
7838 | ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( | ||||
7839 | const Loop *L, Value *ExitCond, bool ExitIfTrue, | ||||
7840 | bool ControlsExit, bool AllowPredicates) { | ||||
7841 | ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); | ||||
7842 | return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, | ||||
7843 | ControlsExit, AllowPredicates); | ||||
7844 | } | ||||
7845 | |||||
7846 | Optional<ScalarEvolution::ExitLimit> | ||||
7847 | ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, | ||||
7848 | bool ExitIfTrue, bool ControlsExit, | ||||
7849 | bool AllowPredicates) { | ||||
7850 | (void)this->L; | ||||
7851 | (void)this->ExitIfTrue; | ||||
7852 | (void)this->AllowPredicates; | ||||
7853 | |||||
7858 7854 | assert(this->L == L && this->ExitIfTrue == ExitIfTrue && | ||||
7859 7855 |        this->AllowPredicates == AllowPredicates && | ||||
7860 7856 |        "Variance in assumed invariant key components!"); | ||||
7857 | auto Itr = TripCountMap.find({ExitCond, ControlsExit}); | ||||
7858 | if (Itr == TripCountMap.end()) | ||||
7859 | return None; | ||||
7860 | return Itr->second; | ||||
7861 | } | ||||
7862 | |||||
7863 | void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, | ||||
7864 | bool ExitIfTrue, | ||||
7865 | bool ControlsExit, | ||||
7866 | bool AllowPredicates, | ||||
7867 | const ExitLimit &EL) { | ||||
7872 7868 | assert(this->L == L && this->ExitIfTrue == ExitIfTrue && | ||||
7873 7869 |        this->AllowPredicates == AllowPredicates && | ||||
7874 7870 |        "Variance in assumed invariant key components!"); | ||||
7871 | |||||
7872 | auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); | ||||
7877 7873 | assert(InsertResult.second && "Expected successful insertion!"); | ||||
7874 | (void)InsertResult; | ||||
7875 | (void)ExitIfTrue; | ||||
7876 | } | ||||
7877 | |||||
7878 | ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( | ||||
7879 | ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, | ||||
7880 | bool ControlsExit, bool AllowPredicates) { | ||||
7881 | |||||
7882 | if (auto MaybeEL = | ||||
7883 | Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) | ||||
7884 | return *MaybeEL; | ||||
7885 | |||||
7886 | ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, | ||||
7887 | ControlsExit, AllowPredicates); | ||||
7888 | Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); | ||||
7889 | return EL; | ||||
7890 | } | ||||
7891 | |||||
7892 | ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( | ||||
7893 | ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, | ||||
7894 | bool ControlsExit, bool AllowPredicates) { | ||||
7895 | // Handle BinOp conditions (And, Or). | ||||
7896 | if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp( | ||||
7897 | Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) | ||||
7898 | return *LimitFromBinOp; | ||||
7899 | |||||
7900 | // With an icmp, it may be feasible to compute an exact backedge-taken count. | ||||
7901 | // Proceed to the next level to examine the icmp. | ||||
7902 | if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { | ||||
7903 | ExitLimit EL = | ||||
7904 | computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); | ||||
7905 | if (EL.hasFullInfo() || !AllowPredicates) | ||||
7906 | return EL; | ||||
7907 | |||||
7908 | // Try again, but use SCEV predicates this time. | ||||
7909 | return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, | ||||
7910 | /*AllowPredicates=*/true); | ||||
7911 | } | ||||
7912 | |||||
7913 | // Check for a constant condition. These are normally stripped out by | ||||
7914 | // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to | ||||
7915 | // preserve the CFG and is temporarily leaving constant conditions | ||||
7916 | // in place. | ||||
7917 | if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { | ||||
7918 | if (ExitIfTrue == !CI->getZExtValue()) | ||||
7919 | // The backedge is always taken. | ||||
7920 | return getCouldNotCompute(); | ||||
7921 | else | ||||
7922 | // The backedge is never taken. | ||||
7923 | return getZero(CI->getType()); | ||||
7924 | } | ||||
7925 | |||||
7926 | // If it's not an integer or pointer comparison then compute it the hard way. | ||||
7927 | return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); | ||||
7928 | } | ||||
7929 | |||||
7930 | Optional<ScalarEvolution::ExitLimit> | ||||
7931 | ScalarEvolution::computeExitLimitFromCondFromBinOp( | ||||
7932 | ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, | ||||
7933 | bool ControlsExit, bool AllowPredicates) { | ||||
7934 | // Check if the controlling expression for this loop is an And or Or. | ||||
7935 | Value *Op0, *Op1; | ||||
7936 | bool IsAnd = false; | ||||
7937 | if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) | ||||
7938 | IsAnd = true; | ||||
7939 | else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) | ||||
7940 | IsAnd = false; | ||||
7941 | else | ||||
7942 | return None; | ||||
7943 | |||||
7944 | // EitherMayExit is true in these two cases: | ||||
7945 | // br (and Op0 Op1), loop, exit | ||||
7946 | // br (or Op0 Op1), exit, loop | ||||
7947 | bool EitherMayExit = IsAnd ^ ExitIfTrue; | ||||
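// Editor's note (truth table for the xor above, covering all four shapes,
// not part of the original source):
//   and, exit-if-false (br %c, loop, exit): either operand false exits -> true
//   or,  exit-if-true  (br %c, exit, loop): either operand true  exits -> true
//   and, exit-if-true  (br %c, exit, loop): both must be true to exit  -> false
//   or,  exit-if-false (br %c, loop, exit): both must be false to exit -> false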
7948 | ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue, | ||||
7949 | ControlsExit && !EitherMayExit, | ||||
7950 | AllowPredicates); | ||||
7951 | ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue, | ||||
7952 | ControlsExit && !EitherMayExit, | ||||
7953 | AllowPredicates); | ||||
7954 | |||||
7955 | // Be robust against unsimplified IR for the form "op i1 X, NeutralElement" | ||||
7956 | const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd); | ||||
7957 | if (isa<ConstantInt>(Op1)) | ||||
7958 | return Op1 == NeutralElement ? EL0 : EL1; | ||||
7959 | if (isa<ConstantInt>(Op0)) | ||||
7960 | return Op0 == NeutralElement ? EL1 : EL0; | ||||
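// Editor's example (assumed IR): for unsimplified "%cond = and i1 %c, true"
// with exit-if-false, Op1 is the neutral element of and, so the exit limit
// is just EL0, computed from %c alone. A constant non-neutral operand
// (e.g. "and i1 %c, false") makes the other operand irrelevant, so the
// constant's own limit is returned instead.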
7961 | |||||
7962 | const SCEV *BECount = getCouldNotCompute(); | ||||
7963 | const SCEV *MaxBECount = getCouldNotCompute(); | ||||
7964 | if (EitherMayExit) { | ||||
7969 7965 | // Both conditions must be true for the loop to continue executing. | ||||
7970 7966 | // Choose the less conservative count. | ||||
7967 | // If ExitCond is a short-circuit form (select), using | ||||
7968 | // umin(EL0.ExactNotTaken, EL1.ExactNotTaken) is unsafe in general. | ||||
7969 | // To see the detailed examples, please see | ||||
7970 | // test/Analysis/ScalarEvolution/exit-count-select.ll | ||||
7971 | bool PoisonSafe = isa<BinaryOperator>(ExitCond); | ||||
7972 | if (!PoisonSafe) | ||||
7973 | // Even if ExitCond is select, we can safely derive BECount using both | ||||
7974 | // EL0 and EL1 in these cases: | ||||
7975 | // (1) EL0.ExactNotTaken is non-zero | ||||
7976 | // (2) EL1.ExactNotTaken is non-poison | ||||
7977 | // (3) EL0.ExactNotTaken is zero (BECount should be simply zero and | ||||
7978 | // it cannot be umin(0, ..)) | ||||
7979 | // The PoisonSafe assignment below is simplified and the assertion after | ||||
7980 | // BECount calculation fully guarantees the condition (3). | ||||
7981 | PoisonSafe = isa<SCEVConstant>(EL0.ExactNotTaken) || | ||||
7982 | isa<SCEVConstant>(EL1.ExactNotTaken); | ||||
7983 | if (EL0.ExactNotTaken != getCouldNotCompute() && | ||||
7984 | EL1.ExactNotTaken != getCouldNotCompute() && PoisonSafe) { | ||||
7985 | BECount = | ||||
7986 | getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); | ||||
7987 | |||||
7988 | // If EL0.ExactNotTaken was zero and ExitCond was a short-circuit form, | ||||
7989 | // it should have been simplified to zero (see the condition (3) above) | ||||
7994 7990 | assert(!isa<BinaryOperator>(ExitCond) || !EL0.ExactNotTaken->isZero() || | ||||
7995 7991 |        BECount->isZero()); | ||||
7992 | } | ||||
7993 | if (EL0.MaxNotTaken == getCouldNotCompute()) | ||||
7994 | MaxBECount = EL1.MaxNotTaken; | ||||
7995 | else if (EL1.MaxNotTaken == getCouldNotCompute()) | ||||
7996 | MaxBECount = EL0.MaxNotTaken; | ||||
7997 | else | ||||
7998 | MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); | ||||
7999 | } else { | ||||
8004 8000 | // Both conditions must be true at the same time for the loop to exit. | ||||
8001 | // For now, be conservative. | ||||
8002 | if (EL0.ExactNotTaken == EL1.ExactNotTaken) | ||||
8003 | BECount = EL0.ExactNotTaken; | ||||
8004 | } | ||||
8005 | |||||
8006 | // There are cases (e.g. PR26207) where computeExitLimitFromCond is able | ||||
8007 | // to be more aggressive when computing BECount than when computing | ||||
8008 | // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and | ||||
8009 | // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken | ||||
8010 | // to not. | ||||
8011 | if (isa<SCEVCouldNotCompute>(MaxBECount) && | ||||
8012 | !isa<SCEVCouldNotCompute>(BECount)) | ||||
8013 | MaxBECount = getConstant(getUnsignedRangeMax(BECount)); | ||||
8014 | |||||
8015 | return ExitLimit(BECount, MaxBECount, false, | ||||
8016 | { &EL0.Predicates, &EL1.Predicates }); | ||||
8017 | } | ||||
8018 | |||||
8019 | ScalarEvolution::ExitLimit | ||||
8020 | ScalarEvolution::computeExitLimitFromICmp(const Loop *L, | ||||
8021 | ICmpInst *ExitCond, | ||||
8022 | bool ExitIfTrue, | ||||
8023 | bool ControlsExit, | ||||
8024 | bool AllowPredicates) { | ||||
8025 | // If the condition was exit on true, convert the condition to exit on false | ||||
8026 | ICmpInst::Predicate Pred; | ||||
8027 | if (!ExitIfTrue) | ||||
8028 | Pred = ExitCond->getPredicate(); | ||||
8029 | else | ||||
8030 | Pred = ExitCond->getInversePredicate(); | ||||
8031 | const ICmpInst::Predicate OriginalPred = Pred; | ||||
8032 | |||||
8033 | // Handle common loops like: for (X = "string"; *X; ++X) | ||||
8034 | if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) | ||||
8035 | if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { | ||||
8036 | ExitLimit ItCnt = | ||||
8037 | computeLoadConstantCompareExitLimit(LI, RHS, L, Pred); | ||||
8038 | if (ItCnt.hasAnyInfo()) | ||||
8039 | return ItCnt; | ||||
8040 | } | ||||
8041 | |||||
8042 | const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); | ||||
8043 | const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); | ||||
8044 | |||||
8045 | // Try to evaluate any dependencies out of the loop. | ||||
8046 | LHS = getSCEVAtScope(LHS, L); | ||||
8047 | RHS = getSCEVAtScope(RHS, L); | ||||
8048 | |||||
8049 | // At this point, we would like to compute how many iterations of the | ||||
8050 | // loop the predicate will return true for these inputs. | ||||
8051 | if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { | ||||
8052 | // If there is a loop-invariant, force it into the RHS. | ||||
8053 | std::swap(LHS, RHS); | ||||
8054 | Pred = ICmpInst::getSwappedPredicate(Pred); | ||||
8055 | } | ||||
8056 | |||||
8057 | // Simplify the operands before analyzing them. | ||||
8058 | (void)SimplifyICmpOperands(Pred, LHS, RHS); | ||||
8059 | |||||
8060 | // If we have a comparison of a chrec against a constant, try to use value | ||||
8061 | // ranges to answer this query. | ||||
8062 | if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) | ||||
8063 | if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) | ||||
8064 | if (AddRec->getLoop() == L) { | ||||
8065 | // Form the constant range. | ||||
8066 | ConstantRange CompRange = | ||||
8067 | ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); | ||||
8068 | |||||
8069 | const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); | ||||
8070 | if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; | ||||
8071 | } | ||||
8072 | |||||
8073 | switch (Pred) { | ||||
8074 | case ICmpInst::ICMP_NE: { // while (X != Y) | ||||
8075 | // Convert to: while (X-Y != 0) | ||||
8076 | if (LHS->getType()->isPointerTy()) { | ||||
8077 | LHS = getLosslessPtrToIntExpr(LHS); | ||||
8078 | if (isa<SCEVCouldNotCompute>(LHS)) | ||||
8079 | return LHS; | ||||
8080 | } | ||||
8081 | if (RHS->getType()->isPointerTy()) { | ||||
8082 | RHS = getLosslessPtrToIntExpr(RHS); | ||||
8083 | if (isa<SCEVCouldNotCompute>(RHS)) | ||||
8084 | return RHS; | ||||
8085 | } | ||||
8086 | ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, | ||||
8087 | AllowPredicates); | ||||
8088 | if (EL.hasAnyInfo()) return EL; | ||||
8089 | break; | ||||
8090 | } | ||||
8091 | case ICmpInst::ICMP_EQ: { // while (X == Y) | ||||
8092 | // Convert to: while (X-Y == 0) | ||||
8093 | if (LHS->getType()->isPointerTy()) { | ||||
8094 | LHS = getLosslessPtrToIntExpr(LHS); | ||||
8095 | if (isa<SCEVCouldNotCompute>(LHS)) | ||||
8096 | return LHS; | ||||
8097 | } | ||||
8098 | if (RHS->getType()->isPointerTy()) { | ||||
8099 | RHS = getLosslessPtrToIntExpr(RHS); | ||||
8100 | if (isa<SCEVCouldNotCompute>(RHS)) | ||||
8101 | return RHS; | ||||
8102 | } | ||||
8103 | ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); | ||||
8104 | if (EL.hasAnyInfo()) return EL; | ||||
8105 | break; | ||||
8106 | } | ||||
8107 | case ICmpInst::ICMP_SLT: | ||||
8108 | case ICmpInst::ICMP_ULT: { // while (X < Y) | ||||
8109 | bool IsSigned = Pred == ICmpInst::ICMP_SLT; | ||||
8110 | ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, | ||||
8111 | AllowPredicates); | ||||
8112 | if (EL.hasAnyInfo()) return EL; | ||||
8113 | break; | ||||
8114 | } | ||||
8115 | case ICmpInst::ICMP_SGT: | ||||
8116 | case ICmpInst::ICMP_UGT: { // while (X > Y) | ||||
8117 | bool IsSigned = Pred == ICmpInst::ICMP_SGT; | ||||
8118 | ExitLimit EL = | ||||
8119 | howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, | ||||
8120 | AllowPredicates); | ||||
8121 | if (EL.hasAnyInfo()) return EL; | ||||
8122 | break; | ||||
8123 | } | ||||
8124 | default: | ||||
8125 | break; | ||||
8126 | } | ||||
8127 | |||||
8128 | auto *ExhaustiveCount = | ||||
8129 | computeExitCountExhaustively(L, ExitCond, ExitIfTrue); | ||||
8130 | |||||
8131 | if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) | ||||
8132 | return ExhaustiveCount; | ||||
8133 | |||||
8134 | return computeShiftCompareExitLimit(ExitCond->getOperand(0), | ||||
8135 | ExitCond->getOperand(1), L, OriginalPred); | ||||
8136 | } | ||||
8137 | |||||
8138 | ScalarEvolution::ExitLimit | ||||
8139 | ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, | ||||
8140 | SwitchInst *Switch, | ||||
8141 | BasicBlock *ExitingBlock, | ||||
8142 | bool ControlsExit) { | ||||
8147 8143 | assert(!L->contains(ExitingBlock) && "Not an exiting block!"); | ||||
8144 | |||||
8145 | // Give up if the exit is the default dest of a switch. | ||||
8146 | if (Switch->getDefaultDest() == ExitingBlock) | ||||
8147 | return getCouldNotCompute(); | ||||
8148 | |||||
8153 8149 | assert(L->contains(Switch->getDefaultDest()) && | ||||
8154 8150 |        "Default case must not exit the loop!"); | ||||
8151 | const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); | ||||
8152 | const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); | ||||
8153 | |||||
8154 | // while (X != Y) --> while (X-Y != 0) | ||||
8155 | ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); | ||||
8156 | if (EL.hasAnyInfo()) | ||||
8157 | return EL; | ||||
8158 | |||||
8159 | return getCouldNotCompute(); | ||||
8160 | } | ||||
8161 | |||||
8162 | static ConstantInt * | ||||
8163 | EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, | ||||
8164 | ScalarEvolution &SE) { | ||||
8165 | const SCEV *InVal = SE.getConstant(C); | ||||
8166 | const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); | ||||
8167 |   assert(isa<SCEVConstant>(Val) && | ||||
8168 |          "Evaluation of SCEV at constant didn't fold correctly?"); | ||||
8169 | return cast<SCEVConstant>(Val)->getValue(); | ||||
8170 | } | ||||
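// For example (illustrative values): the affine chrec {3,+,5} evaluated at
// iteration 2 folds to 3 + 2*5 = 13, and the quadratic chrec {0,+,1,+,1} at
// iteration 3 folds to 0 + 3*1 + (3*2/2)*1 = 6; the helper above returns the
// resulting ConstantInt.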
8171 | |||||
8172 | /// Given an exit condition of 'icmp op load X, cst', try to see if we can | ||||
8173 | /// compute the backedge execution count. | ||||
8174 | ScalarEvolution::ExitLimit | ||||
8175 | ScalarEvolution::computeLoadConstantCompareExitLimit( | ||||
8176 | LoadInst *LI, | ||||
8177 | Constant *RHS, | ||||
8178 | const Loop *L, | ||||
8179 | ICmpInst::Predicate predicate) { | ||||
8180 | if (LI->isVolatile()) return getCouldNotCompute(); | ||||
8181 | |||||
8182 | // Check to see if the loaded pointer is a getelementptr of a global. | ||||
8183 | // TODO: Use SCEV instead of manually grubbing with GEPs. | ||||
8184 | GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); | ||||
8185 | if (!GEP) return getCouldNotCompute(); | ||||
8186 | |||||
8187 | // Make sure that it is really a constant global we are gepping, with an | ||||
8188 | // initializer, and make sure the first IDX is really 0. | ||||
8189 | GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); | ||||
8190 | if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || | ||||
8191 | GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || | ||||
8192 | !cast<Constant>(GEP->getOperand(1))->isNullValue()) | ||||
8193 | return getCouldNotCompute(); | ||||
8194 | |||||
8195 | // Okay, we allow one non-constant index into the GEP instruction. | ||||
8196 | Value *VarIdx = nullptr; | ||||
8197 | std::vector<Constant*> Indexes; | ||||
8198 | unsigned VarIdxNum = 0; | ||||
8199 | for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) | ||||
8200 | if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { | ||||
8201 | Indexes.push_back(CI); | ||||
8202 | } else if (!isa<ConstantInt>(GEP->getOperand(i))) { | ||||
8203 | if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's. | ||||
8204 | VarIdx = GEP->getOperand(i); | ||||
8205 | VarIdxNum = i-2; | ||||
8206 | Indexes.push_back(nullptr); | ||||
8207 | } | ||||
8208 | |||||
8209 | // Loop-invariant loads may be a byproduct of loop optimization. Skip them. | ||||
8210 | if (!VarIdx) | ||||
8211 | return getCouldNotCompute(); | ||||
8212 | |||||
8213 | // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. | ||||
8214 | // Check to see if X is a loop variant variable value now. | ||||
8215 | const SCEV *Idx = getSCEV(VarIdx); | ||||
8216 | Idx = getSCEVAtScope(Idx, L); | ||||
8217 | |||||
8218 | // We can only recognize very limited forms of loop index expressions, in | ||||
8219 | // particular, only affine AddRec's like {C1,+,C2}<L>. | ||||
8220 | const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); | ||||
8221 | if (!IdxExpr || IdxExpr->getLoop() != L || !IdxExpr->isAffine() || | ||||
8222 | isLoopInvariant(IdxExpr, L) || | ||||
8223 | !isa<SCEVConstant>(IdxExpr->getOperand(0)) || | ||||
8224 | !isa<SCEVConstant>(IdxExpr->getOperand(1))) | ||||
8225 | return getCouldNotCompute(); | ||||
8226 | |||||
8227 | unsigned MaxSteps = MaxBruteForceIterations; | ||||
8228 | for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { | ||||
8229 | ConstantInt *ItCst = ConstantInt::get( | ||||
8230 | cast<IntegerType>(IdxExpr->getType()), IterationNum); | ||||
8231 | ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); | ||||
8232 | |||||
8233 | // Form the GEP offset. | ||||
8234 | Indexes[VarIdxNum] = Val; | ||||
8235 | |||||
8236 | Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(), | ||||
8237 | Indexes); | ||||
8238 | if (!Result) break; // Cannot compute! | ||||
8239 | |||||
8240 | // Evaluate the condition for this iteration. | ||||
8241 | Result = ConstantExpr::getICmp(predicate, Result, RHS); | ||||
8242 | if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure | ||||
8243 | if (cast<ConstantInt>(Result)->getValue().isMinValue()) { | ||||
8244 | ++NumArrayLenItCounts; | ||||
8245 | return getConstant(ItCst); // Found terminating iteration! | ||||
8246 | } | ||||
8247 | } | ||||
8248 | return getCouldNotCompute(); | ||||
8249 | } | ||||
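// A sketch of the brute-force search above, with hypothetical IR: given a
// constant global `int T[] = {1, 2, 3, 0, 5};` and an exit condition
// `T[i] != 0` where i == {0,+,1}<L>, iterations 0..2 load nonzero values and
// iteration 3 loads 0, so the backedge-taken count folds to the constant 3.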
8250 | |||||
8251 | ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit( | ||||
8252 | Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) { | ||||
8253 | ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV); | ||||
8254 | if (!RHS) | ||||
8255 | return getCouldNotCompute(); | ||||
8256 | |||||
8257 | const BasicBlock *Latch = L->getLoopLatch(); | ||||
8258 | if (!Latch) | ||||
8259 | return getCouldNotCompute(); | ||||
8260 | |||||
8261 | const BasicBlock *Predecessor = L->getLoopPredecessor(); | ||||
8262 | if (!Predecessor) | ||||
8263 | return getCouldNotCompute(); | ||||
8264 | |||||
8265 | // Return true if V is of the form "LHS `shift_op` <positive constant>". | ||||
8266 |   // Return LHS in OutLHS and shift_op in OutOpCode. | ||||
8267 | auto MatchPositiveShift = | ||||
8268 | [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) { | ||||
8269 | |||||
8270 | using namespace PatternMatch; | ||||
8271 | |||||
8272 | ConstantInt *ShiftAmt; | ||||
8273 | if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) | ||||
8274 | OutOpCode = Instruction::LShr; | ||||
8275 | else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) | ||||
8276 | OutOpCode = Instruction::AShr; | ||||
8277 | else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) | ||||
8278 | OutOpCode = Instruction::Shl; | ||||
8279 | else | ||||
8280 | return false; | ||||
8281 | |||||
8282 | return ShiftAmt->getValue().isStrictlyPositive(); | ||||
8283 | }; | ||||
8284 | |||||
8285 | // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in | ||||
8286 | // | ||||
8287 | // loop: | ||||
8288 | // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ] | ||||
8289 | // %iv.shifted = lshr i32 %iv, <positive constant> | ||||
8290 | // | ||||
8291 | // Return true on a successful match. Return the corresponding PHI node (%iv | ||||
8292 | // above) in PNOut and the opcode of the shift operation in OpCodeOut. | ||||
8293 | auto MatchShiftRecurrence = | ||||
8294 | [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) { | ||||
8295 | Optional<Instruction::BinaryOps> PostShiftOpCode; | ||||
8296 | |||||
8297 | { | ||||
8298 | Instruction::BinaryOps OpC; | ||||
8299 | Value *V; | ||||
8300 | |||||
8301 | // If we encounter a shift instruction, "peel off" the shift operation, | ||||
8302 | // and remember that we did so. Later when we inspect %iv's backedge | ||||
8303 | // value, we will make sure that the backedge value uses the same | ||||
8304 | // operation. | ||||
8305 | // | ||||
8306 | // Note: the peeled shift operation does not have to be the same | ||||
8307 | // instruction as the one feeding into the PHI's backedge value. We only | ||||
8308 | // really care about it being the same *kind* of shift instruction -- | ||||
8309 | // that's all that is required for our later inferences to hold. | ||||
8310 | if (MatchPositiveShift(LHS, V, OpC)) { | ||||
8311 | PostShiftOpCode = OpC; | ||||
8312 | LHS = V; | ||||
8313 | } | ||||
8314 | } | ||||
8315 | |||||
8316 | PNOut = dyn_cast<PHINode>(LHS); | ||||
8317 | if (!PNOut || PNOut->getParent() != L->getHeader()) | ||||
8318 | return false; | ||||
8319 | |||||
8320 | Value *BEValue = PNOut->getIncomingValueForBlock(Latch); | ||||
8321 | Value *OpLHS; | ||||
8322 | |||||
8323 | return | ||||
8324 | // The backedge value for the PHI node must be a shift by a positive | ||||
8325 | // amount | ||||
8326 | MatchPositiveShift(BEValue, OpLHS, OpCodeOut) && | ||||
8327 | |||||
8328 | // of the PHI node itself | ||||
8329 | OpLHS == PNOut && | ||||
8330 | |||||
8331 |         // and the kind of shift should match the kind of shift we peeled | ||||
8332 | // off, if any. | ||||
8333 | (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut); | ||||
8334 | }; | ||||
8335 | |||||
8336 | PHINode *PN; | ||||
8337 | Instruction::BinaryOps OpCode; | ||||
8338 | if (!MatchShiftRecurrence(LHS, PN, OpCode)) | ||||
8339 | return getCouldNotCompute(); | ||||
8340 | |||||
8341 | const DataLayout &DL = getDataLayout(); | ||||
8342 | |||||
8343 | // The key rationale for this optimization is that for some kinds of shift | ||||
8344 | // recurrences, the value of the recurrence "stabilizes" to either 0 or -1 | ||||
8345 | // within a finite number of iterations. If the condition guarding the | ||||
8346 | // backedge (in the sense that the backedge is taken if the condition is true) | ||||
8347 | // is false for the value the shift recurrence stabilizes to, then we know | ||||
8348 | // that the backedge is taken only a finite number of times. | ||||
8349 | |||||
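// For example, {8,lshr,1} takes the values 8, 4, 2, 1, 0, 0, ... and
// {-1,ashr,1} is -1 from the start. Once the recurrence reaches its stable
// value, a backedge guarded by a condition that is false for that value can
// only be taken a bounded number of further times.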
8350 | ConstantInt *StableValue = nullptr; | ||||
8351 | switch (OpCode) { | ||||
8352 | default: | ||||
8353 |     llvm_unreachable("Impossible case!"); | ||||
8354 | |||||
8355 | case Instruction::AShr: { | ||||
8356 | // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most | ||||
8357 | // bitwidth(K) iterations. | ||||
8358 | Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); | ||||
8359 | KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC, | ||||
8360 | Predecessor->getTerminator(), &DT); | ||||
8361 | auto *Ty = cast<IntegerType>(RHS->getType()); | ||||
8362 | if (Known.isNonNegative()) | ||||
8363 | StableValue = ConstantInt::get(Ty, 0); | ||||
8364 | else if (Known.isNegative()) | ||||
8365 | StableValue = ConstantInt::get(Ty, -1, true); | ||||
8366 | else | ||||
8367 | return getCouldNotCompute(); | ||||
8368 | |||||
8369 | break; | ||||
8370 | } | ||||
8371 | case Instruction::LShr: | ||||
8372 | case Instruction::Shl: | ||||
8373 | // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} | ||||
8374 | // stabilize to 0 in at most bitwidth(K) iterations. | ||||
8375 | StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); | ||||
8376 | break; | ||||
8377 | } | ||||
8378 | |||||
8379 | auto *Result = | ||||
8380 | ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); | ||||
8381 |   assert(Result->getType()->isIntegerTy(1) && | ||||
8382 |          "Otherwise cannot be an operand to a branch instruction"); | ||||
8383 | |||||
8384 | if (Result->isZeroValue()) { | ||||
8385 | unsigned BitWidth = getTypeSizeInBits(RHS->getType()); | ||||
8386 | const SCEV *UpperBound = | ||||
8387 | getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); | ||||
8388 | return ExitLimit(getCouldNotCompute(), UpperBound, false); | ||||
8389 | } | ||||
8390 | |||||
8391 | return getCouldNotCompute(); | ||||
8392 | } | ||||
8393 | |||||
8394 | /// Return true if we can constant fold an instruction of the specified type, | ||||
8395 | /// assuming that all operands were constants. | ||||
8396 | static bool CanConstantFold(const Instruction *I) { | ||||
8397 | if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || | ||||
8398 | isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || | ||||
8399 | isa<LoadInst>(I) || isa<ExtractValueInst>(I)) | ||||
8400 | return true; | ||||
8401 | |||||
8402 | if (const CallInst *CI = dyn_cast<CallInst>(I)) | ||||
8403 | if (const Function *F = CI->getCalledFunction()) | ||||
8404 | return canConstantFoldCallTo(CI, F); | ||||
8405 | return false; | ||||
8406 | } | ||||
8407 | |||||
8408 | /// Determine whether this instruction can constant evolve within this loop | ||||
8409 | /// assuming its operands can all constant evolve. | ||||
8410 | static bool canConstantEvolve(Instruction *I, const Loop *L) { | ||||
8411 | // An instruction outside of the loop can't be derived from a loop PHI. | ||||
8412 | if (!L->contains(I)) return false; | ||||
8413 | |||||
8414 | if (isa<PHINode>(I)) { | ||||
8415 | // We don't currently keep track of the control flow needed to evaluate | ||||
8416 | // PHIs, so we cannot handle PHIs inside of loops. | ||||
8417 | return L->getHeader() == I->getParent(); | ||||
8418 | } | ||||
8419 | |||||
8420 | // If we won't be able to constant fold this expression even if the operands | ||||
8421 | // are constants, bail early. | ||||
8422 | return CanConstantFold(I); | ||||
8423 | } | ||||
8424 | |||||
8425 | /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by | ||||
8426 | /// recursing through each instruction operand until reaching a loop header phi. | ||||
8427 | static PHINode * | ||||
8428 | getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, | ||||
8429 | DenseMap<Instruction *, PHINode *> &PHIMap, | ||||
8430 | unsigned Depth) { | ||||
8431 | if (Depth > MaxConstantEvolvingDepth) | ||||
8432 | return nullptr; | ||||
8433 | |||||
8434 | // Otherwise, we can evaluate this instruction if all of its operands are | ||||
8435 | // constant or derived from a PHI node themselves. | ||||
8436 | PHINode *PHI = nullptr; | ||||
8437 | for (Value *Op : UseInst->operands()) { | ||||
8438 | if (isa<Constant>(Op)) continue; | ||||
8439 | |||||
8440 | Instruction *OpInst = dyn_cast<Instruction>(Op); | ||||
8441 | if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; | ||||
8442 | |||||
8443 | PHINode *P = dyn_cast<PHINode>(OpInst); | ||||
8444 | if (!P) | ||||
8445 | // If this operand is already visited, reuse the prior result. | ||||
8446 | // We may have P != PHI if this is the deepest point at which the | ||||
8447 | // inconsistent paths meet. | ||||
8448 | P = PHIMap.lookup(OpInst); | ||||
8449 | if (!P) { | ||||
8450 | // Recurse and memoize the results, whether a phi is found or not. | ||||
8451 | // This recursive call invalidates pointers into PHIMap. | ||||
8452 | P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1); | ||||
8453 | PHIMap[OpInst] = P; | ||||
8454 | } | ||||
8455 | if (!P) | ||||
8456 | return nullptr; // Not evolving from PHI | ||||
8457 | if (PHI && PHI != P) | ||||
8458 | return nullptr; // Evolving from multiple different PHIs. | ||||
8459 | PHI = P; | ||||
8460 | } | ||||
8461 |   // This is an expression evolving from a constant PHI! | ||||
8462 | return PHI; | ||||
8463 | } | ||||
8464 | |||||
8465 | /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node | ||||
8466 | /// in the loop that V is derived from. We allow arbitrary operations along the | ||||
8467 | /// way, but the operands of an operation must either be constants or a value | ||||
8468 | /// derived from a constant PHI. If this expression does not fit with these | ||||
8469 | /// constraints, return null. | ||||
8470 | static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { | ||||
8471 | Instruction *I = dyn_cast<Instruction>(V); | ||||
8472 | if (!I || !canConstantEvolve(I, L)) return nullptr; | ||||
8473 | |||||
8474 | if (PHINode *PN = dyn_cast<PHINode>(I)) | ||||
8475 | return PN; | ||||
8476 | |||||
8477 | // Record non-constant instructions contained by the loop. | ||||
8478 | DenseMap<Instruction *, PHINode *> PHIMap; | ||||
8479 | return getConstantEvolvingPHIOperands(I, L, PHIMap, 0); | ||||
8480 | } | ||||
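// For example, given the hypothetical IR
//   %iv      = phi i32 [ 0, %preheader ], [ %iv.next, %loop ]
//   %t       = mul i32 %iv, 3
//   %iv.next = add i32 %t, 1
// every operand chain of %iv.next leads back to the single header phi, so
// getConstantEvolvingPHI(%iv.next, L) returns %iv.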
8481 | |||||
8482 | /// EvaluateExpression - Given an expression that passes the | ||||
8483 | /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node | ||||
8484 | /// in the loop has the value PHIVal. If we can't fold this expression for some | ||||
8485 | /// reason, return null. | ||||
8486 | static Constant *EvaluateExpression(Value *V, const Loop *L, | ||||
8487 | DenseMap<Instruction *, Constant *> &Vals, | ||||
8488 | const DataLayout &DL, | ||||
8489 | const TargetLibraryInfo *TLI) { | ||||
8490 | // Convenient constant check, but redundant for recursive calls. | ||||
8491 | if (Constant *C = dyn_cast<Constant>(V)) return C; | ||||
8492 | Instruction *I = dyn_cast<Instruction>(V); | ||||
8493 | if (!I) return nullptr; | ||||
8494 | |||||
8495 | if (Constant *C = Vals.lookup(I)) return C; | ||||
8496 | |||||
8497 | // An instruction inside the loop depends on a value outside the loop that we | ||||
8498 | // weren't given a mapping for, or a value such as a call inside the loop. | ||||
8499 | if (!canConstantEvolve(I, L)) return nullptr; | ||||
8500 | |||||
8501 | // An unmapped PHI can be due to a branch or another loop inside this loop, | ||||
8502 | // or due to this not being the initial iteration through a loop where we | ||||
8503 | // couldn't compute the evolution of this particular PHI last time. | ||||
8504 | if (isa<PHINode>(I)) return nullptr; | ||||
8505 | |||||
8506 | std::vector<Constant*> Operands(I->getNumOperands()); | ||||
8507 | |||||
8508 | for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { | ||||
8509 | Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); | ||||
8510 | if (!Operand) { | ||||
8511 | Operands[i] = dyn_cast<Constant>(I->getOperand(i)); | ||||
8512 | if (!Operands[i]) return nullptr; | ||||
8513 | continue; | ||||
8514 | } | ||||
8515 | Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); | ||||
8516 | Vals[Operand] = C; | ||||
8517 | if (!C) return nullptr; | ||||
8518 | Operands[i] = C; | ||||
8519 | } | ||||
8520 | |||||
8521 | if (CmpInst *CI = dyn_cast<CmpInst>(I)) | ||||
8522 | return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], | ||||
8523 | Operands[1], DL, TLI); | ||||
8524 | if (LoadInst *LI = dyn_cast<LoadInst>(I)) { | ||||
8525 | if (!LI->isVolatile()) | ||||
8526 | return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); | ||||
8527 | } | ||||
8528 | return ConstantFoldInstOperands(I, Operands, DL, TLI); | ||||
8529 | } | ||||
8530 | |||||
8531 | |||||
8532 | // If every incoming value to PN except the one for BB is a specific Constant, | ||||
8533 | // return that, else return nullptr. | ||||
8534 | static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { | ||||
8535 | Constant *IncomingVal = nullptr; | ||||
8536 | |||||
8537 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { | ||||
8538 | if (PN->getIncomingBlock(i) == BB) | ||||
8539 | continue; | ||||
8540 | |||||
8541 | auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); | ||||
8542 | if (!CurrentVal) | ||||
8543 | return nullptr; | ||||
8544 | |||||
8545 | if (IncomingVal != CurrentVal) { | ||||
8546 | if (IncomingVal) | ||||
8547 | return nullptr; | ||||
8548 | IncomingVal = CurrentVal; | ||||
8549 | } | ||||
8550 | } | ||||
8551 | |||||
8552 | return IncomingVal; | ||||
8553 | } | ||||
8554 | |||||
8555 | /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is | ||||
8556 | /// in the header of its containing loop, we know the loop executes a | ||||
8557 | /// constant number of times, and the PHI node is just a recurrence | ||||
8558 | /// involving constants, fold it. | ||||
8559 | Constant * | ||||
8560 | ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, | ||||
8561 | const APInt &BEs, | ||||
8562 | const Loop *L) { | ||||
8563 | auto I = ConstantEvolutionLoopExitValue.find(PN); | ||||
8564 | if (I != ConstantEvolutionLoopExitValue.end()) | ||||
8565 | return I->second; | ||||
8566 | |||||
8567 | if (BEs.ugt(MaxBruteForceIterations)) | ||||
8568 | return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. | ||||
8569 | |||||
8570 | Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; | ||||
8571 | |||||
8572 | DenseMap<Instruction *, Constant *> CurrentIterVals; | ||||
8573 | BasicBlock *Header = L->getHeader(); | ||||
8574 |   assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); | ||||
8575 | |||||
8576 | BasicBlock *Latch = L->getLoopLatch(); | ||||
8577 | if (!Latch) | ||||
8578 | return nullptr; | ||||
8579 | |||||
8580 | for (PHINode &PHI : Header->phis()) { | ||||
8581 | if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) | ||||
8582 | CurrentIterVals[&PHI] = StartCST; | ||||
8583 | } | ||||
8584 | if (!CurrentIterVals.count(PN)) | ||||
8585 | return RetVal = nullptr; | ||||
8586 | |||||
8587 | Value *BEValue = PN->getIncomingValueForBlock(Latch); | ||||
8588 | |||||
8589 | // Execute the loop symbolically to determine the exit value. | ||||
8590 |   assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && | ||||
8591 |          "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); | ||||
8592 | |||||
8593 | unsigned NumIterations = BEs.getZExtValue(); // must be in range | ||||
8594 | unsigned IterationNum = 0; | ||||
8595 | const DataLayout &DL = getDataLayout(); | ||||
8596 | for (; ; ++IterationNum) { | ||||
8597 | if (IterationNum == NumIterations) | ||||
8598 | return RetVal = CurrentIterVals[PN]; // Got exit value! | ||||
8599 | |||||
8600 | // Compute the value of the PHIs for the next iteration. | ||||
8601 | // EvaluateExpression adds non-phi values to the CurrentIterVals map. | ||||
8602 | DenseMap<Instruction *, Constant *> NextIterVals; | ||||
8603 | Constant *NextPHI = | ||||
8604 | EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); | ||||
8605 | if (!NextPHI) | ||||
8606 | return nullptr; // Couldn't evaluate! | ||||
8607 | NextIterVals[PN] = NextPHI; | ||||
8608 | |||||
8609 | bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; | ||||
8610 | |||||
8611 | // Also evaluate the other PHI nodes. However, we don't get to stop if we | ||||
8612 | // cease to be able to evaluate one of them or if they stop evolving, | ||||
8613 | // because that doesn't necessarily prevent us from computing PN. | ||||
8614 | SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; | ||||
8615 | for (const auto &I : CurrentIterVals) { | ||||
8616 | PHINode *PHI = dyn_cast<PHINode>(I.first); | ||||
8617 | if (!PHI || PHI == PN || PHI->getParent() != Header) continue; | ||||
8618 | PHIsToCompute.emplace_back(PHI, I.second); | ||||
8619 | } | ||||
8620 | // We use two distinct loops because EvaluateExpression may invalidate any | ||||
8621 | // iterators into CurrentIterVals. | ||||
8622 | for (const auto &I : PHIsToCompute) { | ||||
8623 | PHINode *PHI = I.first; | ||||
8624 | Constant *&NextPHI = NextIterVals[PHI]; | ||||
8625 | if (!NextPHI) { // Not already computed. | ||||
8626 | Value *BEValue = PHI->getIncomingValueForBlock(Latch); | ||||
8627 | NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); | ||||
8628 | } | ||||
8629 | if (NextPHI != I.second) | ||||
8630 | StoppedEvolving = false; | ||||
8631 | } | ||||
8632 | |||||
8633 | // If all entries in CurrentIterVals == NextIterVals then we can stop | ||||
8634 | // iterating, the loop can't continue to change. | ||||
8635 | if (StoppedEvolving) | ||||
8636 | return RetVal = CurrentIterVals[PN]; | ||||
8637 | |||||
8638 | CurrentIterVals.swap(NextIterVals); | ||||
8639 | } | ||||
8640 | } | ||||
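// For example, with the hypothetical IR %x = phi i32 [ 1, %pre ], [ %x2, %latch ]
// and %x2 = mul i32 %x, 2, a backedge-taken count of 3 walks the values
// 1, 2, 4, 8, so the exit value of %x folds to the constant 8.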
8641 | |||||
8642 | const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L, | ||||
8643 | Value *Cond, | ||||
8644 | bool ExitWhen) { | ||||
8645 | PHINode *PN = getConstantEvolvingPHI(Cond, L); | ||||
8646 | if (!PN) return getCouldNotCompute(); | ||||
8647 | |||||
8648 | // If the loop is canonicalized, the PHI will have exactly two entries. | ||||
8649 | // That's the only form we support here. | ||||
8650 | if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); | ||||
8651 | |||||
8652 | DenseMap<Instruction *, Constant *> CurrentIterVals; | ||||
8653 | BasicBlock *Header = L->getHeader(); | ||||
8654 |   assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); | ||||
8655 | |||||
8656 | BasicBlock *Latch = L->getLoopLatch(); | ||||
8657 |   assert(Latch && "Should follow from NumIncomingValues == 2!"); | ||||
8658 | |||||
8659 | for (PHINode &PHI : Header->phis()) { | ||||
8660 | if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) | ||||
8661 | CurrentIterVals[&PHI] = StartCST; | ||||
8662 | } | ||||
8663 | if (!CurrentIterVals.count(PN)) | ||||
8664 | return getCouldNotCompute(); | ||||
8665 | |||||
8666 |   // Okay, we found a PHI node that defines the trip count of this loop. Execute | ||||
8667 | // the loop symbolically to determine when the condition gets a value of | ||||
8668 | // "ExitWhen". | ||||
8669 | unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. | ||||
8670 | const DataLayout &DL = getDataLayout(); | ||||
8671 | for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){ | ||||
8672 | auto *CondVal = dyn_cast_or_null<ConstantInt>( | ||||
8673 | EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI)); | ||||
8674 | |||||
8675 | // Couldn't symbolically evaluate. | ||||
8676 | if (!CondVal) return getCouldNotCompute(); | ||||
8677 | |||||
8678 | if (CondVal->getValue() == uint64_t(ExitWhen)) { | ||||
8679 | ++NumBruteForceTripCountsComputed; | ||||
8680 | return getConstant(Type::getInt32Ty(getContext()), IterationNum); | ||||
8681 | } | ||||
8682 | |||||
8683 | // Update all the PHI nodes for the next iteration. | ||||
8684 | DenseMap<Instruction *, Constant *> NextIterVals; | ||||
8685 | |||||
8686 | // Create a list of which PHIs we need to compute. We want to do this before | ||||
8687 | // calling EvaluateExpression on them because that may invalidate iterators | ||||
8688 | // into CurrentIterVals. | ||||
8689 | SmallVector<PHINode *, 8> PHIsToCompute; | ||||
8690 | for (const auto &I : CurrentIterVals) { | ||||
8691 | PHINode *PHI = dyn_cast<PHINode>(I.first); | ||||
8692 | if (!PHI || PHI->getParent() != Header) continue; | ||||
8693 | PHIsToCompute.push_back(PHI); | ||||
8694 | } | ||||
8695 | for (PHINode *PHI : PHIsToCompute) { | ||||
8696 | Constant *&NextPHI = NextIterVals[PHI]; | ||||
8697 | if (NextPHI) continue; // Already computed! | ||||
8698 | |||||
8699 | Value *BEValue = PHI->getIncomingValueForBlock(Latch); | ||||
8700 | NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); | ||||
8701 | } | ||||
8702 | CurrentIterVals.swap(NextIterVals); | ||||
8703 | } | ||||
8704 | |||||
8705 | // Too many iterations were needed to evaluate. | ||||
8706 | return getCouldNotCompute(); | ||||
8707 | } | ||||
8708 | |||||
8709 | const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { | ||||
8710 | SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = | ||||
8711 | ValuesAtScopes[V]; | ||||
8712 | // Check to see if we've folded this expression at this loop before. | ||||
8713 | for (auto &LS : Values) | ||||
8714 | if (LS.first == L) | ||||
8715 | return LS.second ? LS.second : V; | ||||
8716 | |||||
8717 | Values.emplace_back(L, nullptr); | ||||
8718 | |||||
8719 | // Otherwise compute it. | ||||
8720 | const SCEV *C = computeSCEVAtScope(V, L); | ||||
8721 | for (auto &LS : reverse(ValuesAtScopes[V])) | ||||
8722 | if (LS.first == L) { | ||||
8723 | LS.second = C; | ||||
8724 | break; | ||||
8725 | } | ||||
8726 | return C; | ||||
8727 | } | ||||
8728 | |||||
8729 | /// This builds up a Constant using the ConstantExpr interface. That way, we | ||||
8730 | /// will return Constants for objects which aren't represented by a | ||||
8731 | /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. | ||||
8732 | /// Returns NULL if the SCEV isn't representable as a Constant. | ||||
8733 | static Constant *BuildConstantFromSCEV(const SCEV *V) { | ||||
8734 | switch (V->getSCEVType()) { | ||||
8735 | case scCouldNotCompute: | ||||
8736 | case scAddRecExpr: | ||||
8737 | return nullptr; | ||||
8738 | case scConstant: | ||||
8739 | return cast<SCEVConstant>(V)->getValue(); | ||||
8740 | case scUnknown: | ||||
8741 | return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); | ||||
8742 | case scSignExtend: { | ||||
8743 | const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); | ||||
8744 | if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) | ||||
8745 | return ConstantExpr::getSExt(CastOp, SS->getType()); | ||||
8746 | return nullptr; | ||||
8747 | } | ||||
8748 | case scZeroExtend: { | ||||
8749 | const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); | ||||
8750 | if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) | ||||
8751 | return ConstantExpr::getZExt(CastOp, SZ->getType()); | ||||
8752 | return nullptr; | ||||
8753 | } | ||||
8754 | case scPtrToInt: { | ||||
8755 | const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V); | ||||
8756 | if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand())) | ||||
8757 | return ConstantExpr::getPtrToInt(CastOp, P2I->getType()); | ||||
8758 | |||||
8759 | return nullptr; | ||||
8760 | } | ||||
8761 | case scTruncate: { | ||||
8762 | const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); | ||||
8763 | if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) | ||||
8764 | return ConstantExpr::getTrunc(CastOp, ST->getType()); | ||||
8765 | return nullptr; | ||||
8766 | } | ||||
8767 | case scAddExpr: { | ||||
8768 | const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); | ||||
8769 | if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { | ||||
8770 | if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { | ||||
8771 | unsigned AS = PTy->getAddressSpace(); | ||||
8772 | Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); | ||||
8773 | C = ConstantExpr::getBitCast(C, DestPtrTy); | ||||
8774 | } | ||||
8775 | for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { | ||||
8776 | Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); | ||||
8777 | if (!C2) | ||||
8778 | return nullptr; | ||||
8779 | |||||
8780 | // First pointer! | ||||
8781 | if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { | ||||
8782 | unsigned AS = C2->getType()->getPointerAddressSpace(); | ||||
8783 | std::swap(C, C2); | ||||
8784 | Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); | ||||
8785 | // The offsets have been converted to bytes. We can add bytes to an | ||||
8786 | // i8* by GEP with the byte count in the first index. | ||||
8787 | C = ConstantExpr::getBitCast(C, DestPtrTy); | ||||
8788 | } | ||||
8789 | |||||
8790 | // Don't bother trying to sum two pointers. We probably can't | ||||
8791 | // statically compute a load that results from it anyway. | ||||
8792 | if (C2->getType()->isPointerTy()) | ||||
8793 | return nullptr; | ||||
8794 | |||||
8795 | if (C->getType()->isPointerTy()) { | ||||
8796 | C = ConstantExpr::getGetElementPtr(Type::getInt8Ty(C->getContext()), | ||||
8797 | C, C2); | ||||
8798 | } else { | ||||
8799 | C = ConstantExpr::getAdd(C, C2); | ||||
8800 | } | ||||
8801 | } | ||||
8802 | return C; | ||||
8803 | } | ||||
8804 | return nullptr; | ||||
8805 | } | ||||
8806 | case scMulExpr: { | ||||
8807 | const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); | ||||
8808 | if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { | ||||
8809 | // Don't bother with pointers at all. | ||||
8810 | if (C->getType()->isPointerTy()) | ||||
8811 | return nullptr; | ||||
8812 | for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { | ||||
8813 | Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); | ||||
8814 | if (!C2 || C2->getType()->isPointerTy()) | ||||
8815 | return nullptr; | ||||
8816 | C = ConstantExpr::getMul(C, C2); | ||||
8817 | } | ||||
8818 | return C; | ||||
8819 | } | ||||
8820 | return nullptr; | ||||
8821 | } | ||||
8822 | case scUDivExpr: { | ||||
8823 | const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); | ||||
8824 | if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) | ||||
8825 | if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) | ||||
8826 | if (LHS->getType() == RHS->getType()) | ||||
8827 | return ConstantExpr::getUDiv(LHS, RHS); | ||||
8828 | return nullptr; | ||||
8829 | } | ||||
8830 | case scSMaxExpr: | ||||
8831 | case scUMaxExpr: | ||||
8832 | case scSMinExpr: | ||||
8833 | case scUMinExpr: | ||||
8834 | return nullptr; // TODO: smax, umax, smin, umax. | ||||
8835 | } | ||||
8836 |   llvm_unreachable("Unknown SCEV kind!"); | ||||
8837 | } | ||||
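// For example, an scAddExpr of a constant global @g and the constant 16 is
// built as byte addressing: bitcast @g to i8*, then a getelementptr of 16 i8
// elements, matching the byte-offset convention for pointer SCEVs noted above.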
8838 | |||||
8839 | const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { | ||||
8840 | if (isa<SCEVConstant>(V)) return V; | ||||
8841 | |||||
8842 | // If this instruction is evolved from a constant-evolving PHI, compute the | ||||
8843 | // exit value from the loop without using SCEVs. | ||||
8844 | if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { | ||||
8845 | if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { | ||||
8846 | if (PHINode *PN = dyn_cast<PHINode>(I)) { | ||||
8847 | const Loop *CurrLoop = this->LI[I->getParent()]; | ||||
8848 | // Looking for loop exit value. | ||||
8849 | if (CurrLoop && CurrLoop->getParentLoop() == L && | ||||
8850 | PN->getParent() == CurrLoop->getHeader()) { | ||||
8851 | // Okay, there is no closed form solution for the PHI node. Check | ||||
8852 | // to see if the loop that contains it has a known backedge-taken | ||||
8853 | // count. If so, we may be able to force computation of the exit | ||||
8854 | // value. | ||||
8855 | const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop); | ||||
8856 | // This trivial case can show up in some degenerate cases where | ||||
8857 | // the incoming IR has not yet been fully simplified. | ||||
8858 | if (BackedgeTakenCount->isZero()) { | ||||
8859 | Value *InitValue = nullptr; | ||||
8860 | bool MultipleInitValues = false; | ||||
8861 | for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { | ||||
8862 | if (!CurrLoop->contains(PN->getIncomingBlock(i))) { | ||||
8863 | if (!InitValue) | ||||
8864 | InitValue = PN->getIncomingValue(i); | ||||
8865 | else if (InitValue != PN->getIncomingValue(i)) { | ||||
8866 | MultipleInitValues = true; | ||||
8867 | break; | ||||
8868 | } | ||||
8869 | } | ||||
8870 | } | ||||
8871 | if (!MultipleInitValues && InitValue) | ||||
8872 | return getSCEV(InitValue); | ||||
8873 | } | ||||
8874 | // Do we have a loop invariant value flowing around the backedge | ||||
8875 | // for a loop which must execute the backedge? | ||||
8876 | if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && | ||||
8877 | isKnownPositive(BackedgeTakenCount) && | ||||
8878 | PN->getNumIncomingValues() == 2) { | ||||
8879 | |||||
8880 | unsigned InLoopPred = | ||||
8881 | CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1; | ||||
8882 | Value *BackedgeVal = PN->getIncomingValue(InLoopPred); | ||||
8883 | if (CurrLoop->isLoopInvariant(BackedgeVal)) | ||||
8884 | return getSCEV(BackedgeVal); | ||||
8885 | } | ||||
8886 | if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) { | ||||
8887 | // Okay, we know how many times the containing loop executes. If | ||||
8888 | // this is a constant evolving PHI node, get the final value at | ||||
8889 | // the specified iteration number. | ||||
8890 | Constant *RV = getConstantEvolutionLoopExitValue( | ||||
8891 | PN, BTCC->getAPInt(), CurrLoop); | ||||
8892 | if (RV) return getSCEV(RV); | ||||
8893 | } | ||||
8894 | } | ||||
8895 | |||||
8896 | // If there is a single-input Phi, evaluate it at our scope. If we can | ||||
8897 |       // prove that this replacement does not break LCSSA form, use the new value. | ||||
8898 | if (PN->getNumOperands() == 1) { | ||||
8899 | const SCEV *Input = getSCEV(PN->getOperand(0)); | ||||
8900 | const SCEV *InputAtScope = getSCEVAtScope(Input, L); | ||||
8901 | // TODO: We can generalize it using LI.replacementPreservesLCSSAForm, | ||||
8902 | // for the simplest case just support constants. | ||||
8903 | if (isa<SCEVConstant>(InputAtScope)) return InputAtScope; | ||||
8904 | } | ||||
8905 | } | ||||
8906 | |||||
8907 | // Okay, this is an expression that we cannot symbolically evaluate | ||||
8908 | // into a SCEV. Check to see if it's possible to symbolically evaluate | ||||
8909 | // the arguments into constants, and if so, try to constant propagate the | ||||
8910 | // result. This is particularly useful for computing loop exit values. | ||||
8911 | if (CanConstantFold(I)) { | ||||
8912 | SmallVector<Constant *, 4> Operands; | ||||
8913 | bool MadeImprovement = false; | ||||
8914 | for (Value *Op : I->operands()) { | ||||
8915 | if (Constant *C = dyn_cast<Constant>(Op)) { | ||||
8916 | Operands.push_back(C); | ||||
8917 | continue; | ||||
8918 | } | ||||
8919 | |||||
8920 | // If any of the operands is non-constant and if they are | ||||
8921 | // non-integer and non-pointer, don't even try to analyze them | ||||
8922 | // with scev techniques. | ||||
8923 | if (!isSCEVable(Op->getType())) | ||||
8924 | return V; | ||||
8925 | |||||
8926 | const SCEV *OrigV = getSCEV(Op); | ||||
8927 | const SCEV *OpV = getSCEVAtScope(OrigV, L); | ||||
8928 | MadeImprovement |= OrigV != OpV; | ||||
8929 | |||||
8930 | Constant *C = BuildConstantFromSCEV(OpV); | ||||
8931 | if (!C) return V; | ||||
8932 | if (C->getType() != Op->getType()) | ||||
8933 | C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, | ||||
8934 | Op->getType(), | ||||
8935 | false), | ||||
8936 | C, Op->getType()); | ||||
8937 | Operands.push_back(C); | ||||
8938 | } | ||||
8939 | |||||
8940 | // Check to see if getSCEVAtScope actually made an improvement. | ||||
8941 | if (MadeImprovement) { | ||||
8942 | Constant *C = nullptr; | ||||
8943 | const DataLayout &DL = getDataLayout(); | ||||
8944 | if (const CmpInst *CI = dyn_cast<CmpInst>(I)) | ||||
8945 | C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], | ||||
8946 | Operands[1], DL, &TLI); | ||||
8947 | else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) { | ||||
8948 | if (!Load->isVolatile()) | ||||
8949 | C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(), | ||||
8950 | DL); | ||||
8951 | } else | ||||
8952 | C = ConstantFoldInstOperands(I, Operands, DL, &TLI); | ||||
8953 | if (!C) return V; | ||||
8954 | return getSCEV(C); | ||||
8955 | } | ||||
8956 | } | ||||
8957 | } | ||||
8958 | |||||
8959 | // This is some other type of SCEVUnknown, just return it. | ||||
8960 | return V; | ||||
8961 | } | ||||
8962 | |||||
8963 | if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { | ||||
8964 | // Avoid performing the look-up in the common case where the specified | ||||
8965 | // expression has no loop-variant portions. | ||||
8966 | for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { | ||||
8967 | const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); | ||||
8968 | if (OpAtScope != Comm->getOperand(i)) { | ||||
8969 | // Okay, at least one of these operands is loop variant but might be | ||||
8970 | // foldable. Build a new instance of the folded commutative expression. | ||||
8971 | SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), | ||||
8972 | Comm->op_begin()+i); | ||||
8973 | NewOps.push_back(OpAtScope); | ||||
8974 | |||||
8975 | for (++i; i != e; ++i) { | ||||
8976 | OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); | ||||
8977 | NewOps.push_back(OpAtScope); | ||||
8978 | } | ||||
8979 | if (isa<SCEVAddExpr>(Comm)) | ||||
8980 | return getAddExpr(NewOps, Comm->getNoWrapFlags()); | ||||
8981 | if (isa<SCEVMulExpr>(Comm)) | ||||
8982 | return getMulExpr(NewOps, Comm->getNoWrapFlags()); | ||||
8983 | if (isa<SCEVMinMaxExpr>(Comm)) | ||||
8984 | return getMinMaxExpr(Comm->getSCEVType(), NewOps); | ||||
8985 |       llvm_unreachable("Unknown commutative SCEV type!"); | ||||
8986 | } | ||||
8987 | } | ||||
8988 | // If we got here, all operands are loop invariant. | ||||
8989 | return Comm; | ||||
8990 | } | ||||
8991 | |||||
8992 | if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { | ||||
8993 | const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); | ||||
8994 | const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); | ||||
8995 | if (LHS == Div->getLHS() && RHS == Div->getRHS()) | ||||
8996 | return Div; // must be loop invariant | ||||
8997 | return getUDivExpr(LHS, RHS); | ||||
8998 | } | ||||
8999 | |||||
9000 | // If this is a loop recurrence for a loop that does not contain L, then we | ||||
9001 | // are dealing with the final value computed by the loop. | ||||
9002 | if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { | ||||
9003 | // First, attempt to evaluate each operand. | ||||
9004 | // Avoid performing the look-up in the common case where the specified | ||||
9005 | // expression has no loop-variant portions. | ||||
9006 | for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { | ||||
9007 | const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); | ||||
9008 | if (OpAtScope == AddRec->getOperand(i)) | ||||
9009 | continue; | ||||
9010 | |||||
9011 | // Okay, at least one of these operands is loop variant but might be | ||||
9012 | // foldable. Build a new instance of the folded commutative expression. | ||||
9013 | SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), | ||||
9014 | AddRec->op_begin()+i); | ||||
9015 | NewOps.push_back(OpAtScope); | ||||
9016 | for (++i; i != e; ++i) | ||||
9017 | NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); | ||||
9018 | |||||
9019 | const SCEV *FoldedRec = | ||||
9020 | getAddRecExpr(NewOps, AddRec->getLoop(), | ||||
9021 | AddRec->getNoWrapFlags(SCEV::FlagNW)); | ||||
9022 | AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); | ||||
9023 | // The addrec may be folded to a nonrecurrence, for example, if the | ||||
9024 | // induction variable is multiplied by zero after constant folding. Go | ||||
9025 | // ahead and return the folded value. | ||||
9026 | if (!AddRec) | ||||
9027 | return FoldedRec; | ||||
9028 | break; | ||||
9029 | } | ||||
9030 | |||||
9031 | // If the scope is outside the addrec's loop, evaluate it by using the | ||||
9032 | // loop exit value of the addrec. | ||||
9033 | if (!AddRec->getLoop()->contains(L)) { | ||||
9034 | // To evaluate this recurrence, we need to know how many times the AddRec | ||||
9035 | // loop iterates. Compute this now. | ||||
9036 | const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); | ||||
9037 | if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; | ||||
9038 | |||||
9039 | // Then, evaluate the AddRec. | ||||
9040 | return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); | ||||
9041 | } | ||||
9042 | |||||
9043 | return AddRec; | ||||
9044 | } | ||||
9045 | |||||
9046 | if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { | ||||
9047 | const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); | ||||
9048 | if (Op == Cast->getOperand()) | ||||
9049 | return Cast; // must be loop invariant | ||||
9050 | return getZeroExtendExpr(Op, Cast->getType()); | ||||
9051 | } | ||||
9052 | |||||
9053 | if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { | ||||
9054 | const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); | ||||
9055 | if (Op == Cast->getOperand()) | ||||
9056 | return Cast; // must be loop invariant | ||||
9057 | return getSignExtendExpr(Op, Cast->getType()); | ||||
9058 | } | ||||
9059 | |||||
9060 | if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { | ||||
9061 | const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); | ||||
9062 | if (Op == Cast->getOperand()) | ||||
9063 | return Cast; // must be loop invariant | ||||
9064 | return getTruncateExpr(Op, Cast->getType()); | ||||
9065 | } | ||||
9066 | |||||
9067 | if (const SCEVPtrToIntExpr *Cast = dyn_cast<SCEVPtrToIntExpr>(V)) { | ||||
9068 | const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); | ||||
9069 | if (Op == Cast->getOperand()) | ||||
9070 | return Cast; // must be loop invariant | ||||
9071 | return getPtrToIntExpr(Op, Cast->getType()); | ||||
9072 | } | ||||
9073 | |||||
9074 |   llvm_unreachable("Unknown SCEV type!"); | ||||
9075 | } | ||||
9076 | |||||
9077 | const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { | ||||
9078 | return getSCEVAtScope(getSCEV(V), L); | ||||
9079 | } | ||||
9080 | |||||
9081 | const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { | ||||
9082 | if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) | ||||
9083 | return stripInjectiveFunctions(ZExt->getOperand()); | ||||
9084 | if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) | ||||
9085 | return stripInjectiveFunctions(SExt->getOperand()); | ||||
9086 | return S; | ||||
9087 | } | ||||
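// Peeling extensions here is sound for root-finding because zext and sext are
// injective: zext(X) == zext(Y) holds exactly when X == Y, so in particular
// the stripped expression is zero exactly when the original is.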
9088 | |||||
9089 | /// Finds the minimum unsigned root of the following equation: | ||||
9090 | /// | ||||
9091 | /// A * X = B (mod N) | ||||
9092 | /// | ||||
9093 | /// where N = 2^BW and BW is the common bit width of A and B. The signedness of | ||||
9094 | /// A and B isn't important. | ||||
9095 | /// | ||||
9096 | /// If the equation does not have a solution, SCEVCouldNotCompute is returned. | ||||
9097 | static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, | ||||
9098 | ScalarEvolution &SE) { | ||||
9099 | uint32_t BW = A.getBitWidth(); | ||||
9100 |   assert(BW == SE.getTypeSizeInBits(B->getType())); | ||||
9101 |   assert(A != 0 && "A must be non-zero."); | ||||
9102 | |||||
9103 | // 1. D = gcd(A, N) | ||||
9104 | // | ||||
9105 | // The gcd of A and N may have only one prime factor: 2. The number of | ||||
9106 | // trailing zeros in A is its multiplicity | ||||
9107 | uint32_t Mult2 = A.countTrailingZeros(); | ||||
9108 | // D = 2^Mult2 | ||||
9109 | |||||
9110 | // 2. Check if B is divisible by D. | ||||
9111 | // | ||||
9112 | // B is divisible by D if and only if the multiplicity of prime factor 2 for B | ||||
9113 | // is not less than multiplicity of this prime factor for D. | ||||
9114 | if (SE.GetMinTrailingZeros(B) < Mult2) | ||||
9115 | return SE.getCouldNotCompute(); | ||||
9116 | |||||
9117 | // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic | ||||
9118 | // modulo (N / D). | ||||
9119 | // | ||||
9120 | // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent | ||||
9121 | // (N / D) in general. The inverse itself always fits into BW bits, though, | ||||
9122 | // so we immediately truncate it. | ||||
9123 | APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D | ||||
9124 | APInt Mod(BW + 1, 0); | ||||
9125 | Mod.setBit(BW - Mult2); // Mod = N / D | ||||
9126 | APInt I = AD.multiplicativeInverse(Mod).trunc(BW); | ||||
9127 | |||||
9128 | // 4. Compute the minimum unsigned root of the equation: | ||||
9129 | // I * (B / D) mod (N / D) | ||||
9130 | // To simplify the computation, we factor out the divide by D: | ||||
9131 | // (I * B mod N) / D | ||||
9132 | const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); | ||||
9133 | return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); | ||||
9134 | } | ||||
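// A worked instance with BW = 4, i.e. N = 16: solving 4*X = 8 (mod 16),
// Mult2 = 2 so D = 4; B = 8 has two trailing zeros, so a solution exists.
// A/D = 1, N/D = 4, I = 1 (the inverse of 1 mod 4), and the minimum root is
// (I*B mod N)/D = 8/4 = 2; indeed 4*2 = 8 (mod 16).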
9135 | |||||
9136 | /// For a given quadratic addrec, generate coefficients of the corresponding | ||||
9137 | /// quadratic equation, multiplied by a common value to ensure that they are | ||||
9138 | /// integers. | ||||
9139 | /// The returned value is a tuple { A, B, C, M, BitWidth }, where | ||||
9140 | /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C | ||||
9141 | /// were multiplied by, and BitWidth is the bit width of the original addrec | ||||
9142 | /// coefficients. | ||||
9143 | /// This function returns None if the addrec coefficients are not compile- | ||||
9144 | /// time constants. | ||||
9145 | static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> | ||||
9146 | GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { | ||||
9147 |   assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); | ||||
9148 | const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); | ||||
9149 | const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); | ||||
9150 | const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); | ||||
9151 |   LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " | ||||
9152 |              << *AddRec << '\n'); | ||||
9153 | |||||
9154 | // We currently can only solve this if the coefficients are constants. | ||||
9155 | if (!LC || !MC || !NC) { | ||||
9156 |     LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); | ||||
9157 | return None; | ||||
9158 | } | ||||
9159 | |||||
9160 | APInt L = LC->getAPInt(); | ||||
9161 | APInt M = MC->getAPInt(); | ||||
9162 | APInt N = NC->getAPInt(); | ||||
9163 |   assert(!N.isNullValue() && "This is not a quadratic addrec"); | ||||
9164 | |||||
9165 | unsigned BitWidth = LC->getAPInt().getBitWidth(); | ||||
9166 | unsigned NewWidth = BitWidth + 1; | ||||
9167 |   LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " | ||||
9168 |              << BitWidth << '\n'); | ||||
9169 | // The sign-extension (as opposed to a zero-extension) here matches the | ||||
9170 | // extension used in SolveQuadraticEquationWrap (with the same motivation). | ||||
9171 | N = N.sext(NewWidth); | ||||
9172 | M = M.sext(NewWidth); | ||||
9173 | L = L.sext(NewWidth); | ||||
9174 | |||||
9175 | // The increments are M, M+N, M+2N, ..., so the accumulated values are | ||||
9176 | // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, | ||||
9177 | // L+M, L+2M+N, L+3M+3N, ... | ||||
9178 | // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. | ||||
9179 | // | ||||
9180 | // The equation Acc = 0 is then | ||||
9181 | // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. | ||||
9182 | // In a quadratic form it becomes: | ||||
9183 | // N n^2 + (2M-N) n + 2L = 0. | ||||
9184 | |||||
9185 | APInt A = N; | ||||
9186 | APInt B = 2 * M - A; | ||||
9187 | APInt C = 2 * L; | ||||
9188 | APInt T = APInt(NewWidth, 2); | ||||
9189 |   LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B | ||||
9190 |              << "x + " << C << ", coeff bw: " << NewWidth | ||||
9191 |              << ", multiplied by " << T << '\n'); | ||||
9192 | return std::make_tuple(A, B, C, T, BitWidth); | ||||
9193 | } | ||||
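// For example, the chrec {-3,+,3,+,2} accumulates to c(n) = -3 + 3n + n(n-1),
// so the generated equation is 2n^2 + 4n - 6 = 0 (A = N = 2, B = 2M - N = 4,
// C = 2L = -6, common multiplier 2), whose least non-negative root is n = 1.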
9194 | |||||
9195 | /// Helper function to compare optional APInts: | ||||
9196 | /// (a) if X and Y both exist, return min(X, Y), | ||||
9197 | /// (b) if neither X nor Y exist, return None, | ||||
9198 | /// (c) if exactly one of X and Y exists, return that value. | ||||
9199 | static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) { | ||||
9200 | if (X.hasValue() && Y.hasValue()) { | ||||
9201 | unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); | ||||
9202 | APInt XW = X->sextOrSelf(W); | ||||
9203 | APInt YW = Y->sextOrSelf(W); | ||||
9204 | return XW.slt(YW) ? *X : *Y; | ||||
9205 | } | ||||
9206 | if (!X.hasValue() && !Y.hasValue()) | ||||
9207 | return None; | ||||
9208 | return X.hasValue() ? *X : *Y; | ||||
9209 | } | ||||
9210 | |||||
9211 | /// Helper function to truncate an optional APInt to a given BitWidth. | ||||
9212 | /// When solving addrec-related equations, it is preferable to return a value | ||||
9213 | /// that has the same bit width as the original addrec's coefficients. If the | ||||
9214 | /// solution fits in the original bit width, truncate it (except for i1). | ||||
9215 | /// Returning a value of a different bit width may inhibit some optimizations. | ||||
9216 | /// | ||||
9217 | /// In general, a solution to a quadratic equation generated from an addrec | ||||
9218 | /// may require BW+1 bits, where BW is the bit width of the addrec's | ||||
9219 | /// coefficients. The reason is that the coefficients of the quadratic | ||||
9220 | /// equation are BW+1 bits wide (to avoid truncation when converting from | ||||
9221 | /// the addrec to the equation). | ||||
9222 | static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) { | ||||
9223 | if (!X.hasValue()) | ||||
9224 | return None; | ||||
9225 | unsigned W = X->getBitWidth(); | ||||
9226 | if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) | ||||
9227 | return X->trunc(BitWidth); | ||||
9228 | return X; | ||||
9229 | } | ||||
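// For example, an i8 addrec produces 9-bit equation coefficients; a computed
// root of 6 fits in 8 bits and is truncated back to i8, while a root of 300
// does not and is returned at the wider width.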
9230 | |||||
9231 | /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n | ||||
9232 | /// iterations. The values L, M, N are assumed to be signed, and they | ||||
9233 | /// should all have the same bit widths. | ||||
9234 | /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, | ||||
9235 | /// where BW is the bit width of the addrec's coefficients. | ||||
9236 | /// If the calculated value is a BW-bit integer (for BW > 1), it will be | ||||
9237 | /// returned as such, otherwise the bit width of the returned value may | ||||
9238 | /// be greater than BW. | ||||
9239 | /// | ||||
9240 | /// This function returns None if | ||||
9241 | /// (a) the addrec coefficients are not constant, or | ||||
9242 | /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases | ||||
9243 | /// like x^2 = 5, no integer solutions exist, in other cases an integer | ||||
9244 | /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. | ||||
9245 | static Optional<APInt> | ||||
9246 | SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { | ||||
9247 | APInt A, B, C, M; | ||||
9248 | unsigned BitWidth; | ||||
9249 | auto T = GetQuadraticEquation(AddRec); | ||||
9250 | if (!T.hasValue()) | ||||
9251 | return None; | ||||
9252 | |||||
9253 | std::tie(A, B, C, M, BitWidth) = *T; | ||||
9254 |   LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); | ||||
9255 | Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); | ||||
9256 | if (!X.hasValue()) | ||||
9257 | return None; | ||||
9258 | |||||
9259 | ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); | ||||
9260 | ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); | ||||
9261 | if (!V->isZero()) | ||||
9262 | return None; | ||||
9263 | |||||
9264 | return TruncIfPossible(X, BitWidth); | ||||
9265 | } | ||||
9266 | |||||
9267 | /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n | ||||
9268 | /// iterations. The values M, N are assumed to be signed, and they | ||||
9269 | /// should all have the same bit widths. | ||||
9270 | /// Find the least n such that c(n) does not belong to the given range, | ||||
9271 | /// while c(n-1) does. | ||||
9272 | /// | ||||
9273 | /// This function returns None if | ||||
9274 | /// (a) the addrec coefficients are not constant, or | ||||
9275 | /// (b) SolveQuadraticEquationWrap was unable to find a solution for the | ||||
9276 | /// bounds of the range. | ||||
9277 | static Optional<APInt> | ||||
9278 | SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, | ||||
9279 | const ConstantRange &Range, ScalarEvolution &SE) { | ||||
9280 |   assert(AddRec->getOperand(0)->isZero() && | ||||
9281 |          "Starting value of addrec should be 0"); | ||||
9282 |   LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " | ||||
9283 |              << Range << ", addrec " << *AddRec << '\n'); | ||||
9284 | // This case is handled in getNumIterationsInRange. Here we can assume that | ||||
9285 | // we start in the range. | ||||
9286 |   assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && | ||||
9287 |          "Addrec's initial value should be in range"); | ||||
9288 | |||||
9289 | APInt A, B, C, M; | ||||
9290 | unsigned BitWidth; | ||||
9291 | auto T = GetQuadraticEquation(AddRec); | ||||
9292 | if (!T.hasValue()) | ||||
9293 | return None; | ||||
9294 | |||||
9295 | // Be careful about the return value: there can be two reasons for not | ||||
9296 | // returning an actual number. First, if no solutions to the equations | ||||
9297 | // were found, and second, if the solutions don't leave the given range. | ||||
9298 | // The first case means that the actual solution is "unknown", the second | ||||
9299 | // means that it's known, but not valid. If the solution is unknown, we | ||||
9300 | // cannot make any conclusions. | ||||
9301 | // Return a pair: the optional solution and a flag indicating if the | ||||
9302 | // solution was found. | ||||
9303 | auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> { | ||||
9304 | // Solve for signed overflow and unsigned overflow, pick the lower | ||||
9305 | // solution. | ||||
9306 |     LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " | ||||
9307 |                << Bound << " (before multiplying by " << M << ")\n"); | ||||
9308 | Bound *= M; // The quadratic equation multiplier. | ||||
9309 | |||||
9310 | Optional<APInt> SO = None; | ||||
9311 | if (BitWidth > 1) { | ||||
9312 | LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
9313 | "signed overflow\n");
9314 | SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth); | ||||
9315 | } | ||||
9316 | LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
9317 | "unsigned overflow\n");
9318 | Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, | ||||
9319 | BitWidth+1); | ||||
9320 | |||||
9321 | auto LeavesRange = [&] (const APInt &X) { | ||||
9322 | ConstantInt *C0 = ConstantInt::get(SE.getContext(), X); | ||||
9323 | ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE); | ||||
9324 | if (Range.contains(V0->getValue())) | ||||
9325 | return false; | ||||
9326 | // X should be at least 1, so X-1 is non-negative. | ||||
9327 | ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1); | ||||
9328 | ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE); | ||||
9329 | if (Range.contains(V1->getValue())) | ||||
9330 | return true; | ||||
9331 | return false; | ||||
9332 | }; | ||||
9333 | |||||
9334 | // If SolveQuadraticEquationWrap returns None, it means that there can | ||||
9335 | // be a solution, but the function failed to find it. We cannot treat it | ||||
9336 | // as "no solution". | ||||
9337 | if (!SO.hasValue() || !UO.hasValue()) | ||||
9338 | return { None, false }; | ||||
9339 | |||||
9340 | // Check the smaller value first to see if it leaves the range. | ||||
9341 | // At this point, both SO and UO must have values. | ||||
9342 | Optional<APInt> Min = MinOptional(SO, UO); | ||||
9343 | if (LeavesRange(*Min)) | ||||
9344 | return { Min, true }; | ||||
9345 | Optional<APInt> Max = Min == SO ? UO : SO; | ||||
9346 | if (LeavesRange(*Max)) | ||||
9347 | return { Max, true }; | ||||
9348 | |||||
9349 | // Solutions were found, but were eliminated, hence the "true". | ||||
9350 | return { None, true }; | ||||
9351 | }; | ||||
9352 | |||||
9353 | std::tie(A, B, C, M, BitWidth) = *T; | ||||
9354 | // Lower bound is inclusive, subtract 1 to represent the exiting value. | ||||
9355 | APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1; | ||||
9356 | APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth()); | ||||
9357 | auto SL = SolveForBoundary(Lower); | ||||
9358 | auto SU = SolveForBoundary(Upper); | ||||
9359 | // If any of the solutions was unknown, no meaningful conclusions can
9360 | // be made. | ||||
9361 | if (!SL.second || !SU.second) | ||||
9362 | return None; | ||||
9363 | |||||
9364 | // Claim: The correct solution is not some value between Min and Max. | ||||
9365 | // | ||||
9366 | // Justification: Assuming that Min and Max are different values, one of | ||||
9367 | // them is when the first signed overflow happens, the other is when the | ||||
9368 | // first unsigned overflow happens. Crossing the range boundary is only | ||||
9369 | // possible via an overflow (treating 0 as a special case of it, modeling | ||||
9370 | // an overflow as crossing k*2^W for some k). | ||||
9371 | // | ||||
9372 | // The interesting case here is when Min was eliminated as an invalid | ||||
9373 | // solution, but Max was not. The argument is that if there was another | ||||
9374 | // overflow between Min and Max, it would also have been eliminated if | ||||
9375 | // it was considered. | ||||
9376 | // | ||||
9377 | // For a given boundary, it is possible to have two overflows of the same | ||||
9378 | // type (signed/unsigned) without having the other type in between: this | ||||
9379 | // can happen when the vertex of the parabola is between the iterations | ||||
9380 | // corresponding to the overflows. This is only possible when the two | ||||
9381 | // overflows cross k*2^W for the same k. In such case, if the second one | ||||
9382 | // left the range (and was the first one to do so), the first overflow | ||||
9383 | // would have to enter the range, which would mean that either we had left | ||||
9384 | // the range before or that we started outside of it. Both of these cases | ||||
9385 | // are contradictions. | ||||
9386 | // | ||||
9387 | // Claim: In the case where SolveForBoundary returns None, the correct | ||||
9388 | // solution is not some value between the Max for this boundary and the | ||||
9389 | // Min of the other boundary. | ||||
9390 | // | ||||
9391 | // Justification: Assume that we had such Max_A and Min_B corresponding | ||||
9392 | // to range boundaries A and B and such that Max_A < Min_B. If there was | ||||
9393 | // a solution between Max_A and Min_B, it would have to be caused by an | ||||
9394 | // overflow corresponding to either A or B. It cannot correspond to B, | ||||
9395 | // since Min_B is the first occurrence of such an overflow. If it | ||||
9396 | // corresponded to A, it would have to be either a signed or an unsigned | ||||
9397 | // overflow that is larger than both eliminated overflows for A. But | ||||
9398 | // between the eliminated overflows and this overflow, the values would | ||||
9399 | // cover the entire value space, thus crossing the other boundary, which | ||||
9400 | // is a contradiction. | ||||
9401 | |||||
9402 | return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth); | ||||
9403 | } | ||||
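
// Editor's note: a worked instance of the range solver above. For the chrec
// {0,+,1,+,2} (so c(n) = n^2) against the range [0, 100), the upper boundary
// 100 becomes 200 after the multiplier M = 2, the equation 2*n^2 = 200 has
// the root n = 10, and LeavesRange accepts it: c(10) = 100 is outside the
// range while c(9) = 81 is still inside.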
9404 | |||||
9405 | ScalarEvolution::ExitLimit | ||||
9406 | ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit, | ||||
9407 | bool AllowPredicates) { | ||||
9408 | |||||
9409 | // This is only used for loops with a "x != y" exit test. The exit condition | ||||
9410 | // is now expressed as a single expression, V = x-y. So the exit test is | ||||
9411 | // effectively V != 0. We know, and take advantage of, the fact that this
9412 | // expression is only used in a comparison-with-zero context.
9413 | |||||
9414 | SmallPtrSet<const SCEVPredicate *, 4> Predicates; | ||||
9415 | // If the value is a constant | ||||
9416 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { | ||||
9417 | // If the value is already zero, the branch will execute zero times. | ||||
9418 | if (C->getValue()->isZero()) return C; | ||||
9419 | return getCouldNotCompute(); // Otherwise it will loop infinitely. | ||||
9420 | } | ||||
9421 | |||||
9422 | const SCEVAddRecExpr *AddRec = | ||||
9423 | dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V)); | ||||
9424 | |||||
9425 | if (!AddRec && AllowPredicates) | ||||
9426 | // Try to make this an AddRec using runtime tests, in the first X | ||||
9427 | // iterations of this loop, where X is the SCEV expression found by the | ||||
9428 | // algorithm below. | ||||
9429 | AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates); | ||||
9430 | |||||
9431 | if (!AddRec || AddRec->getLoop() != L) | ||||
9432 | return getCouldNotCompute(); | ||||
9433 | |||||
9434 | // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of | ||||
9435 | // the quadratic equation to solve it. | ||||
9436 | if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { | ||||
9437 | // We can only use this value if the chrec ends up with an exact zero | ||||
9438 | // value at this index. When solving for "X*X != 5", for example, we | ||||
9439 | // should not accept a root of 2. | ||||
9440 | if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) { | ||||
9441 | const auto *R = cast<SCEVConstant>(getConstant(S.getValue())); | ||||
9442 | return ExitLimit(R, R, false, Predicates); | ||||
9443 | } | ||||
9444 | return getCouldNotCompute(); | ||||
9445 | } | ||||
9446 | |||||
9447 | // Otherwise we can only handle this if it is affine. | ||||
9448 | if (!AddRec->isAffine()) | ||||
9449 | return getCouldNotCompute(); | ||||
9450 | |||||
9451 | // If this is an affine expression, the execution count of this branch is | ||||
9452 | // the minimum unsigned root of the following equation: | ||||
9453 | // | ||||
9454 | // Start + Step*N = 0 (mod 2^BW) | ||||
9455 | // | ||||
9456 | // equivalent to: | ||||
9457 | // | ||||
9458 | // Step*N = -Start (mod 2^BW) | ||||
9459 | // | ||||
9460 | // where BW is the common bit width of Start and Step. | ||||
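//
// A tiny worked instance (editor's note): for an i8 addrec {-6,+,2} the
// equation reads 2*N = 6 (mod 2^8), so N = 3; the IV takes the values
// -6, -4, -2 and reaches 0 on the third iteration.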
9461 | |||||
9462 | // Get the initial value for the loop. | ||||
9463 | const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); | ||||
9464 | const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); | ||||
9465 | |||||
9466 | // For now we handle only constant steps. | ||||
9467 | // | ||||
9468 | // TODO: Handle a nonconstant Step given AddRec<NUW>. If the | ||||
9469 | // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap | ||||
9470 | // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. | ||||
9471 | // We have not yet seen any such cases. | ||||
9472 | const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); | ||||
9473 | if (!StepC || StepC->getValue()->isZero()) | ||||
9474 | return getCouldNotCompute(); | ||||
9475 | |||||
9476 | // For positive steps (counting up until unsigned overflow): | ||||
9477 | // N = -Start/Step (as unsigned) | ||||
9478 | // For negative steps (counting down to zero): | ||||
9479 | // N = Start/-Step | ||||
9480 | // First compute the unsigned distance from zero in the direction of Step. | ||||
9481 | bool CountDown = StepC->getAPInt().isNegative(); | ||||
9482 | const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); | ||||
9483 | |||||
9484 | // Handle unitary steps, which cannot wraparound. | ||||
9485 | // 1*N = -Start; -1*N = Start (mod 2^BW), so: | ||||
9486 | // N = Distance (as unsigned) | ||||
9487 | if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { | ||||
9488 | APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L)); | ||||
9489 | APInt MaxBECountBase = getUnsignedRangeMax(Distance); | ||||
9490 | if (MaxBECountBase.ult(MaxBECount)) | ||||
9491 | MaxBECount = MaxBECountBase; | ||||
9492 | |||||
9493 | // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, | ||||
9494 | // we end up with a loop whose backedge-taken count is n - 1. Detect this | ||||
9495 | // case, and see if we can improve the bound. | ||||
9496 | // | ||||
9497 | // Explicitly handling this here is necessary because getUnsignedRange | ||||
9498 | // isn't context-sensitive; it doesn't know that we only care about the | ||||
9499 | // range inside the loop. | ||||
9500 | const SCEV *Zero = getZero(Distance->getType()); | ||||
9501 | const SCEV *One = getOne(Distance->getType()); | ||||
9502 | const SCEV *DistancePlusOne = getAddExpr(Distance, One); | ||||
9503 | if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { | ||||
9504 | // If Distance + 1 doesn't overflow, we can compute the maximum distance | ||||
9505 | // as "unsigned_max(Distance + 1) - 1". | ||||
9506 | ConstantRange CR = getUnsignedRange(DistancePlusOne); | ||||
9507 | MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); | ||||
9508 | } | ||||
9509 | return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); | ||||
9510 | } | ||||
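
// Editor's sketch of the rotated-loop refinement above: rotating
// "for (i = 0; i != n; ++i)" leaves an IV compared as {1,+,1} != n, so
// Distance = n - 1; the entry guard n != 0 is exactly "Distance + 1 != 0",
// hence Distance + 1 cannot wrap to 0 and the max backedge-taken count
// tightens to unsigned_max(n) - 1.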
9511 | |||||
9512 | // If the condition controls loop exit (the loop exits only if the expression | ||||
9513 | // is true) and the addition is no-wrap we can use unsigned divide to | ||||
9514 | // compute the backedge count. In this case, the step may not divide the | ||||
9515 | // distance, but we don't care because if the condition is "missed" the loop | ||||
9516 | // will have undefined behavior due to wrapping. | ||||
9517 | if (ControlsExit && AddRec->hasNoSelfWrap() && | ||||
9518 | loopHasNoAbnormalExits(AddRec->getLoop())) { | ||||
9519 | const SCEV *Exact = | ||||
9520 | getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); | ||||
9521 | const SCEV *Max = getCouldNotCompute(); | ||||
9522 | if (Exact != getCouldNotCompute()) { | ||||
9523 | APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L)); | ||||
9524 | APInt BaseMaxInt = getUnsignedRangeMax(Exact); | ||||
9525 | if (BaseMaxInt.ult(MaxInt)) | ||||
9526 | Max = getConstant(BaseMaxInt); | ||||
9527 | else | ||||
9528 | Max = getConstant(MaxInt); | ||||
9529 | } | ||||
9530 | return ExitLimit(Exact, Max, false, Predicates); | ||||
9531 | } | ||||
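
// Editor's note: a concrete case for the no-self-wrap path above. With
// {10,+,-3}<nw> controlling the only exit, Distance = 10 and
// Exact = udiv(10, 3) = 3. The chrec never hits 0 exactly (10, 7, 4, 1, ...),
// but missing the exit test would force the <nw> addrec to wrap, which is
// undefined behavior, so 3 is still a sound answer.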
9532 | |||||
9533 | // Solve the general equation. | ||||
9534 | const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), | ||||
9535 | getNegativeSCEV(Start), *this); | ||||
9536 | const SCEV *M = E == getCouldNotCompute() | ||||
9537 | ? E | ||||
9538 | : getConstant(getUnsignedRangeMax(E)); | ||||
9539 | return ExitLimit(E, M, false, Predicates); | ||||
9540 | } | ||||
9541 | |||||
9542 | ScalarEvolution::ExitLimit | ||||
9543 | ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { | ||||
9544 | // Loops that look like: while (X == 0) are very strange indeed. We don't | ||||
9545 | // handle them yet except for the trivial case. This could be expanded in the | ||||
9546 | // future as needed. | ||||
9547 | |||||
9548 | // If the value is a constant, check to see if it is known to be non-zero | ||||
9549 | // already. If so, the backedge will execute zero times. | ||||
9550 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { | ||||
9551 | if (!C->getValue()->isZero()) | ||||
9552 | return getZero(C->getType()); | ||||
9553 | return getCouldNotCompute(); // Otherwise it will loop infinitely. | ||||
9554 | } | ||||
9555 | |||||
9556 | // We could implement others, but I really doubt anyone writes loops like | ||||
9557 | // this, and if they did, they would already be constant folded. | ||||
9558 | return getCouldNotCompute(); | ||||
9559 | } | ||||
9560 | |||||
9561 | std::pair<const BasicBlock *, const BasicBlock *> | ||||
9562 | ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) | ||||
9563 | const { | ||||
9564 | // If the block has a unique predecessor, then there is no path from the | ||||
9565 | // predecessor to the block that does not go through the direct edge | ||||
9566 | // from the predecessor to the block. | ||||
9567 | if (const BasicBlock *Pred = BB->getSinglePredecessor()) | ||||
9568 | return {Pred, BB}; | ||||
9569 | |||||
9570 | // A loop's header is defined to be a block that dominates the loop. | ||||
9571 | // If the header has a unique predecessor outside the loop, it must be | ||||
9572 | // a block that has exactly one successor that can reach the loop. | ||||
9573 | if (const Loop *L = LI.getLoopFor(BB)) | ||||
9574 | return {L->getLoopPredecessor(), L->getHeader()}; | ||||
9575 | |||||
9576 | return {nullptr, nullptr}; | ||||
9577 | } | ||||
9578 | |||||
9579 | /// SCEV structural equivalence is usually sufficient for testing whether two | ||||
9580 | /// expressions are equal, however for the purposes of looking for a condition | ||||
9581 | /// guarding a loop, it can be useful to be a little more general, since a | ||||
9582 | /// front-end may have replicated the controlling expression. | ||||
9583 | static bool HasSameValue(const SCEV *A, const SCEV *B) { | ||||
9584 | // Quick check to see if they are the same SCEV. | ||||
9585 | if (A == B) return true; | ||||
9586 | |||||
9587 | auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { | ||||
9588 | // Not all instructions that are "identical" compute the same value. For | ||||
9589 | // instance, two distinct alloca instructions allocating the same type are | ||||
9590 | // identical and do not read memory, but compute distinct values.
9591 | return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); | ||||
9592 | }; | ||||
9593 | |||||
9594 | // Otherwise, if they're both SCEVUnknown, it's possible that they hold | ||||
9595 | // two different instructions with the same value. Check for this case. | ||||
9596 | if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) | ||||
9597 | if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) | ||||
9598 | if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) | ||||
9599 | if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) | ||||
9600 | if (ComputesEqualValues(AI, BI)) | ||||
9601 | return true; | ||||
9602 | |||||
9603 | // Otherwise assume they may have a different value. | ||||
9604 | return false; | ||||
9605 | } | ||||
9606 | |||||
9607 | bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, | ||||
9608 | const SCEV *&LHS, const SCEV *&RHS, | ||||
9609 | unsigned Depth) { | ||||
9610 | bool Changed = false; | ||||
9611 | // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or | ||||
9612 | // '0 != 0'. | ||||
9613 | auto TrivialCase = [&](bool TriviallyTrue) { | ||||
9614 | LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); | ||||
9615 | Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; | ||||
9616 | return true; | ||||
9617 | }; | ||||
9618 | // If we hit the max recursion limit bail out. | ||||
9619 | if (Depth >= 3) | ||||
9620 | return false; | ||||
9621 | |||||
9622 | // Canonicalize a constant to the right side. | ||||
9623 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { | ||||
9624 | // Check for both operands constant. | ||||
9625 | if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { | ||||
9626 | if (ConstantExpr::getICmp(Pred, | ||||
9627 | LHSC->getValue(), | ||||
9628 | RHSC->getValue())->isNullValue()) | ||||
9629 | return TrivialCase(false); | ||||
9630 | else | ||||
9631 | return TrivialCase(true); | ||||
9632 | } | ||||
9633 | // Otherwise swap the operands to put the constant on the right. | ||||
9634 | std::swap(LHS, RHS); | ||||
9635 | Pred = ICmpInst::getSwappedPredicate(Pred); | ||||
9636 | Changed = true; | ||||
9637 | } | ||||
9638 | |||||
9639 | // If we're comparing an addrec with a value which is loop-invariant in the | ||||
9640 | // addrec's loop, put the addrec on the left. Also make a dominance check, | ||||
9641 | // as both operands could be addrecs loop-invariant in each other's loop. | ||||
9642 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { | ||||
9643 | const Loop *L = AR->getLoop(); | ||||
9644 | if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { | ||||
9645 | std::swap(LHS, RHS); | ||||
9646 | Pred = ICmpInst::getSwappedPredicate(Pred); | ||||
9647 | Changed = true; | ||||
9648 | } | ||||
9649 | } | ||||
9650 | |||||
9651 | // If there's a constant operand, canonicalize comparisons with boundary | ||||
9652 | // cases, and canonicalize *-or-equal comparisons to regular comparisons. | ||||
9653 | if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { | ||||
9654 | const APInt &RA = RC->getAPInt(); | ||||
9655 | |||||
9656 | bool SimplifiedByConstantRange = false; | ||||
9657 | |||||
9658 | if (!ICmpInst::isEquality(Pred)) { | ||||
9659 | ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); | ||||
9660 | if (ExactCR.isFullSet()) | ||||
9661 | return TrivialCase(true); | ||||
9662 | else if (ExactCR.isEmptySet()) | ||||
9663 | return TrivialCase(false); | ||||
9664 | |||||
9665 | APInt NewRHS; | ||||
9666 | CmpInst::Predicate NewPred; | ||||
9667 | if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && | ||||
9668 | ICmpInst::isEquality(NewPred)) { | ||||
9669 | // We were able to convert an inequality to an equality. | ||||
9670 | Pred = NewPred; | ||||
9671 | RHS = getConstant(NewRHS); | ||||
9672 | Changed = SimplifiedByConstantRange = true; | ||||
9673 | } | ||||
9674 | } | ||||
9675 | |||||
9676 | if (!SimplifiedByConstantRange) { | ||||
9677 | switch (Pred) { | ||||
9678 | default: | ||||
9679 | break; | ||||
9680 | case ICmpInst::ICMP_EQ: | ||||
9681 | case ICmpInst::ICMP_NE: | ||||
9682 | // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. | ||||
9683 | if (!RA) | ||||
9684 | if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) | ||||
9685 | if (const SCEVMulExpr *ME = | ||||
9686 | dyn_cast<SCEVMulExpr>(AE->getOperand(0))) | ||||
9687 | if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && | ||||
9688 | ME->getOperand(0)->isAllOnesValue()) { | ||||
9689 | RHS = AE->getOperand(1); | ||||
9690 | LHS = ME->getOperand(1); | ||||
9691 | Changed = true; | ||||
9692 | } | ||||
9693 | break; | ||||
9694 | |||||
9695 | |||||
9696 | // The "Should have been caught earlier!" messages refer to the fact | ||||
9697 | // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above | ||||
9698 | // should have fired on the corresponding cases, and canonicalized the | ||||
9699 | // check to trivial case. | ||||
9700 | |||||
9701 | case ICmpInst::ICMP_UGE: | ||||
9702 | assert(!RA.isMinValue() && "Should have been caught earlier!");
9703 | Pred = ICmpInst::ICMP_UGT; | ||||
9704 | RHS = getConstant(RA - 1); | ||||
9705 | Changed = true; | ||||
9706 | break; | ||||
9707 | case ICmpInst::ICMP_ULE: | ||||
9708 | assert(!RA.isMaxValue() && "Should have been caught earlier!");
9709 | Pred = ICmpInst::ICMP_ULT; | ||||
9710 | RHS = getConstant(RA + 1); | ||||
9711 | Changed = true; | ||||
9712 | break; | ||||
9713 | case ICmpInst::ICMP_SGE: | ||||
9714 | assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
9715 | Pred = ICmpInst::ICMP_SGT; | ||||
9716 | RHS = getConstant(RA - 1); | ||||
9717 | Changed = true; | ||||
9718 | break; | ||||
9719 | case ICmpInst::ICMP_SLE: | ||||
9720 | assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
9721 | Pred = ICmpInst::ICMP_SLT; | ||||
9722 | RHS = getConstant(RA + 1); | ||||
9723 | Changed = true; | ||||
9724 | break; | ||||
9725 | } | ||||
9726 | } | ||||
9727 | } | ||||
9728 | |||||
9729 | // Check for obvious equality. | ||||
9730 | if (HasSameValue(LHS, RHS)) { | ||||
9731 | if (ICmpInst::isTrueWhenEqual(Pred)) | ||||
9732 | return TrivialCase(true); | ||||
9733 | if (ICmpInst::isFalseWhenEqual(Pred)) | ||||
9734 | return TrivialCase(false); | ||||
9735 | } | ||||
9736 | |||||
9737 | // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by | ||||
9738 | // adding or subtracting 1 from one of the operands. | ||||
9739 | switch (Pred) { | ||||
9740 | case ICmpInst::ICMP_SLE: | ||||
9741 | if (!getSignedRangeMax(RHS).isMaxSignedValue()) { | ||||
9742 | RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, | ||||
9743 | SCEV::FlagNSW); | ||||
9744 | Pred = ICmpInst::ICMP_SLT; | ||||
9745 | Changed = true; | ||||
9746 | } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { | ||||
9747 | LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, | ||||
9748 | SCEV::FlagNSW); | ||||
9749 | Pred = ICmpInst::ICMP_SLT; | ||||
9750 | Changed = true; | ||||
9751 | } | ||||
9752 | break; | ||||
9753 | case ICmpInst::ICMP_SGE: | ||||
9754 | if (!getSignedRangeMin(RHS).isMinSignedValue()) { | ||||
9755 | RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, | ||||
9756 | SCEV::FlagNSW); | ||||
9757 | Pred = ICmpInst::ICMP_SGT; | ||||
9758 | Changed = true; | ||||
9759 | } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { | ||||
9760 | LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, | ||||
9761 | SCEV::FlagNSW); | ||||
9762 | Pred = ICmpInst::ICMP_SGT; | ||||
9763 | Changed = true; | ||||
9764 | } | ||||
9765 | break; | ||||
9766 | case ICmpInst::ICMP_ULE: | ||||
9767 | if (!getUnsignedRangeMax(RHS).isMaxValue()) { | ||||
9768 | RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, | ||||
9769 | SCEV::FlagNUW); | ||||
9770 | Pred = ICmpInst::ICMP_ULT; | ||||
9771 | Changed = true; | ||||
9772 | } else if (!getUnsignedRangeMin(LHS).isMinValue()) { | ||||
9773 | LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); | ||||
9774 | Pred = ICmpInst::ICMP_ULT; | ||||
9775 | Changed = true; | ||||
9776 | } | ||||
9777 | break; | ||||
9778 | case ICmpInst::ICMP_UGE: | ||||
9779 | if (!getUnsignedRangeMin(RHS).isMinValue()) { | ||||
9780 | RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); | ||||
9781 | Pred = ICmpInst::ICMP_UGT; | ||||
9782 | Changed = true; | ||||
9783 | } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { | ||||
9784 | LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, | ||||
9785 | SCEV::FlagNUW); | ||||
9786 | Pred = ICmpInst::ICMP_UGT; | ||||
9787 | Changed = true; | ||||
9788 | } | ||||
9789 | break; | ||||
9790 | default: | ||||
9791 | break; | ||||
9792 | } | ||||
9793 | |||||
9794 | // TODO: More simplifications are possible here. | ||||
9795 | |||||
9796 | // Recursively simplify until we either hit a recursion limit or nothing | ||||
9797 | // changes. | ||||
9798 | if (Changed) | ||||
9799 | return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); | ||||
9800 | |||||
9801 | return Changed; | ||||
9802 | } | ||||
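
// Editor's sketch: a trace of the canonicalizations above. "5 s>= x" is first
// swapped to "x s<= 5" (constant to the right); the boundary rule for SLE then
// rewrites it to the strict "x s< 6", and the recursive call finds nothing
// further to change.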
9803 | |||||
9804 | bool ScalarEvolution::isKnownNegative(const SCEV *S) { | ||||
9805 | return getSignedRangeMax(S).isNegative(); | ||||
9806 | } | ||||
9807 | |||||
9808 | bool ScalarEvolution::isKnownPositive(const SCEV *S) { | ||||
9809 | return getSignedRangeMin(S).isStrictlyPositive(); | ||||
9810 | } | ||||
9811 | |||||
9812 | bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { | ||||
9813 | return !getSignedRangeMin(S).isNegative(); | ||||
9814 | } | ||||
9815 | |||||
9816 | bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { | ||||
9817 | return !getSignedRangeMax(S).isStrictlyPositive(); | ||||
9818 | } | ||||
9819 | |||||
9820 | bool ScalarEvolution::isKnownNonZero(const SCEV *S) { | ||||
9821 | return getUnsignedRangeMin(S) != 0; | ||||
9822 | } | ||||
9823 | |||||
9824 | std::pair<const SCEV *, const SCEV *> | ||||
9825 | ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { | ||||
9826 | // Compute SCEV on entry of loop L. | ||||
9827 | const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); | ||||
9828 | if (Start == getCouldNotCompute()) | ||||
9829 | return { Start, Start }; | ||||
9830 | // Compute post increment SCEV for loop L. | ||||
9831 | const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); | ||||
9832 | assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
9833 | return { Start, PostInc }; | ||||
9834 | } | ||||
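
// Editor's note: e.g. for S = {0,+,1}<nuw> in loop L, the pair returned above
// is (0, {1,+,1}): the value of S on loop entry and its value after the first
// increment.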
9835 | |||||
9836 | bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, | ||||
9837 | const SCEV *LHS, const SCEV *RHS) { | ||||
9838 | // First collect all loops. | ||||
9839 | SmallPtrSet<const Loop *, 8> LoopsUsed; | ||||
9840 | getUsedLoops(LHS, LoopsUsed); | ||||
9841 | getUsedLoops(RHS, LoopsUsed); | ||||
9842 | |||||
9843 | if (LoopsUsed.empty()) | ||||
9844 | return false; | ||||
9845 | |||||
9846 | // Domination relationship must be a linear order on collected loops. | ||||
9847 | #ifndef NDEBUG
9848 | for (auto *L1 : LoopsUsed) | ||||
9849 | for (auto *L2 : LoopsUsed) | ||||
9850 | assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
9851 | DT.dominates(L2->getHeader(), L1->getHeader())) &&
9852 | "Domination relationship is not a linear order");
9853 | #endif | ||||
9854 | |||||
9855 | const Loop *MDL = | ||||
9856 | *std::max_element(LoopsUsed.begin(), LoopsUsed.end(), | ||||
9857 | [&](const Loop *L1, const Loop *L2) { | ||||
9858 | return DT.properlyDominates(L1->getHeader(), L2->getHeader()); | ||||
9859 | }); | ||||
9860 | |||||
9861 | // Get init and post increment value for LHS. | ||||
9862 | auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS); | ||||
9863 | // If LHS contains an unknown non-invariant SCEV, bail out.
9864 | if (SplitLHS.first == getCouldNotCompute()) | ||||
9865 | return false; | ||||
9866 | assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
9867 | // Get init and post increment value for RHS. | ||||
9868 | auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS); | ||||
9869 | // If RHS contains an unknown non-invariant SCEV, bail out.
9870 | if (SplitRHS.first == getCouldNotCompute()) | ||||
9871 | return false; | ||||
9872 | assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
9873 | // It is possible that init SCEV contains an invariant load but it does | ||||
9874 | // not dominate MDL and is not available at MDL loop entry, so we should | ||||
9875 | // check it here. | ||||
9876 | if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) || | ||||
9877 | !isAvailableAtLoopEntry(SplitRHS.first, MDL)) | ||||
9878 | return false; | ||||
9879 | |||||
9880 | // The backedge guard check seems to be faster than the entry one, so in
9881 | // some cases it can short-circuit and speed up the whole estimation.
9882 | return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second, | ||||
9883 | SplitRHS.second) && | ||||
9884 | isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first); | ||||
9885 | } | ||||
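
// Editor's sketch of the induction proof above: for LHS = {1,+,1}<nuw>,
// RHS = 1 and Pred = ICMP_UGE, MDL is the addrec's own loop, the split gives
// init 1 and post-inc {2,+,1}, the entry guard only needs 1 u>= 1, and the
// backedge guard carries {2,+,1} u>= 1 through every iteration.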
9886 | |||||
9887 | bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, | ||||
9888 | const SCEV *LHS, const SCEV *RHS) { | ||||
9889 | // Canonicalize the inputs first. | ||||
9890 | (void)SimplifyICmpOperands(Pred, LHS, RHS); | ||||
9891 | |||||
9892 | if (isKnownViaInduction(Pred, LHS, RHS)) | ||||
9893 | return true; | ||||
9894 | |||||
9895 | if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) | ||||
9896 | return true; | ||||
9897 | |||||
9898 | // Otherwise see what can be done with some simple reasoning. | ||||
9899 | return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS); | ||||
9900 | } | ||||
9901 | |||||
9902 | Optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred, | ||||
9903 | const SCEV *LHS, | ||||
9904 | const SCEV *RHS) { | ||||
9905 | if (isKnownPredicate(Pred, LHS, RHS)) | ||||
9906 | return true; | ||||
9907 | else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS)) | ||||
9908 | return false; | ||||
9909 | return None; | ||||
9910 | } | ||||
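
// Editor's note: a hypothetical caller, showing the tri-state result;
// foldBranch and keepBranch are illustrative names only.
#if 0
if (Optional<bool> Res = SE.evaluatePredicate(ICmpInst::ICMP_ULT, LHS, RHS))
  foldBranch(*Res);   // Known true or known false.
else
  keepBranch();       // Neither direction could be proven.
#endif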
9911 | |||||
9912 | bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred, | ||||
9913 | const SCEV *LHS, const SCEV *RHS, | ||||
9914 | const Instruction *Context) { | ||||
9915 | // TODO: Analyze guards and assumes from Context's block. | ||||
9916 | return isKnownPredicate(Pred, LHS, RHS) || | ||||
9917 | isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS); | ||||
9918 | } | ||||
9919 | |||||
9920 | Optional<bool> | ||||
9921 | ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS, | ||||
9922 | const SCEV *RHS, | ||||
9923 | const Instruction *Context) { | ||||
9924 | Optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS); | ||||
9925 | if (KnownWithoutContext) | ||||
| |||||
9926 | return KnownWithoutContext; | ||||
9927 | |||||
9928 | if (isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS)) | ||||
9929 | return true; | ||||
9930 | else if (isBasicBlockEntryGuardedByCond(Context->getParent(), | ||||
9931 | ICmpInst::getInversePredicate(Pred), | ||||
9932 | LHS, RHS)) | ||||
9933 | return false; | ||||
9934 | return None; | ||||
9935 | } | ||||
9936 | |||||
9937 | bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, | ||||
9938 | const SCEVAddRecExpr *LHS, | ||||
9939 | const SCEV *RHS) { | ||||
9940 | const Loop *L = LHS->getLoop(); | ||||
9941 | return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && | ||||
9942 | isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); | ||||
9943 | } | ||||
9944 | |||||
9945 | Optional<ScalarEvolution::MonotonicPredicateType> | ||||
9946 | ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS, | ||||
9947 | ICmpInst::Predicate Pred) { | ||||
9948 | auto Result = getMonotonicPredicateTypeImpl(LHS, Pred); | ||||
9949 | |||||
9950 | #ifndef NDEBUG
9951 | // Verify an invariant: swapping the predicate should turn a monotonically
9952 | // increasing change into a monotonically decreasing one, and vice versa.
9953 | if (Result) { | ||||
9954 | auto ResultSwapped = | ||||
9955 | getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred)); | ||||
9956 | |||||
9957 | assert(ResultSwapped.hasValue() && "should be able to analyze both!");
9958 | assert(ResultSwapped.getValue() != Result.getValue() &&
9959 | "monotonicity should flip as we flip the predicate");
9960 | } | ||||
9961 | #endif | ||||
9962 | |||||
9963 | return Result; | ||||
9964 | } | ||||
9965 | |||||
9966 | Optional<ScalarEvolution::MonotonicPredicateType> | ||||
9967 | ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS, | ||||
9968 | ICmpInst::Predicate Pred) { | ||||
9969 | // A zero step value for LHS means the induction variable is essentially a | ||||
9970 | // loop invariant value. We don't really depend on the predicate actually | ||||
9971 | // flipping from false to true (for increasing predicates, and the other way | ||||
9972 | // around for decreasing predicates), all we care about is that *if* the | ||||
9973 | // predicate changes then it only changes from false to true. | ||||
9974 | // | ||||
9975 | // A zero step value in itself is not very useful, but there may be places | ||||
9976 | // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be | ||||
9977 | // as general as possible. | ||||
9978 | |||||
9979 | // Only handle LE/LT/GE/GT predicates. | ||||
9980 | if (!ICmpInst::isRelational(Pred)) | ||||
9981 | return None; | ||||
9982 | |||||
9983 | bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred); | ||||
9984 | assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&
9985 | "Should be greater or less!");
9986 | |||||
9987 | // Check that AR does not wrap. | ||||
9988 | if (ICmpInst::isUnsigned(Pred)) { | ||||
9989 | if (!LHS->hasNoUnsignedWrap()) | ||||
9990 | return None; | ||||
9991 | return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; | ||||
9992 | } else { | ||||
9993 | assert(ICmpInst::isSigned(Pred) &&
9994 | "Relational predicate is either signed or unsigned!");
9995 | if (!LHS->hasNoSignedWrap()) | ||||
9996 | return None; | ||||
9997 | |||||
9998 | const SCEV *Step = LHS->getStepRecurrence(*this); | ||||
9999 | |||||
10000 | if (isKnownNonNegative(Step)) | ||||
10001 | return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; | ||||
10002 | |||||
10003 | if (isKnownNonPositive(Step)) | ||||
10004 | return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; | ||||
10005 | |||||
10006 | return None; | ||||
10007 | } | ||||
10008 | } | ||||
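
// Editor's sketch: for LHS = {0,+,1}<nuw> and Pred = ICMP_ULT the result is
// MonotonicallyDecreasing: a <nuw> addrec can only grow in the unsigned
// order, so once "LHS u< RHS" turns false it can never turn true again.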
10009 | |||||
10010 | Optional<ScalarEvolution::LoopInvariantPredicate> | ||||
10011 | ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred, | ||||
10012 | const SCEV *LHS, const SCEV *RHS, | ||||
10013 | const Loop *L) { | ||||
10014 | |||||
10015 | // If one of the operands is loop-invariant, force it into the RHS, otherwise bail out.
10016 | if (!isLoopInvariant(RHS, L)) { | ||||
10017 | if (!isLoopInvariant(LHS, L)) | ||||
10018 | return None; | ||||
10019 | |||||
10020 | std::swap(LHS, RHS); | ||||
10021 | Pred = ICmpInst::getSwappedPredicate(Pred); | ||||
10022 | } | ||||
10023 | |||||
10024 | const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); | ||||
10025 | if (!ArLHS || ArLHS->getLoop() != L) | ||||
10026 | return None; | ||||
10027 | |||||
10028 | auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred); | ||||
10029 | if (!MonotonicType) | ||||
10030 | return None; | ||||
10031 | // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to | ||||
10032 | // true as the loop iterates, and the backedge is control dependent on | ||||
10033 | // "ArLHS `Pred` RHS" == true then we can reason as follows: | ||||
10034 | // | ||||
10035 | // * if the predicate was false in the first iteration then the predicate | ||||
10036 | // is never evaluated again, since the loop exits without taking the | ||||
10037 | // backedge. | ||||
10038 | // * if the predicate was true in the first iteration then it will | ||||
10039 | // continue to be true for all future iterations since it is | ||||
10040 | // monotonically increasing. | ||||
10041 | // | ||||
10042 | // For both the above possibilities, we can replace the loop varying | ||||
10043 | // predicate with its value on the first iteration of the loop (which is | ||||
10044 | // loop invariant). | ||||
10045 | // | ||||
10046 | // A similar reasoning applies for a monotonically decreasing predicate, by | ||||
10047 | // replacing true with false and false with true in the above two bullets. | ||||
10048 | bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing; | ||||
10049 | auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); | ||||
10050 | |||||
10051 | if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) | ||||
10052 | return None; | ||||
10053 | |||||
10054 | return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS); | ||||
10055 | } | ||||
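
// Editor's note: e.g. with ArLHS = {0,+,1}<nuw> and Pred = ICMP_UGT, the
// predicate "ArLHS u> RHS" is monotonically increasing; if the backedge is
// taken only while it holds, its value anywhere in the loop equals its value
// on iteration zero, and the returned invariant predicate is "0 u> RHS".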
10056 | |||||
10057 | Optional<ScalarEvolution::LoopInvariantPredicate> | ||||
10058 | ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations( | ||||
10059 | ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, | ||||
10060 | const Instruction *Context, const SCEV *MaxIter) { | ||||
10061 | // Try to prove the following set of facts: | ||||
10062 | // - The predicate is monotonic in the iteration space. | ||||
10063 | // - If the check does not fail on the 1st iteration: | ||||
10064 | // - No overflow will happen during first MaxIter iterations; | ||||
10065 | // - It will not fail on the MaxIter'th iteration. | ||||
10066 | // If the check does fail on the 1st iteration, we leave the loop and no | ||||
10067 | // other checks matter. | ||||
10068 | |||||
10069 | // If one of the operands is loop-invariant, force it into the RHS, otherwise bail out.
10070 | if (!isLoopInvariant(RHS, L)) { | ||||
10071 | if (!isLoopInvariant(LHS, L)) | ||||
10072 | return None; | ||||
10073 | |||||
10074 | std::swap(LHS, RHS); | ||||
10075 | Pred = ICmpInst::getSwappedPredicate(Pred); | ||||
10076 | } | ||||
10077 | |||||
10078 | auto *AR = dyn_cast<SCEVAddRecExpr>(LHS); | ||||
10079 | if (!AR || AR->getLoop() != L) | ||||
10080 | return None; | ||||
10081 | |||||
10082 | // The predicate must be relational (i.e. <, <=, >=, >). | ||||
10083 | if (!ICmpInst::isRelational(Pred)) | ||||
10084 | return None; | ||||
10085 | |||||
10086 | // TODO: Support steps other than +/- 1. | ||||
10087 | const SCEV *Step = AR->getStepRecurrence(*this); | ||||
10088 | auto *One = getOne(Step->getType()); | ||||
10089 | auto *MinusOne = getNegativeSCEV(One); | ||||
10090 | if (Step != One && Step != MinusOne) | ||||
10091 | return None; | ||||
10092 | |||||
10093 | // A type mismatch here means that MaxIter is potentially larger than the
10094 | // max unsigned value of the start type, which means we cannot prove no
10095 | // wrap for the indvar.
10096 | if (AR->getType() != MaxIter->getType()) | ||||
10097 | return None; | ||||
10098 | |||||
10099 | // Value of IV on suggested last iteration. | ||||
10100 | const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this); | ||||
10101 | // Does it still meet the requirement? | ||||
10102 | if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS)) | ||||
10103 | return None; | ||||
10104 | // Because the step is +/- 1 and MaxIter has the same type as Start (i.e. it
10105 | // does not exceed the max unsigned value of this type), this effectively proves
10106 | // that there is no wrap during the iteration. To prove that there is no | ||||
10107 | // signed/unsigned wrap, we need to check that | ||||
10108 | // Start <= Last for step = 1 or Start >= Last for step = -1. | ||||
10109 | ICmpInst::Predicate NoOverflowPred = | ||||
10110 | CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; | ||||
10111 | if (Step == MinusOne) | ||||
10112 | NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred); | ||||
10113 | const SCEV *Start = AR->getStart(); | ||||
10114 | if (!isKnownPredicateAt(NoOverflowPred, Start, Last, Context)) | ||||
10115 | return None; | ||||
10116 | |||||
10117 | // Everything is fine. | ||||
10118 | return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS); | ||||
10119 | } | ||||
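
// Editor's sketch: for AR = {0,+,1}, Pred = ICMP_SLT, RHS = 100 and
// MaxIter = 50 (all i32), Last evaluates to 50; the guard check "50 s< 100"
// and the no-overflow check "0 s<= 50" are both trivially provable, so the
// exit condition can be replaced by the invariant "0 s< 100" for the first
// 50 iterations.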
10120 | |||||
10121 | bool ScalarEvolution::isKnownPredicateViaConstantRanges( | ||||
10122 | ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { | ||||
10123 | if (HasSameValue(LHS, RHS)) | ||||
10124 | return ICmpInst::isTrueWhenEqual(Pred); | ||||
10125 | |||||
10126 | // This code is split out from isKnownPredicate because it is called from | ||||
10127 | // within isLoopEntryGuardedByCond. | ||||
10128 | |||||
10129 | auto CheckRanges = [&](const ConstantRange &RangeLHS, | ||||
10130 | const ConstantRange &RangeRHS) { | ||||
10131 | return RangeLHS.icmp(Pred, RangeRHS); | ||||
10132 | }; | ||||
10133 | |||||
10134 | // The check at the top of the function catches the case where the values are | ||||
10135 | // known to be equal. | ||||
10136 | if (Pred == CmpInst::ICMP_EQ) | ||||
10137 | return false; | ||||
10138 | |||||
10139 | if (Pred == CmpInst::ICMP_NE) { | ||||
10140 | if (CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || | ||||
10141 | CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS))) | ||||
10142 | return true; | ||||
10143 | auto *Diff = getMinusSCEV(LHS, RHS); | ||||
10144 | return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff); | ||||
10145 | } | ||||
10146 | |||||
10147 | if (CmpInst::isSigned(Pred)) | ||||
10148 | return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); | ||||
10149 | |||||
10150 | return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); | ||||
10151 | } | ||||
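
// Editor's note: e.g. if getUnsignedRange(LHS) is [0, 10) and
// getUnsignedRange(RHS) is [10, 20), ConstantRange::icmp proves ICMP_ULT,
// since every value of the first range is u< every value of the second.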
10152 | |||||
10153 | bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, | ||||
10154 | const SCEV *LHS, | ||||
10155 | const SCEV *RHS) { | ||||
10156 | // Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>, where | ||||
10157 | // C1 and C2 are constant integers. If X or Y is not an add expression, it
10158 | // is treated as X + 0 or Y + 0 respectively. C1 and C2 are returned via
10159 | // OutC1 and OutC2. | ||||
10160 | auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y, | ||||
10161 | APInt &OutC1, APInt &OutC2, | ||||
10162 | SCEV::NoWrapFlags ExpectedFlags) { | ||||
10163 | const SCEV *XNonConstOp, *XConstOp; | ||||
10164 | const SCEV *YNonConstOp, *YConstOp; | ||||
10165 | SCEV::NoWrapFlags XFlagsPresent; | ||||
10166 | SCEV::NoWrapFlags YFlagsPresent; | ||||
10167 | |||||
10168 | if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) { | ||||
10169 | XConstOp = getZero(X->getType()); | ||||
10170 | XNonConstOp = X; | ||||
10171 | XFlagsPresent = ExpectedFlags; | ||||
10172 | } | ||||
10173 | if (!isa<SCEVConstant>(XConstOp) || | ||||
10174 | (XFlagsPresent & ExpectedFlags) != ExpectedFlags) | ||||
10175 | return false; | ||||
10176 | |||||
10177 | if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) { | ||||
10178 | YConstOp = getZero(Y->getType()); | ||||
10179 | YNonConstOp = Y; | ||||
10180 | YFlagsPresent = ExpectedFlags; | ||||
10181 | } | ||||
10182 | |||||
10183 | if (!isa<SCEVConstant>(YConstOp) || | ||||
10184 | (YFlagsPresent & ExpectedFlags) != ExpectedFlags) | ||||
10185 | return false; | ||||
10186 | |||||
10187 | if (YNonConstOp != XNonConstOp) | ||||
10188 | return false; | ||||
10189 | |||||
10190 | OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt(); | ||||
10191 | OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt(); | ||||
10192 | |||||
10193 | return true; | ||||
10194 | }; | ||||
10195 | |||||
10196 | APInt C1; | ||||
10197 | APInt C2; | ||||
10198 | |||||
10199 | switch (Pred) { | ||||
10200 | default: | ||||
10201 | break; | ||||
10202 | |||||
10203 | case ICmpInst::ICMP_SGE: | ||||
10204 | std::swap(LHS, RHS); | ||||
10205 | LLVM_FALLTHROUGH;
10206 | case ICmpInst::ICMP_SLE: | ||||
10207 | // (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2. | ||||
10208 | if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2)) | ||||
10209 | return true; | ||||
10210 | |||||
10211 | break; | ||||
10212 | |||||
10213 | case ICmpInst::ICMP_SGT: | ||||
10214 | std::swap(LHS, RHS); | ||||
10215 | LLVM_FALLTHROUGH;
10216 | case ICmpInst::ICMP_SLT: | ||||
10217 | // (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2. | ||||
10218 | if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2)) | ||||
10219 | return true; | ||||
10220 | |||||
10221 | break; | ||||
10222 | |||||
10223 | case ICmpInst::ICMP_UGE: | ||||
10224 | std::swap(LHS, RHS); | ||||
10225 | LLVM_FALLTHROUGH;
10226 | case ICmpInst::ICMP_ULE: | ||||
10227 | // (X + C1)<nuw> u<= (X + C2)<nuw> for C1 u<= C2. | ||||
10228 | if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2)) | ||||
10229 | return true; | ||||
10230 | |||||
10231 | break; | ||||
10232 | |||||
10233 | case ICmpInst::ICMP_UGT: | ||||
10234 | std::swap(LHS, RHS); | ||||
10235 | LLVM_FALLTHROUGH;
10236 | case ICmpInst::ICMP_ULT: | ||||
10237 | // (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2. | ||||
10238 | if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2)) | ||||
10239 | return true; | ||||
10240 | break; | ||||
10241 | } | ||||
10242 | |||||
10243 | return false; | ||||
10244 | } | ||||
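
// Editor's sketch: LHS = (X + 1)<nsw> and RHS = (X + 3)<nsw> with
// Pred = ICMP_SLT match the pattern above with C1 = 1 s< C2 = 3, so the
// predicate is known to hold; a bare X is handled as the degenerate X + 0.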
10245 | |||||
10246 | bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, | ||||
10247 | const SCEV *LHS, | ||||
10248 | const SCEV *RHS) { | ||||
10249 | if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) | ||||
10250 | return false; | ||||
10251 | |||||
10252 | // Allowing an arbitrary number of activations of isKnownPredicateViaSplitting on
10253 | // the stack can result in exponential time complexity. | ||||
10254 | SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); | ||||
10255 | |||||
10256 | // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L | ||||
10257 | // | ||||
10258 | // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use | ||||
10259 | // isKnownPredicate. isKnownPredicate is more powerful, but also more | ||||
10260 | // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the | ||||
10261 | // interesting cases seen in practice. We can consider "upgrading" L >= 0 to | ||||
10262 | // use isKnownPredicate later if needed. | ||||
10263 | return isKnownNonNegative(RHS) && | ||||
10264 | isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) && | ||||
10265 | isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS); | ||||
10266 | } | ||||
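
// Editor's note: a numeric instance of the split above, on i8. If RHS = 100
// (so RHS s>= 0) and LHS is known to lie in [0, 50], then LHS s>= 0 and
// LHS s< 100 both hold, which together give LHS u< 100, even though s< and
// u< disagree on i8 in general (the bit pattern 0xFF is -1 s< 100 yet
// 255 u> 100).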
10267 | |||||
10268 | bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB, | ||||
10269 | ICmpInst::Predicate Pred, | ||||
10270 | const SCEV *LHS, const SCEV *RHS) { | ||||
10271 | // No need to even try if we know the module has no guards. | ||||
10272 | if (!HasGuards) | ||||
10273 | return false; | ||||
10274 | |||||
10275 | return any_of(*BB, [&](const Instruction &I) { | ||||
10276 | using namespace llvm::PatternMatch; | ||||
10277 | |||||
10278 | Value *Condition; | ||||
10279 | return match(&I, m_Intrinsic<Intrinsic::experimental_guard>( | ||||
10280 | m_Value(Condition))) && | ||||
10281 | isImpliedCond(Pred, LHS, RHS, Condition, false); | ||||
10282 | }); | ||||
10283 | } | ||||
10284 | |||||
10285 | /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is | ||||
10286 | /// protected by a conditional between LHS and RHS. This is used to | ||||
10287 | /// eliminate casts.
10288 | bool | ||||
10289 | ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, | ||||
10290 | ICmpInst::Predicate Pred, | ||||
10291 | const SCEV *LHS, const SCEV *RHS) { | ||||
10292 | // Interpret a null as meaning no loop, where there is obviously no guard | ||||
10293 | // (interprocedural conditions notwithstanding). | ||||
10294 | if (!L) return true; | ||||
10295 | |||||
10296 | if (VerifyIR) | ||||
10297 | assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
10298 | "This cannot be done on broken IR!");
10299 | |||||
10300 | |||||
10301 | if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) | ||||
10302 | return true; | ||||
10303 | |||||
10304 | BasicBlock *Latch = L->getLoopLatch(); | ||||
10305 | if (!Latch) | ||||
10306 | return false; | ||||
10307 | |||||
10308 | BranchInst *LoopContinuePredicate = | ||||
10309 | dyn_cast<BranchInst>(Latch->getTerminator()); | ||||
10310 | if (LoopContinuePredicate && LoopContinuePredicate->isConditional() && | ||||
10311 | isImpliedCond(Pred, LHS, RHS, | ||||
10312 | LoopContinuePredicate->getCondition(), | ||||
10313 | LoopContinuePredicate->getSuccessor(0) != L->getHeader())) | ||||
10314 | return true; | ||||
10315 | |||||
10316 | // We don't want more than one activation of the following loops on the stack | ||||
10317 | // -- that can lead to O(n!) time complexity. | ||||
10318 | if (WalkingBEDominatingConds) | ||||
10319 | return false; | ||||
10320 | |||||
10321 | SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true); | ||||
10322 | |||||
10323 | // See if we can exploit a trip count to prove the predicate. | ||||
10324 | const auto &BETakenInfo = getBackedgeTakenInfo(L); | ||||
10325 | const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this); | ||||
10326 | if (LatchBECount != getCouldNotCompute()) { | ||||
10327 | // We know that Latch branches back to the loop header exactly | ||||
10328 | // LatchBECount times. This means the backedge condition at Latch is
10329 | // equivalent to "{0,+,1} u< LatchBECount". | ||||
10330 | Type *Ty = LatchBECount->getType(); | ||||
10331 | auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW); | ||||
10332 | const SCEV *LoopCounter = | ||||
10333 | getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags); | ||||
10334 | if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter, | ||||
10335 | LatchBECount)) | ||||
10336 | return true; | ||||
10337 | } | ||||
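
// Editor's note: e.g. if LatchBECount is the constant 7, the counter
// {0,+,1} stays in [0, 7) on every backedge, so any fact implied by
// "{0,+,1} u< 7" (for instance "{0,+,1} u< 100") holds whenever the backedge
// is taken.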
10338 | |||||
10339 | // Check conditions due to any @llvm.assume intrinsics. | ||||
10340 | for (auto &AssumeVH : AC.assumptions()) { | ||||
10341 | if (!AssumeVH) | ||||
10342 | continue; | ||||
10343 | auto *CI = cast<CallInst>(AssumeVH); | ||||
10344 | if (!DT.dominates(CI, Latch->getTerminator())) | ||||
10345 | continue; | ||||
10346 | |||||
10347 | if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) | ||||
10348 | return true; | ||||
10349 | } | ||||
10350 | |||||
10351 | // If the loop is not reachable from the entry block, we risk running into an | ||||
10352 | // infinite loop as we walk up into the dom tree. These loops do not matter | ||||
10353 | // anyway, so we just return a conservative answer when we see them. | ||||
10354 | if (!DT.isReachableFromEntry(L->getHeader())) | ||||
10355 | return false; | ||||
10356 | |||||
10357 | if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) | ||||
10358 | return true; | ||||
10359 | |||||
10360 | for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; | ||||
10361 | DTN != HeaderDTN; DTN = DTN->getIDom()) { | ||||
10362 | assert(DTN && "should reach the loop header before reaching the root!");
10363 | |||||
10364 | BasicBlock *BB = DTN->getBlock(); | ||||
10365 | if (isImpliedViaGuard(BB, Pred, LHS, RHS)) | ||||
10366 | return true; | ||||
10367 | |||||
10368 | BasicBlock *PBB = BB->getSinglePredecessor(); | ||||
10369 | if (!PBB) | ||||
10370 | continue; | ||||
10371 | |||||
10372 | BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); | ||||
10373 | if (!ContinuePredicate || !ContinuePredicate->isConditional()) | ||||
10374 | continue; | ||||
10375 | |||||
10376 | Value *Condition = ContinuePredicate->getCondition(); | ||||
10377 | |||||
10378 | // If we have an edge `E` within the loop body that dominates the only | ||||
10379 | // latch, the condition guarding `E` also guards the backedge. This | ||||
10380 | // reasoning works only for loops with a single latch. | ||||
10381 | |||||
10382 | BasicBlockEdge DominatingEdge(PBB, BB); | ||||
10383 | if (DominatingEdge.isSingleEdge()) { | ||||
10384 | // We're constructively (and conservatively) enumerating edges within the | ||||
10385 | // loop body that dominate the latch. The dominator tree better agree | ||||
10386 | // with us on this: | ||||
10387 | assert(DT.dominates(DominatingEdge, Latch) && "should be!");
10388 | |||||
10389 | if (isImpliedCond(Pred, LHS, RHS, Condition, | ||||
10390 | BB != ContinuePredicate->getSuccessor(0))) | ||||
10391 | return true; | ||||
10392 | } | ||||
10393 | } | ||||
10394 | |||||
10395 | return false; | ||||
10396 | } | ||||
10397 | |||||
10398 | bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB, | ||||
10399 | ICmpInst::Predicate Pred, | ||||
10400 | const SCEV *LHS, | ||||
10401 | const SCEV *RHS) { | ||||
10402 | if (VerifyIR) | ||||
10403 | assert(!verifyFunction(*BB->getParent(), &dbgs()) &&
10404 | "This cannot be done on broken IR!");
10405 | |||||
10406 | // If we cannot prove strict comparison (e.g. a > b), maybe we can prove | ||||
10407 | // the facts (a >= b && a != b) separately. A typical situation is when the | ||||
10408 | // non-strict comparison is known from ranges and non-equality is known from | ||||
10409 | // dominating predicates. If we are proving strict comparison, we always try | ||||
10410 | // to prove non-equality and non-strict comparison separately. | ||||
10411 | auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); | ||||
10412 | const bool ProvingStrictComparison = (Pred != NonStrictPredicate); | ||||
10413 | bool ProvedNonStrictComparison = false; | ||||
10414 | bool ProvedNonEquality = false; | ||||
10415 | |||||
10416 | auto SplitAndProve = | ||||
10417 | [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool { | ||||
10418 | if (!ProvedNonStrictComparison) | ||||
10419 | ProvedNonStrictComparison = Fn(NonStrictPredicate); | ||||
10420 | if (!ProvedNonEquality) | ||||
10421 | ProvedNonEquality = Fn(ICmpInst::ICMP_NE); | ||||
10422 | if (ProvedNonStrictComparison && ProvedNonEquality) | ||||
10423 | return true; | ||||
10424 | return false; | ||||
10425 | }; | ||||
10426 | |||||
10427 | if (ProvingStrictComparison) {
10428 | auto ProofFn = [&](ICmpInst::Predicate P) { | ||||
10429 | return isKnownViaNonRecursiveReasoning(P, LHS, RHS); | ||||
10430 | }; | ||||
10431 | if (SplitAndProve(ProofFn)) | ||||
10432 | return true; | ||||
10433 | } | ||||
10434 | |||||
10435 | // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. | ||||
10436 | auto ProveViaGuard = [&](const BasicBlock *Block) { | ||||
10437 | if (isImpliedViaGuard(Block, Pred, LHS, RHS)) | ||||
10438 | return true; | ||||
10439 | if (ProvingStrictComparison) { | ||||
10440 | auto ProofFn = [&](ICmpInst::Predicate P) { | ||||
10441 | return isImpliedViaGuard(Block, P, LHS, RHS); | ||||
10442 | }; | ||||
10443 | if (SplitAndProve(ProofFn)) | ||||
10444 | return true; | ||||
10445 | } | ||||
10446 | return false; | ||||
10447 | }; | ||||
10448 | |||||
10449 | // Try to prove (Pred, LHS, RHS) using isImpliedCond. | ||||
10450 | auto ProveViaCond = [&](const Value *Condition, bool Inverse) { | ||||
10451 | const Instruction *Context = &BB->front(); | ||||
| |||||
10452 | if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, Context)) | ||||
10453 | return true; | ||||
10454 | if (ProvingStrictComparison) { | ||||
10455 | auto ProofFn = [&](ICmpInst::Predicate P) { | ||||
10456 | return isImpliedCond(P, LHS, RHS, Condition, Inverse, Context); | ||||
10457 | }; | ||||
10458 | if (SplitAndProve(ProofFn)) | ||||
10459 | return true; | ||||
10460 | } | ||||
10461 | return false; | ||||
10462 | }; | ||||
10463 | |||||
10464 | // Starting at the block's predecessor, climb up the predecessor chain, as long | ||||
10465 | // as there are predecessors that can be found that have unique successors | ||||
10466 | // leading to the original block. | ||||
10467 | const Loop *ContainingLoop = LI.getLoopFor(BB); | ||||
10468 | const BasicBlock *PredBB; | ||||
10469 | if (ContainingLoop && ContainingLoop->getHeader() == BB) | ||||
10470 | PredBB = ContainingLoop->getLoopPredecessor(); | ||||
10471 | else | ||||
10472 | PredBB = BB->getSinglePredecessor(); | ||||
10473 | for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB); | ||||
10474 | Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { | ||||
10475 | if (ProveViaGuard(Pair.first)) | ||||
10476 | return true; | ||||
10477 | |||||
10478 | const BranchInst *LoopEntryPredicate = | ||||
10479 | dyn_cast<BranchInst>(Pair.first->getTerminator()); | ||||
10480 | if (!LoopEntryPredicate || | ||||
10481 | LoopEntryPredicate->isUnconditional()) | ||||
10482 | continue; | ||||
10483 | |||||
10484 | if (ProveViaCond(LoopEntryPredicate->getCondition(), | ||||
10485 | LoopEntryPredicate->getSuccessor(0) != Pair.second)) | ||||
10486 | return true; | ||||
10487 | } | ||||
10488 | |||||
10489 | // Check conditions due to any @llvm.assume intrinsics. | ||||
10490 | for (auto &AssumeVH : AC.assumptions()) { | ||||
10491 | if (!AssumeVH) | ||||
10492 | continue; | ||||
10493 | auto *CI = cast<CallInst>(AssumeVH); | ||||
10494 | if (!DT.dominates(CI, BB)) | ||||
10495 | continue; | ||||
10496 | |||||
10497 | if (ProveViaCond(CI->getArgOperand(0), false)) | ||||
10498 | return true; | ||||
10499 | } | ||||
10500 | |||||
10501 | return false; | ||||
10502 | } | ||||
10503 | |||||
10504 | bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, | ||||
10505 | ICmpInst::Predicate Pred, | ||||
10506 | const SCEV *LHS, | ||||
10507 | const SCEV *RHS) { | ||||
10508 | // Interpret a null as meaning no loop, where there is obviously no guard | ||||
10509 | // (interprocedural conditions notwithstanding). | ||||
10510 | if (!L) | ||||
10511 | return false; | ||||
10512 | |||||
10513 | // Both LHS and RHS must be available at loop entry. | ||||
10514 | assert(isAvailableAtLoopEntry(LHS, L) &&((void)0) | ||||
10515 | "LHS is not available at Loop Entry")((void)0); | ||||
10516 | assert(isAvailableAtLoopEntry(RHS, L) &&((void)0) | ||||
10517 | "RHS is not available at Loop Entry")((void)0); | ||||
10518 | |||||
10519 | if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) | ||||
10520 | return true; | ||||
10521 | |||||
10522 | return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS); | ||||
10523 | } | ||||
10524 | |||||
10525 | bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, | ||||
10526 | const SCEV *RHS, | ||||
10527 | const Value *FoundCondValue, bool Inverse, | ||||
10528 | const Instruction *Context) { | ||||
10529 | // A false condition implies anything. Do not bother analyzing it further. | ||||
10530 | if (FoundCondValue == | ||||
10531 | ConstantInt::getBool(FoundCondValue->getContext(), Inverse)) | ||||
10532 | return true; | ||||
10533 | |||||
10534 | if (!PendingLoopPredicates.insert(FoundCondValue).second) | ||||
10535 | return false; | ||||
10536 | |||||
10537 | auto ClearOnExit = | ||||
10538 | make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); }); | ||||
10539 | |||||
10540 | // Recursively handle And and Or conditions. | ||||
10541 | const Value *Op0, *Op1; | ||||
10542 | if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) { | ||||
10543 | if (!Inverse) | ||||
10544 | return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) || | ||||
10545 | isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context); | ||||
10546 | } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) { | ||||
10547 | if (Inverse) | ||||
10548 | return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) || | ||||
10549 | isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context); | ||||
10550 | } | ||||
10551 | |||||
10552 | const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue); | ||||
10553 | if (!ICI) return false; | ||||
10554 | |||||
10555 | // Now that we have found a conditional branch that dominates the loop or controls | ||||
10556 | // the loop latch, check whether it is the comparison we are looking for. | ||||
10557 | ICmpInst::Predicate FoundPred; | ||||
10558 | if (Inverse) | ||||
10559 | FoundPred = ICI->getInversePredicate(); | ||||
10560 | else | ||||
10561 | FoundPred = ICI->getPredicate(); | ||||
10562 | |||||
10563 | const SCEV *FoundLHS = getSCEV(ICI->getOperand(0)); | ||||
10564 | const SCEV *FoundRHS = getSCEV(ICI->getOperand(1)); | ||||
10565 | |||||
10566 | return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, Context); | ||||
10567 | } | ||||
10568 | |||||
10569 | bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, | ||||
10570 | const SCEV *RHS, | ||||
10571 | ICmpInst::Predicate FoundPred, | ||||
10572 | const SCEV *FoundLHS, const SCEV *FoundRHS, | ||||
10573 | const Instruction *Context) { | ||||
10574 | // Balance the types. | ||||
10575 | if (getTypeSizeInBits(LHS->getType()) < | ||||
10576 | getTypeSizeInBits(FoundLHS->getType())) { | ||||
10577 | // For unsigned and equality predicates, try to prove that both found | ||||
10578 | // operands fit into narrow unsigned range. If so, try to prove facts in | ||||
10579 | // narrow types. | ||||
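// E.g. if LHS/RHS are i32 while FoundLHS/FoundRHS are i64 values known to
// lie in [0, 2^32), truncating the found operands to i32 preserves their
// values, so the found fact can be re-checked entirely in i32.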
10580 | if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy()) { | ||||
10581 | auto *NarrowType = LHS->getType(); | ||||
10582 | auto *WideType = FoundLHS->getType(); | ||||
10583 | auto BitWidth = getTypeSizeInBits(NarrowType); | ||||
10584 | const SCEV *MaxValue = getZeroExtendExpr( | ||||
10585 | getConstant(APInt::getMaxValue(BitWidth)), WideType); | ||||
10586 | if (isKnownPredicate(ICmpInst::ICMP_ULE, FoundLHS, MaxValue) && | ||||
10587 | isKnownPredicate(ICmpInst::ICMP_ULE, FoundRHS, MaxValue)) { | ||||
10588 | const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType); | ||||
10589 | const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType); | ||||
10590 | if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS, | ||||
10591 | TruncFoundRHS, Context)) | ||||
10592 | return true; | ||||
10593 | } | ||||
10594 | } | ||||
10595 | |||||
10596 | if (LHS->getType()->isPointerTy()) | ||||
10597 | return false; | ||||
10598 | if (CmpInst::isSigned(Pred)) { | ||||
10599 | LHS = getSignExtendExpr(LHS, FoundLHS->getType()); | ||||
10600 | RHS = getSignExtendExpr(RHS, FoundLHS->getType()); | ||||
10601 | } else { | ||||
10602 | LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); | ||||
10603 | RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); | ||||
10604 | } | ||||
10605 | } else if (getTypeSizeInBits(LHS->getType()) > | ||||
10606 | getTypeSizeInBits(FoundLHS->getType())) { | ||||
10607 | if (FoundLHS->getType()->isPointerTy()) | ||||
10608 | return false; | ||||
10609 | if (CmpInst::isSigned(FoundPred)) { | ||||
10610 | FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); | ||||
10611 | FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); | ||||
10612 | } else { | ||||
10613 | FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); | ||||
10614 | FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); | ||||
10615 | } | ||||
10616 | } | ||||
10617 | return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS, | ||||
10618 | FoundRHS, Context); | ||||
10619 | } | ||||
10620 | |||||
10621 | bool ScalarEvolution::isImpliedCondBalancedTypes( | ||||
10622 | ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, | ||||
10623 | ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS, | ||||
10624 | const Instruction *Context) { | ||||
10625 | assert(getTypeSizeInBits(LHS->getType()) ==((void)0) | ||||
10626 | getTypeSizeInBits(FoundLHS->getType()) &&((void)0) | ||||
10627 | "Types should be balanced!")((void)0); | ||||
10628 | // Canonicalize the query to match the way instcombine will have | ||||
10629 | // canonicalized the comparison. | ||||
10630 | if (SimplifyICmpOperands(Pred, LHS, RHS)) | ||||
10631 | if (LHS == RHS) | ||||
10632 | return CmpInst::isTrueWhenEqual(Pred); | ||||
10633 | if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) | ||||
10634 | if (FoundLHS == FoundRHS) | ||||
10635 | return CmpInst::isFalseWhenEqual(FoundPred); | ||||
10636 | |||||
10637 | // Check to see if we can make the LHS or RHS match. | ||||
10638 | if (LHS == FoundRHS || RHS == FoundLHS) { | ||||
10639 | if (isa<SCEVConstant>(RHS)) { | ||||
10640 | std::swap(FoundLHS, FoundRHS); | ||||
10641 | FoundPred = ICmpInst::getSwappedPredicate(FoundPred); | ||||
10642 | } else { | ||||
10643 | std::swap(LHS, RHS); | ||||
10644 | Pred = ICmpInst::getSwappedPredicate(Pred); | ||||
10645 | } | ||||
10646 | } | ||||
10647 | |||||
10648 | // Check whether the found predicate is the same as the desired predicate. | ||||
10649 | if (FoundPred == Pred) | ||||
10650 | return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context); | ||||
10651 | |||||
10652 | // Check whether swapping the found predicate makes it the same as the | ||||
10653 | // desired predicate. | ||||
10654 | if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { | ||||
10655 | // We can write the implication | ||||
10656 | // 0. LHS Pred RHS <- FoundLHS SwapPred FoundRHS | ||||
10657 | // using one of the following ways: | ||||
10658 | // 1. LHS Pred RHS <- FoundRHS Pred FoundLHS | ||||
10659 | // 2. RHS SwapPred LHS <- FoundLHS SwapPred FoundRHS | ||||
10660 | // 3. LHS Pred RHS <- ~FoundLHS Pred ~FoundRHS | ||||
10661 | // 4. ~LHS SwapPred ~RHS <- FoundLHS SwapPred FoundRHS | ||||
10662 | // Forms 1. and 2. require swapping the operands of one condition. Don't | ||||
10663 | // do this if it would break canonical constant/addrec ordering. | ||||
10664 | if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS)) | ||||
10665 | return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS, | ||||
10666 | Context); | ||||
10667 | if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS)) | ||||
10668 | return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, Context); | ||||
10669 | |||||
10670 | // Don't try to getNotSCEV pointers. | ||||
10671 | if (LHS->getType()->isPointerTy() || FoundLHS->getType()->isPointerTy()) | ||||
10672 | return false; | ||||
10673 | |||||
10674 | // There's no clear preference between forms 3. and 4., try both. | ||||
10675 | return isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS), | ||||
10676 | FoundLHS, FoundRHS, Context) || | ||||
10677 | isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS), | ||||
10678 | getNotSCEV(FoundRHS), Context); | ||||
10679 | } | ||||
10680 | |||||
10681 | // An unsigned comparison is the same as a signed comparison when both | ||||
10682 | // operands are non-negative. | ||||
10683 | if (CmpInst::isUnsigned(FoundPred) && | ||||
10684 | CmpInst::getSignedPredicate(FoundPred) == Pred && | ||||
10685 | isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) | ||||
10686 | return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context); | ||||
10687 | |||||
10688 | // Check if we can make progress by sharpening ranges. | ||||
10689 | if (FoundPred == ICmpInst::ICMP_NE && | ||||
10690 | (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { | ||||
10691 | |||||
10692 | const SCEVConstant *C = nullptr; | ||||
10693 | const SCEV *V = nullptr; | ||||
10694 | |||||
10695 | if (isa<SCEVConstant>(FoundLHS)) { | ||||
10696 | C = cast<SCEVConstant>(FoundLHS); | ||||
10697 | V = FoundRHS; | ||||
10698 | } else { | ||||
10699 | C = cast<SCEVConstant>(FoundRHS); | ||||
10700 | V = FoundLHS; | ||||
10701 | } | ||||
10702 | |||||
10703 | // The guarding predicate tells us that C != V. If the known range | ||||
10704 | // of V is [C, t), we can sharpen the range to [C + 1, t). The | ||||
10705 | // range we consider has to correspond to the same signedness as the | ||||
10706 | // predicate we're interested in folding. | ||||
10707 | |||||
10708 | APInt Min = ICmpInst::isSigned(Pred) ? | ||||
10709 | getSignedRangeMin(V) : getUnsignedRangeMin(V); | ||||
10710 | |||||
10711 | if (Min == C->getAPInt()) { | ||||
10712 | // Given (V >= Min && V != Min) we conclude V >= (Min + 1). | ||||
10713 | // This is true even if (Min + 1) wraps around -- in case of | ||||
10714 | // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). | ||||
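// E.g. in unsigned i8 arithmetic with Min = 255: SharperMin wraps to 0,
// and "V u>= 0" holds for every V, so the conclusion remains valid.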
10715 | |||||
10716 | APInt SharperMin = Min + 1; | ||||
10717 | |||||
10718 | switch (Pred) { | ||||
10719 | case ICmpInst::ICMP_SGE: | ||||
10720 | case ICmpInst::ICMP_UGE: | ||||
10721 | // We know V `Pred` SharperMin. If this implies LHS `Pred` | ||||
10722 | // RHS, we're done. | ||||
10723 | if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin), | ||||
10724 | Context)) | ||||
10725 | return true; | ||||
10726 | LLVM_FALLTHROUGH[[gnu::fallthrough]]; | ||||
10727 | |||||
10728 | case ICmpInst::ICMP_SGT: | ||||
10729 | case ICmpInst::ICMP_UGT: | ||||
10730 | // We know from the range information that (V `Pred` Min || | ||||
10731 | // V == Min). We know from the guarding condition that !(V | ||||
10732 | // == Min). This gives us | ||||
10733 | // | ||||
10734 | // V `Pred` Min || V == Min && !(V == Min) | ||||
10735 | // => V `Pred` Min | ||||
10736 | // | ||||
10737 | // If V `Pred` Min implies LHS `Pred` RHS, we're done. | ||||
10738 | |||||
10739 | if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), | ||||
10740 | Context)) | ||||
10741 | return true; | ||||
10742 | break; | ||||
10743 | |||||
10744 | // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively. | ||||
10745 | case ICmpInst::ICMP_SLE: | ||||
10746 | case ICmpInst::ICMP_ULE: | ||||
10747 | if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, | ||||
10748 | LHS, V, getConstant(SharperMin), Context)) | ||||
10749 | return true; | ||||
10750 | LLVM_FALLTHROUGH[[gnu::fallthrough]]; | ||||
10751 | |||||
10752 | case ICmpInst::ICMP_SLT: | ||||
10753 | case ICmpInst::ICMP_ULT: | ||||
10754 | if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, | ||||
10755 | LHS, V, getConstant(Min), Context)) | ||||
10756 | return true; | ||||
10757 | break; | ||||
10758 | |||||
10759 | default: | ||||
10760 | // No change | ||||
10761 | break; | ||||
10762 | } | ||||
10763 | } | ||||
10764 | } | ||||
10765 | |||||
10766 | // Check whether the actual condition is beyond sufficient. | ||||
10767 | if (FoundPred == ICmpInst::ICMP_EQ) | ||||
10768 | if (ICmpInst::isTrueWhenEqual(Pred)) | ||||
10769 | if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context)) | ||||
10770 | return true; | ||||
10771 | if (Pred == ICmpInst::ICMP_NE) | ||||
10772 | if (!ICmpInst::isTrueWhenEqual(FoundPred)) | ||||
10773 | if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, | ||||
10774 | Context)) | ||||
10775 | return true; | ||||
10776 | |||||
10777 | // Otherwise assume the worst. | ||||
10778 | return false; | ||||
10779 | } | ||||
10780 | |||||
10781 | bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, | ||||
10782 | const SCEV *&L, const SCEV *&R, | ||||
10783 | SCEV::NoWrapFlags &Flags) { | ||||
10784 | const auto *AE = dyn_cast<SCEVAddExpr>(Expr); | ||||
10785 | if (!AE || AE->getNumOperands() != 2) | ||||
10786 | return false; | ||||
10787 | |||||
10788 | L = AE->getOperand(0); | ||||
10789 | R = AE->getOperand(1); | ||||
10790 | Flags = AE->getNoWrapFlags(); | ||||
10791 | return true; | ||||
10792 | } | ||||
10793 | |||||
10794 | Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, | ||||
10795 | const SCEV *Less) { | ||||
10796 | // We avoid subtracting expressions here because this function is usually | ||||
10797 | // fairly deep in the call stack (i.e. is called many times). | ||||
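// For example (illustrative): for More = (%x + 10) and Less = (%x + 4),
// splitBinaryAdd below yields C2 = 10 and C1 = 4 over the same %x, and we
// return 6 without ever constructing a SCEV for (More - Less).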
10798 | |||||
10799 | // X - X = 0. | ||||
10800 | if (More == Less) | ||||
10801 | return APInt(getTypeSizeInBits(More->getType()), 0); | ||||
10802 | |||||
10803 | if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { | ||||
10804 | const auto *LAR = cast<SCEVAddRecExpr>(Less); | ||||
10805 | const auto *MAR = cast<SCEVAddRecExpr>(More); | ||||
10806 | |||||
10807 | if (LAR->getLoop() != MAR->getLoop()) | ||||
10808 | return None; | ||||
10809 | |||||
10810 | // We look at affine expressions only; not for correctness but to keep | ||||
10811 | // getStepRecurrence cheap. | ||||
10812 | if (!LAR->isAffine() || !MAR->isAffine()) | ||||
10813 | return None; | ||||
10814 | |||||
10815 | if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) | ||||
10816 | return None; | ||||
10817 | |||||
10818 | Less = LAR->getStart(); | ||||
10819 | More = MAR->getStart(); | ||||
10820 | |||||
10821 | // fall through | ||||
10822 | } | ||||
10823 | |||||
10824 | if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { | ||||
10825 | const auto &M = cast<SCEVConstant>(More)->getAPInt(); | ||||
10826 | const auto &L = cast<SCEVConstant>(Less)->getAPInt(); | ||||
10827 | return M - L; | ||||
10828 | } | ||||
10829 | |||||
10830 | SCEV::NoWrapFlags Flags; | ||||
10831 | const SCEV *LLess = nullptr, *RLess = nullptr; | ||||
10832 | const SCEV *LMore = nullptr, *RMore = nullptr; | ||||
10833 | const SCEVConstant *C1 = nullptr, *C2 = nullptr; | ||||
10834 | // Compare (X + C1) vs X. | ||||
10835 | if (splitBinaryAdd(Less, LLess, RLess, Flags)) | ||||
10836 | if ((C1 = dyn_cast<SCEVConstant>(LLess))) | ||||
10837 | if (RLess == More) | ||||
10838 | return -(C1->getAPInt()); | ||||
10839 | |||||
10840 | // Compare X vs (X + C2). | ||||
10841 | if (splitBinaryAdd(More, LMore, RMore, Flags)) | ||||
10842 | if ((C2 = dyn_cast<SCEVConstant>(LMore))) | ||||
10843 | if (RMore == Less) | ||||
10844 | return C2->getAPInt(); | ||||
10845 | |||||
10846 | // Compare (X + C1) vs (X + C2). | ||||
10847 | if (C1 && C2 && RLess == RMore) | ||||
10848 | return C2->getAPInt() - C1->getAPInt(); | ||||
10849 | |||||
10850 | return None; | ||||
10851 | } | ||||
10852 | |||||
10853 | bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart( | ||||
10854 | ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, | ||||
10855 | const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *Context) { | ||||
10856 | // Try to recognize the following pattern: | ||||
10857 | // | ||||
10858 | // FoundRHS = ... | ||||
10859 | // ... | ||||
10860 | // loop: | ||||
10861 | // FoundLHS = {Start,+,W} | ||||
10862 | // context_bb: // Basic block from the same loop | ||||
10863 | // known(Pred, FoundLHS, FoundRHS) | ||||
10864 | // | ||||
10865 | // If some predicate is known in the context of a loop, it is also known on | ||||
10866 | // each iteration of this loop, including the first iteration. Therefore, in | ||||
10867 | // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to | ||||
10868 | // prove the original pred using this fact. | ||||
10869 | if (!Context) | ||||
10870 | return false; | ||||
10871 | const BasicBlock *ContextBB = Context->getParent(); | ||||
10872 | // Make sure AR varies in the context block. | ||||
10873 | if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) { | ||||
10874 | const Loop *L = AR->getLoop(); | ||||
10875 | // Make sure that the context belongs to the loop and executes on the 1st | ||||
10876 | // iteration (if it ever executes at all). | ||||
10877 | if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) | ||||
10878 | return false; | ||||
10879 | if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop())) | ||||
10880 | return false; | ||||
10881 | return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS); | ||||
10882 | } | ||||
10883 | |||||
10884 | if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) { | ||||
10885 | const Loop *L = AR->getLoop(); | ||||
10886 | // Make sure that the context belongs to the loop and executes on the 1st | ||||
10887 | // iteration (if it ever executes at all). | ||||
10888 | if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) | ||||
10889 | return false; | ||||
10890 | if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop())) | ||||
10891 | return false; | ||||
10892 | return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart()); | ||||
10893 | } | ||||
10894 | |||||
10895 | return false; | ||||
10896 | } | ||||
10897 | |||||
10898 | bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( | ||||
10899 | ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, | ||||
10900 | const SCEV *FoundLHS, const SCEV *FoundRHS) { | ||||
10901 | if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) | ||||
10902 | return false; | ||||
10903 | |||||
10904 | const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); | ||||
10905 | if (!AddRecLHS) | ||||
10906 | return false; | ||||
10907 | |||||
10908 | const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); | ||||
10909 | if (!AddRecFoundLHS) | ||||
10910 | return false; | ||||
10911 | |||||
10912 | // We'd like to let SCEV reason about control dependencies, so we constrain | ||||
10913 | // both the inequalities to be about add recurrences on the same loop. This | ||||
10914 | // way we can use isLoopEntryGuardedByCond later. | ||||
10915 | |||||
10916 | const Loop *L = AddRecFoundLHS->getLoop(); | ||||
10917 | if (L != AddRecLHS->getLoop()) | ||||
10918 | return false; | ||||
10919 | |||||
10920 | // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) | ||||
10921 | // | ||||
10922 | // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) | ||||
10923 | // ... (2) | ||||
10924 | // | ||||
10925 | // Informal proof for (2), assuming (1) [*]: | ||||
10926 | // | ||||
10927 | // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] | ||||
10928 | // | ||||
10929 | // Then | ||||
10930 | // | ||||
10931 | // FoundLHS s< FoundRHS s< INT_MIN - C | ||||
10932 | // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] | ||||
10933 | // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] | ||||
10934 | // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< | ||||
10935 | // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] | ||||
10936 | // <=> FoundLHS + C s< FoundRHS + C | ||||
10937 | // | ||||
10938 | // [*]: (1) can be proved by ruling out overflow. | ||||
10939 | // | ||||
10940 | // [**]: This can be proved by analyzing all the four possibilities: | ||||
10941 | // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and | ||||
10942 | // (A s>= 0, B s>= 0). | ||||
10943 | // | ||||
10944 | // Note: | ||||
10945 | // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" | ||||
10946 | // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS | ||||
10947 | // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS | ||||
10948 | // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is | ||||
10949 | // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + | ||||
10950 | // C)". | ||||
10951 | |||||
10952 | Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); | ||||
10953 | Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); | ||||
10954 | if (!LDiff || !RDiff || *LDiff != *RDiff) | ||||
10955 | return false; | ||||
10956 | |||||
10957 | if (LDiff->isMinValue()) | ||||
10958 | return true; | ||||
10959 | |||||
10960 | APInt FoundRHSLimit; | ||||
10961 | |||||
10962 | if (Pred == CmpInst::ICMP_ULT) { | ||||
10963 | FoundRHSLimit = -(*RDiff); | ||||
10964 | } else { | ||||
10965 | assert(Pred == CmpInst::ICMP_SLT && "Checked above!")((void)0); | ||||
10966 | FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; | ||||
10967 | } | ||||
10968 | |||||
10969 | // Try to prove (1) or (2), as needed. | ||||
10970 | return isAvailableAtLoopEntry(FoundRHS, L) && | ||||
10971 | isLoopEntryGuardedByCond(L, Pred, FoundRHS, | ||||
10972 | getConstant(FoundRHSLimit)); | ||||
10973 | } | ||||
10974 | |||||
10975 | bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred, | ||||
10976 | const SCEV *LHS, const SCEV *RHS, | ||||
10977 | const SCEV *FoundLHS, | ||||
10978 | const SCEV *FoundRHS, unsigned Depth) { | ||||
10979 | const PHINode *LPhi = nullptr, *RPhi = nullptr; | ||||
10980 | |||||
10981 | auto ClearOnExit = make_scope_exit([&]() { | ||||
10982 | if (LPhi) { | ||||
10983 | bool Erased = PendingMerges.erase(LPhi); | ||||
10984 | assert(Erased && "Failed to erase LPhi!")((void)0); | ||||
10985 | (void)Erased; | ||||
10986 | } | ||||
10987 | if (RPhi) { | ||||
10988 | bool Erased = PendingMerges.erase(RPhi); | ||||
10989 | assert(Erased && "Failed to erase RPhi!")((void)0); | ||||
10990 | (void)Erased; | ||||
10991 | } | ||||
10992 | }); | ||||
10993 | |||||
10994 | // Find the respective Phis and check that they are not already pending. | ||||
10995 | if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) | ||||
10996 | if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) { | ||||
10997 | if (!PendingMerges.insert(Phi).second) | ||||
10998 | return false; | ||||
10999 | LPhi = Phi; | ||||
11000 | } | ||||
11001 | if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS)) | ||||
11002 | if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) { | ||||
11003 | // If we detect a loop of Phi nodes being processed by this method, for | ||||
11004 | // example: | ||||
11005 | // | ||||
11006 | // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ] | ||||
11007 | // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ] | ||||
11008 | // | ||||
11009 | // we don't want to deal with a case that complex, so we return the | ||||
11010 | // conservative answer false. | ||||
11011 | if (!PendingMerges.insert(Phi).second) | ||||
11012 | return false; | ||||
11013 | RPhi = Phi; | ||||
11014 | } | ||||
11015 | |||||
11016 | // If none of LHS, RHS is a Phi, nothing to do here. | ||||
11017 | if (!LPhi && !RPhi) | ||||
11018 | return false; | ||||
11019 | |||||
11020 | // If there is a SCEVUnknown Phi we are interested in, make it left. | ||||
11021 | if (!LPhi) { | ||||
11022 | std::swap(LHS, RHS); | ||||
11023 | std::swap(FoundLHS, FoundRHS); | ||||
11024 | std::swap(LPhi, RPhi); | ||||
11025 | Pred = ICmpInst::getSwappedPredicate(Pred); | ||||
11026 | } | ||||
11027 | |||||
11028 | assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!")((void)0); | ||||
11029 | const BasicBlock *LBB = LPhi->getParent(); | ||||
11030 | const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); | ||||
11031 | |||||
11032 | auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) { | ||||
11033 | return isKnownViaNonRecursiveReasoning(Pred, S1, S2) || | ||||
11034 | isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) || | ||||
11035 | isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth); | ||||
11036 | }; | ||||
11037 | |||||
11038 | if (RPhi && RPhi->getParent() == LBB) { | ||||
11039 | // Case one: RHS is also a SCEVUnknown Phi from the same basic block. | ||||
11040 | // If we compare two Phis from the same block, and for each entry block | ||||
11041 | // the predicate is true for incoming values from this block, then the | ||||
11042 | // predicate is also true for the Phis. | ||||
11043 | for (const BasicBlock *IncBB : predecessors(LBB)) { | ||||
11044 | const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); | ||||
11045 | const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB)); | ||||
11046 | if (!ProvedEasily(L, R)) | ||||
11047 | return false; | ||||
11048 | } | ||||
11049 | } else if (RAR && RAR->getLoop()->getHeader() == LBB) { | ||||
11050 | // Case two: RHS is also a Phi from the same basic block, and it is an | ||||
11051 | // AddRec. This means there is a loop which has both AddRec and Unknown | ||||
11052 | // PHIs; for it we can compare the incoming values of the AddRec from above | ||||
11053 | // the loop and from the latch with the respective incoming values of LPhi. | ||||
11054 | // TODO: Generalize to handle loops with many inputs in a header. | ||||
11055 | if (LPhi->getNumIncomingValues() != 2) return false; | ||||
11056 | |||||
11057 | auto *RLoop = RAR->getLoop(); | ||||
11058 | auto *Predecessor = RLoop->getLoopPredecessor(); | ||||
11059 | assert(Predecessor && "Loop with AddRec with no predecessor?")((void)0); | ||||
11060 | const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor)); | ||||
11061 | if (!ProvedEasily(L1, RAR->getStart())) | ||||
11062 | return false; | ||||
11063 | auto *Latch = RLoop->getLoopLatch(); | ||||
11064 | assert(Latch && "Loop with AddRec with no latch?")((void)0); | ||||
11065 | const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch)); | ||||
11066 | if (!ProvedEasily(L2, RAR->getPostIncExpr(*this))) | ||||
11067 | return false; | ||||
11068 | } else { | ||||
11069 | // In all other cases, go over the inputs of LHS and compare each of them to | ||||
11070 | // RHS; the predicate is true for (LHS, RHS) if it is true for all such pairs. | ||||
11071 | // At this point RHS is either a non-Phi, or it is a Phi from some block | ||||
11072 | // different from LBB. | ||||
11073 | for (const BasicBlock *IncBB : predecessors(LBB)) { | ||||
11074 | // Check that RHS is available in this block. | ||||
11075 | if (!dominates(RHS, IncBB)) | ||||
11076 | return false; | ||||
11077 | const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); | ||||
11078 | // Make sure L does not refer to a value from a potentially previous | ||||
11079 | // iteration of a loop. | ||||
11080 | if (!properlyDominates(L, IncBB)) | ||||
11081 | return false; | ||||
11082 | if (!ProvedEasily(L, RHS)) | ||||
11083 | return false; | ||||
11084 | } | ||||
11085 | } | ||||
11086 | return true; | ||||
11087 | } | ||||
11088 | |||||
11089 | bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, | ||||
11090 | const SCEV *LHS, const SCEV *RHS, | ||||
11091 | const SCEV *FoundLHS, | ||||
11092 | const SCEV *FoundRHS, | ||||
11093 | const Instruction *Context) { | ||||
11094 | if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) | ||||
11095 | return true; | ||||
11096 | |||||
11097 | if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) | ||||
11098 | return true; | ||||
11099 | |||||
11100 | if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS, | ||||
11101 | Context)) | ||||
11102 | return true; | ||||
11103 | |||||
11104 | return isImpliedCondOperandsHelper(Pred, LHS, RHS, | ||||
11105 | FoundLHS, FoundRHS); | ||||
11106 | } | ||||
11107 | |||||
11108 | /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values? | ||||
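/// For example, IsMinMaxConsistingOf<SCEVSMaxExpr>(smax(A, B), A) is true,
/// while IsMinMaxConsistingOf<SCEVSMaxExpr>(A, A) is false (A is not a smax).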
11109 | template <typename MinMaxExprType> | ||||
11110 | static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, | ||||
11111 | const SCEV *Candidate) { | ||||
11112 | const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr); | ||||
11113 | if (!MinMaxExpr) | ||||
11114 | return false; | ||||
11115 | |||||
11116 | return is_contained(MinMaxExpr->operands(), Candidate); | ||||
11117 | } | ||||
11118 | |||||
11119 | static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, | ||||
11120 | ICmpInst::Predicate Pred, | ||||
11121 | const SCEV *LHS, const SCEV *RHS) { | ||||
11122 | // If both sides are affine addrecs for the same loop, with equal | ||||
11123 | // steps, and we know the recurrences don't wrap, then we only | ||||
11124 | // need to check the predicate on the starting values. | ||||
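// E.g. {4,+,1}<nuw> u< {10,+,1}<nuw> over the same loop follows from
// 4 u< 10: equal steps that never wrap keep the unsigned gap constant.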
11125 | |||||
11126 | if (!ICmpInst::isRelational(Pred)) | ||||
11127 | return false; | ||||
11128 | |||||
11129 | const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); | ||||
11130 | if (!LAR) | ||||
11131 | return false; | ||||
11132 | const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); | ||||
11133 | if (!RAR) | ||||
11134 | return false; | ||||
11135 | if (LAR->getLoop() != RAR->getLoop()) | ||||
11136 | return false; | ||||
11137 | if (!LAR->isAffine() || !RAR->isAffine()) | ||||
11138 | return false; | ||||
11139 | |||||
11140 | if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) | ||||
11141 | return false; | ||||
11142 | |||||
11143 | SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? | ||||
11144 | SCEV::FlagNSW : SCEV::FlagNUW; | ||||
11145 | if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW)) | ||||
11146 | return false; | ||||
11147 | |||||
11148 | return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart()); | ||||
11149 | } | ||||
11150 | |||||
11151 | /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max | ||||
11152 | /// expression? | ||||
11153 | static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, | ||||
11154 | ICmpInst::Predicate Pred, | ||||
11155 | const SCEV *LHS, const SCEV *RHS) { | ||||
11156 | switch (Pred) { | ||||
11157 | default: | ||||
11158 | return false; | ||||
11159 | |||||
11160 | case ICmpInst::ICMP_SGE: | ||||
11161 | std::swap(LHS, RHS); | ||||
11162 | LLVM_FALLTHROUGH[[gnu::fallthrough]]; | ||||
11163 | case ICmpInst::ICMP_SLE: | ||||
11164 | return | ||||
11165 | // min(A, ...) <= A | ||||
11166 | IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) || | ||||
11167 | // A <= max(A, ...) | ||||
11168 | IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS); | ||||
11169 | |||||
11170 | case ICmpInst::ICMP_UGE: | ||||
11171 | std::swap(LHS, RHS); | ||||
11172 | LLVM_FALLTHROUGH[[gnu::fallthrough]]; | ||||
11173 | case ICmpInst::ICMP_ULE: | ||||
11174 | return | ||||
11175 | // min(A, ...) <= A | ||||
11176 | IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) || | ||||
11177 | // A <= max(A, ...) | ||||
11178 | IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS); | ||||
11179 | } | ||||
11180 | |||||
11181 | llvm_unreachable("covered switch fell through?!")__builtin_unreachable(); | ||||
11182 | } | ||||
11183 | |||||
11184 | bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred, | ||||
11185 | const SCEV *LHS, const SCEV *RHS, | ||||
11186 | const SCEV *FoundLHS, | ||||
11187 | const SCEV *FoundRHS, | ||||
11188 | unsigned Depth) { | ||||
11189 | assert(getTypeSizeInBits(LHS->getType()) ==((void)0) | ||||
11190 | getTypeSizeInBits(RHS->getType()) &&((void)0) | ||||
11191 | "LHS and RHS have different sizes?")((void)0); | ||||
11192 | assert(getTypeSizeInBits(FoundLHS->getType()) ==((void)0) | ||||
11193 | getTypeSizeInBits(FoundRHS->getType()) &&((void)0) | ||||
11194 | "FoundLHS and FoundRHS have different sizes?")((void)0); | ||||
11195 | // We want to avoid hurting compile time by analyzing overly large trees. | ||||
11196 | if (Depth > MaxSCEVOperationsImplicationDepth) | ||||
11197 | return false; | ||||
11198 | |||||
11199 | // We only want to work with GT comparison so far. | ||||
11200 | if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) { | ||||
11201 | Pred = CmpInst::getSwappedPredicate(Pred); | ||||
11202 | std::swap(LHS, RHS); | ||||
11203 | std::swap(FoundLHS, FoundRHS); | ||||
11204 | } | ||||
11205 | |||||
11206 | // For unsigned, try to reduce it to the corresponding signed comparison. | ||||
11207 | if (Pred == ICmpInst::ICMP_UGT) | ||||
11208 | // We can replace unsigned predicate with its signed counterpart if all | ||||
11209 | // involved values are non-negative. | ||||
11210 | // TODO: We could have better support for unsigned. | ||||
11211 | if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) { | ||||
11212 | // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing | ||||
11213 | // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us | ||||
11214 | // use this fact to prove that LHS and RHS are non-negative. | ||||
11215 | const SCEV *MinusOne = getMinusOne(LHS->getType()); | ||||
11216 | if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS, | ||||
11217 | FoundRHS) && | ||||
11218 | isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS, | ||||
11219 | FoundRHS)) | ||||
11220 | Pred = ICmpInst::ICMP_SGT; | ||||
11221 | } | ||||
11222 | |||||
11223 | if (Pred != ICmpInst::ICMP_SGT) | ||||
11224 | return false; | ||||
11225 | |||||
11226 | auto GetOpFromSExt = [&](const SCEV *S) { | ||||
11227 | if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S)) | ||||
11228 | return Ext->getOperand(); | ||||
11229 | // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off | ||||
11230 | // the constant in some cases. | ||||
11231 | return S; | ||||
11232 | }; | ||||
11233 | |||||
11234 | // Acquire values from extensions. | ||||
11235 | auto *OrigLHS = LHS; | ||||
11236 | auto *OrigFoundLHS = FoundLHS; | ||||
11237 | LHS = GetOpFromSExt(LHS); | ||||
11238 | FoundLHS = GetOpFromSExt(FoundLHS); | ||||
11239 | |||||
11240 | // Whether the SGT predicate can be proved trivially or via the found context. | ||||
11241 | auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) { | ||||
11242 | return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) || | ||||
11243 | isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS, | ||||
11244 | FoundRHS, Depth + 1); | ||||
11245 | }; | ||||
11246 | |||||
11247 | if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) { | ||||
11248 | // We want to avoid creation of any new non-constant SCEV. Since we are | ||||
11249 | // going to compare the operands to RHS, we should be certain that we don't | ||||
11250 | // need any size extensions for this. So let's decline all cases when the | ||||
11251 | // sizes of types of LHS and RHS do not match. | ||||
11252 | // TODO: Maybe try to get RHS from sext to catch more cases? | ||||
11253 | if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType())) | ||||
11254 | return false; | ||||
11255 | |||||
11256 | // Should not overflow. | ||||
11257 | if (!LHSAddExpr->hasNoSignedWrap()) | ||||
11258 | return false; | ||||
11259 | |||||
11260 | auto *LL = LHSAddExpr->getOperand(0); | ||||
11261 | auto *LR = LHSAddExpr->getOperand(1); | ||||
11262 | auto *MinusOne = getMinusOne(RHS->getType()); | ||||
11263 | |||||
11264 | // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context. | ||||
11265 | auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) { | ||||
11266 | return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS); | ||||
11267 | }; | ||||
11268 | // Try to prove the following rule: | ||||
11269 | // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS). | ||||
11270 | // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS). | ||||
11271 | if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL)) | ||||
11272 | return true; | ||||
11273 | } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) { | ||||
11274 | Value *LL, *LR; | ||||
11275 | // FIXME: Once we have SDiv implemented, we can get rid of this matching. | ||||
11276 | |||||
11277 | using namespace llvm::PatternMatch; | ||||
11278 | |||||
11279 | if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) { | ||||
11280 | // Rules for division. | ||||
11281 | // We are going to perform some comparisons with Denominator and its | ||||
11282 | // derivative expressions. In the general case, creating a SCEV for it may | ||||
11283 | // lead to a complex analysis of the entire graph, and in particular it | ||||
11284 | // can request trip count recalculation for the same loop. This would | ||||
11285 | // cache as SCEVCouldNotCompute to avoid the infinite recursion. To avoid | ||||
11286 | // this, we only want to create SCEVs that are constants in this section. | ||||
11287 | // So we bail if Denominator is not a constant. | ||||
11288 | if (!isa<ConstantInt>(LR)) | ||||
11289 | return false; | ||||
11290 | |||||
11291 | auto *Denominator = cast<SCEVConstant>(getSCEV(LR)); | ||||
11292 | |||||
11293 | // We want to make sure that LHS = FoundLHS / Denominator. If it is so, | ||||
11294 | // then a SCEV for the numerator already exists and matches with FoundLHS. | ||||
11295 | auto *Numerator = getExistingSCEV(LL); | ||||
11296 | if (!Numerator || Numerator->getType() != FoundLHS->getType()) | ||||
11297 | return false; | ||||
11298 | |||||
11299 | // Make sure that the numerator matches with FoundLHS and the denominator | ||||
11300 | // is positive. | ||||
11301 | if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator)) | ||||
11302 | return false; | ||||
11303 | |||||
11304 | auto *DTy = Denominator->getType(); | ||||
11305 | auto *FRHSTy = FoundRHS->getType(); | ||||
11306 | if (DTy->isPointerTy() != FRHSTy->isPointerTy()) | ||||
11307 | // One of types is a pointer and another one is not. We cannot extend | ||||
11308 | // them properly to a wider type, so let us just reject this case. | ||||
11309 | // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help | ||||
11310 | // to avoid this check. | ||||
11311 | return false; | ||||
11312 | |||||
11313 | // Given that: | ||||
11314 | // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0. | ||||
11315 | auto *WTy = getWiderType(DTy, FRHSTy); | ||||
11316 | auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy); | ||||
11317 | auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy); | ||||
11318 | |||||
11319 | // Try to prove the following rule: | ||||
11320 | // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS). | ||||
11321 | // For example, given that FoundLHS > 2. It means that FoundLHS is at | ||||
11322 | // least 3. If we divide it by Denominator < 4, we will have at least 1. | ||||
11323 | auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2)); | ||||
11324 | if (isKnownNonPositive(RHS) && | ||||
11325 | IsSGTViaContext(FoundRHSExt, DenomMinusTwo)) | ||||
11326 | return true; | ||||
11327 | |||||
11328 | // Try to prove the following rule: | ||||
11329 | // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS). | ||||
11330 | // For example, given that FoundLHS > -3. Then FoundLHS is at least -2. | ||||
11331 | // If we divide it by Denominator > 2, then: | ||||
11332 | // 1. If FoundLHS is negative, then the result is 0. | ||||
11333 | // 2. If FoundLHS is non-negative, then the result is non-negative. | ||||
11334 | // Either way, the result is non-negative. | ||||
11335 | auto *MinusOne = getMinusOne(WTy); | ||||
11336 | auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt); | ||||
11337 | if (isKnownNegative(RHS) && | ||||
11338 | IsSGTViaContext(FoundRHSExt, NegDenomMinusOne)) | ||||
11339 | return true; | ||||
11340 | } | ||||
11341 | } | ||||
11342 | |||||
11343 | // If our expression contained SCEVUnknown Phis, and we split it down and now | ||||
11344 | // need to prove something for them, try to prove the predicate for every | ||||
11345 | // possible incoming values of those Phis. | ||||
11346 | if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1)) | ||||
11347 | return true; | ||||
11348 | |||||
11349 | return false; | ||||
11350 | } | ||||
11351 | |||||
11352 | static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred, | ||||
11353 | const SCEV *LHS, const SCEV *RHS) { | ||||
11354 | // zext x u<= sext x, sext x s<= zext x | ||||
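// E.g. for i8 x = -1: (zext i8 -1 to i16) = 255 while (sext i8 -1 to i16)
// is 65535 unsigned / -1 signed, so zext u<= sext and sext s<= zext hold;
// for non-negative x the two extensions agree.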
11355 | switch (Pred) { | ||||
11356 | case ICmpInst::ICMP_SGE: | ||||
11357 | std::swap(LHS, RHS); | ||||
11358 | LLVM_FALLTHROUGH[[gnu::fallthrough]]; | ||||
11359 | case ICmpInst::ICMP_SLE: { | ||||
11360 | // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt. | ||||
11361 | const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS); | ||||
11362 | const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS); | ||||
11363 | if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) | ||||
11364 | return true; | ||||
11365 | break; | ||||
11366 | } | ||||
11367 | case ICmpInst::ICMP_UGE: | ||||
11368 | std::swap(LHS, RHS); | ||||
11369 | LLVM_FALLTHROUGH[[gnu::fallthrough]]; | ||||
11370 | case ICmpInst::ICMP_ULE: { | ||||
11371 | // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt. | ||||
11372 | const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS); | ||||
11373 | const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS); | ||||
11374 | if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) | ||||
11375 | return true; | ||||
11376 | break; | ||||
11377 | } | ||||
11378 | default: | ||||
11379 | break; | ||||
11380 | }; | ||||
11381 | return false; | ||||
11382 | } | ||||
11383 | |||||
11384 | bool | ||||
11385 | ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred, | ||||
11386 | const SCEV *LHS, const SCEV *RHS) { | ||||
11387 | return isKnownPredicateExtendIdiom(Pred, LHS, RHS) || | ||||
11388 | isKnownPredicateViaConstantRanges(Pred, LHS, RHS) || | ||||
11389 | IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) || | ||||
11390 | IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) || | ||||
11391 | isKnownPredicateViaNoOverflow(Pred, LHS, RHS); | ||||
11392 | } | ||||
11393 | |||||
11394 | bool | ||||
11395 | ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, | ||||
11396 | const SCEV *LHS, const SCEV *RHS, | ||||
11397 | const SCEV *FoundLHS, | ||||
11398 | const SCEV *FoundRHS) { | ||||
11399 | switch (Pred) { | ||||
11400 | default: llvm_unreachable("Unexpected ICmpInst::Predicate value!")__builtin_unreachable(); | ||||
11401 | case ICmpInst::ICMP_EQ: | ||||
11402 | case ICmpInst::ICMP_NE: | ||||
11403 | if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS)) | ||||
11404 | return true; | ||||
11405 | break; | ||||
11406 | case ICmpInst::ICMP_SLT: | ||||
11407 | case ICmpInst::ICMP_SLE: | ||||
11408 | if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) && | ||||
11409 | isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS)) | ||||
11410 | return true; | ||||
11411 | break; | ||||
11412 | case ICmpInst::ICMP_SGT: | ||||
11413 | case ICmpInst::ICMP_SGE: | ||||
11414 | if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) && | ||||
11415 | isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS)) | ||||
11416 | return true; | ||||
11417 | break; | ||||
11418 | case ICmpInst::ICMP_ULT: | ||||
11419 | case ICmpInst::ICMP_ULE: | ||||
11420 | if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) && | ||||
11421 | isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS)) | ||||
11422 | return true; | ||||
11423 | break; | ||||
11424 | case ICmpInst::ICMP_UGT: | ||||
11425 | case ICmpInst::ICMP_UGE: | ||||
11426 | if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) && | ||||
11427 | isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS)) | ||||
11428 | return true; | ||||
11429 | break; | ||||
11430 | } | ||||
11431 | |||||
11432 | // Maybe it can be proved via operations? | ||||
11433 | if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS)) | ||||
11434 | return true; | ||||
11435 | |||||
11436 | return false; | ||||
11437 | } | ||||
11438 | |||||
11439 | bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred, | ||||
11440 | const SCEV *LHS, | ||||
11441 | const SCEV *RHS, | ||||
11442 | const SCEV *FoundLHS, | ||||
11443 | const SCEV *FoundRHS) { | ||||
11444 | if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS)) | ||||
11445 | // The restriction on `FoundRHS` could be lifted easily -- it exists only to | ||||
11446 | // reduce the compile time impact of this optimization. | ||||
11447 | return false; | ||||
11448 | |||||
11449 | Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS); | ||||
11450 | if (!Addend) | ||||
11451 | return false; | ||||
11452 | |||||
11453 | const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt(); | ||||
11454 | |||||
11455 | // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the | ||||
11456 | // antecedent "`FoundLHS` `Pred` `FoundRHS`". | ||||
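// E.g. if the antecedent is "FoundLHS u< 8", FoundLHSRange is [0, 8); with
// Addend = 5 the derived LHSRange below becomes [5, 13).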
11457 | ConstantRange FoundLHSRange = | ||||
11458 | ConstantRange::makeExactICmpRegion(Pred, ConstFoundRHS); | ||||
11459 | |||||
11460 | // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: | ||||
11461 | ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); | ||||
11462 | |||||
11463 | // We can also compute the range of values for `LHS` that satisfy the | ||||
11464 | // consequent, "`LHS` `Pred` `RHS`": | ||||
11465 | const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); | ||||
11466 | // The antecedent implies the consequent if every value of `LHS` that | ||||
11467 | // satisfies the antecedent also satisfies the consequent. | ||||
11468 | return LHSRange.icmp(Pred, ConstRHS); | ||||
11469 | } | ||||
11470 | |||||
11471 | bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, | ||||
11472 | bool IsSigned) { | ||||
11473 | assert(isKnownPositive(Stride) && "Positive stride expected!")((void)0); | ||||
11474 | |||||
11475 | unsigned BitWidth = getTypeSizeInBits(RHS->getType()); | ||||
11476 | const SCEV *One = getOne(Stride->getType()); | ||||
11477 | |||||
11478 | if (IsSigned) { | ||||
11479 | APInt MaxRHS = getSignedRangeMax(RHS); | ||||
11480 | APInt MaxValue = APInt::getSignedMaxValue(BitWidth); | ||||
11481 | APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); | ||||
11482 | |||||
11483 | // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! | ||||
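// E.g. in i8: MaxRHS = 120 and MaxStrideMinusOne = 15 give
// 127 - 15 = 112 s< 120, so the IV may step past SINT_MAX and wrap.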
11484 | return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); | ||||
11485 | } | ||||
11486 | |||||
11487 | APInt MaxRHS = getUnsignedRangeMax(RHS); | ||||
11488 | APInt MaxValue = APInt::getMaxValue(BitWidth); | ||||
11489 | APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); | ||||
11490 | |||||
11491 | // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! | ||||
11492 | return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); | ||||
11493 | } | ||||
11494 | |||||
11495 | bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, | ||||
11496 | bool IsSigned) { | ||||
11497 | |||||
11498 | unsigned BitWidth = getTypeSizeInBits(RHS->getType()); | ||||
11499 | const SCEV *One = getOne(Stride->getType()); | ||||
11500 | |||||
11501 | if (IsSigned) { | ||||
11502 | APInt MinRHS = getSignedRangeMin(RHS); | ||||
11503 | APInt MinValue = APInt::getSignedMinValue(BitWidth); | ||||
11504 | APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); | ||||
11505 | |||||
11506 | // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! | ||||
11507 | return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); | ||||
11508 | } | ||||
11509 | |||||
11510 | APInt MinRHS = getUnsignedRangeMin(RHS); | ||||
11511 | APInt MinValue = APInt::getMinValue(BitWidth); | ||||
11512 | APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); | ||||
11513 | |||||
11514 | // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! | ||||
11515 | return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); | ||||
11516 | } | ||||
11517 | |||||
11518 | const SCEV *ScalarEvolution::getUDivCeilSCEV(const SCEV *N, const SCEV *D) { | ||||
11519 | // umin(N, 1) + floor((N - umin(N, 1)) / D) | ||||
11520 | // This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin | ||||
11521 | // expression fixes the case of N=0. | ||||
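// E.g. N = 7, D = 3: umin(7, 1) = 1, and 1 + floor(6 / 3) = 3 = ceil(7 / 3);
// for N = 0 both addends are 0, giving the expected result 0.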
11522 | const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType())); | ||||
11523 | const SCEV *NMinusOne = getMinusSCEV(N, MinNOne); | ||||
11524 | return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D)); | ||||
11525 | } | ||||
11526 | |||||
11527 | const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, | ||||
11528 | const SCEV *Stride, | ||||
11529 | const SCEV *End, | ||||
11530 | unsigned BitWidth, | ||||
11531 | bool IsSigned) { | ||||
11532 | // The logic in this function assumes we can represent a positive stride. | ||||
11533 | // If we can't, the backedge-taken count must be zero. | ||||
11534 | if (IsSigned && BitWidth == 1) | ||||
11535 | return getZero(Stride->getType()); | ||||
11536 | |||||
11537 | // Calculate the maximum backedge count based on the range of values | ||||
11538 | // permitted by Start, End, and Stride. | ||||
11539 | APInt MinStart = | ||||
11540 | IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start); | ||||
11541 | |||||
11542 | APInt MinStride = | ||||
11543 | IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride); | ||||
11544 | |||||
11545 | // We assume either the stride is positive, or the backedge-taken count | ||||
11546 | // is zero. So force StrideForMaxBECount to be at least one. | ||||
11547 | APInt One(BitWidth, 1); | ||||
11548 | APInt StrideForMaxBECount = IsSigned ? APIntOps::smax(One, MinStride) | ||||
11549 | : APIntOps::umax(One, MinStride); | ||||
11550 | |||||
11551 | APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth) | ||||
11552 | : APInt::getMaxValue(BitWidth); | ||||
11553 | APInt Limit = MaxValue - (StrideForMaxBECount - 1); | ||||
11554 | |||||
11555 | // Although End can be a MAX expression, we estimate MaxEnd considering only | ||||
11556 | // the case End = RHS of the loop termination condition. This is safe because | ||||
11557 | // in the other case (End - Start) is zero, leading to a zero maximum backedge | ||||
11558 | // taken count. | ||||
11559 | APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit) | ||||
11560 | : APIntOps::umin(getUnsignedRangeMax(End), Limit); | ||||
11561 | |||||
11562 | // MaxBECount = ceil((max(MaxEnd, MinStart) - MinStart) / Stride) | ||||
11563 | MaxEnd = IsSigned ? APIntOps::smax(MaxEnd, MinStart) | ||||
11564 | : APIntOps::umax(MaxEnd, MinStart); | ||||
11565 | |||||
11566 | return getUDivCeilSCEV(getConstant(MaxEnd - MinStart) /* Delta */, | ||||
11567 | getConstant(StrideForMaxBECount) /* Step */); | ||||
11568 | } | ||||
11569 | |||||
11570 | ScalarEvolution::ExitLimit | ||||
11571 | ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, | ||||
11572 | const Loop *L, bool IsSigned, | ||||
11573 | bool ControlsExit, bool AllowPredicates) { | ||||
11574 | SmallPtrSet<const SCEVPredicate *, 4> Predicates; | ||||
11575 | |||||
11576 | const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); | ||||
11577 | bool PredicatedIV = false; | ||||
11578 | |||||
11579 | if (!IV && AllowPredicates) { | ||||
11580 | // Try to make this an AddRec using runtime tests, in the first X | ||||
11581 | // iterations of this loop, where X is the SCEV expression found by the | ||||
11582 | // algorithm below. | ||||
11583 | IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); | ||||
11584 | PredicatedIV = true; | ||||
11585 | } | ||||
11586 | |||||
11587 | // Avoid weird loops | ||||
11588 | if (!IV || IV->getLoop() != L || !IV->isAffine()) | ||||
11589 | return getCouldNotCompute(); | ||||
11590 | |||||
11591 | // A precondition of this method is that the condition being analyzed | ||||
11592 | // reaches an exiting branch which dominates the latch. Given that, we can | ||||
11593 | // assume that an increment which violates the nowrap specification and | ||||
11594 | // produces poison must cause undefined behavior when the resulting poison | ||||
11595 | // value is branched upon and thus we can conclude that the backedge is | ||||
11596 | // taken no more often than would be required to produce that poison value. | ||||
11597 | // Note that a well defined loop can exit on the iteration which violates | ||||
11598 | // the nowrap specification if there is another exit (either explicit or | ||||
11599 | // implicit/exceptional) which causes the loop to execute before the | ||||
11600 | // exiting instruction we're analyzing would trigger UB. | ||||
11601 | auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW; | ||||
11602 | bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType); | ||||
11603 | ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; | ||||
11604 | |||||
11605 | const SCEV *Stride = IV->getStepRecurrence(*this); | ||||
11606 | |||||
11607 | bool PositiveStride = isKnownPositive(Stride); | ||||
11608 | |||||
11609 | // Avoid negative or zero stride values. | ||||
11610 | if (!PositiveStride) { | ||||
11611 | // We can compute the correct backedge taken count for loops with unknown | ||||
11612 | // strides if we can prove that the loop is not an infinite loop with side | ||||
11613 | // effects. Here's the loop structure we are trying to handle - | ||||
11614 | // | ||||
11615 | // i = start | ||||
11616 | // do { | ||||
11617 | // A[i] = i; | ||||
11618 | // i += s; | ||||
11619 | // } while (i < end); | ||||
11620 | // | ||||
11621 | // The backedge taken count for such loops is evaluated as - | ||||
11622 | // (max(end, start + stride) - start - 1) /u stride | ||||
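// For instance (illustrative): start = 0, stride = 3, end = 10 gives
// (max(10, 3) - 0 - 1) /u 3 = 3 taken backedges (the body runs for
// i = 0, 3, 6, 9 and the backedge is taken three times).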
11623 | // | ||||
11624 | // The additional preconditions that we need to check to prove the correctness | ||||
11625 | // of the above formula are as follows - | ||||
11626 | // | ||||
11627 | // a) IV is either nuw or nsw depending upon signedness (indicated by the | ||||
11628 | // NoWrap flag). | ||||
11629 | // b) the loop is single-exit with no side effects. | ||||
11630 | // | ||||
11631 | // | ||||
11632 | // Precondition a) implies that if the stride is negative, this is a single | ||||
11633 | // trip loop. The backedge taken count formula reduces to zero in this case. | ||||
11634 | // | ||||
11635 | // Precondition b) implies that if rhs is invariant in L, then unknown | ||||
11636 | // stride being zero means the backedge can't be taken without UB. | ||||
11637 | // | ||||
11638 | // The positive stride case is the same as isKnownPositive(Stride) returning | ||||
11639 | // true (original behavior of the function). | ||||
11640 | // | ||||
11641 | // We want to make sure that the stride is truly unknown as there are edge | ||||
11642 | // cases where ScalarEvolution propagates no wrap flags to the | ||||
11643 | // post-increment/decrement IV even though the increment/decrement operation | ||||
11644 | // itself is wrapping. The computed backedge taken count may be wrong in | ||||
11645 | // such cases. This is prevented by checking that the stride is not known to | ||||
11646 | // be either positive or non-positive. For example, no wrap flags are | ||||
11647 | // propagated to the post-increment IV of this loop with a trip count of 2 - | ||||
11648 | // | ||||
11649 | // unsigned char i; | ||||
11650 | // for(i=127; i<128; i+=129) | ||||
11651 | // A[i] = i; | ||||
11652 | // | ||||
11653 | if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) || | ||||
11654 | !loopIsFiniteByAssumption(L)) | ||||
11655 | return getCouldNotCompute(); | ||||
11656 | |||||
11657 | if (!isKnownNonZero(Stride)) { | ||||
11658 | // If we have a step of zero, and RHS isn't invariant in L, we don't know | ||||
11659 | // if it might eventually be greater than start and if so, on which | ||||
11660 | // iteration. We can't even produce a useful upper bound. | ||||
11661 | if (!isLoopInvariant(RHS, L)) | ||||
11662 | return getCouldNotCompute(); | ||||
11663 | |||||
11664 | // We allow a potentially zero stride, but we need to divide by stride | ||||
11665 | // below. Since the loop can't be infinite and this check must control | ||||
11666 | // the sole exit, we can infer the exit must be taken on the first | ||||
11667 | // iteration (i.e. backedge count = 0) if the stride is zero. Given that, | ||||
11668 | // we know the numerator in the divides below must be zero, so we can | ||||
11669 | // pick an arbitrary non-zero value for the denominator (i.e. the stride) | ||||
11670 | // and produce the right result. | ||||
11671 | // FIXME: Handle the case where Stride is poison? | ||||
11672 | auto wouldZeroStrideBeUB = [&]() { | ||||
11673 | // Proof by contradiction. Suppose the stride were zero. If we can | ||||
11674 | // prove that the backedge *is* taken on the first iteration, then since | ||||
11675 | // we know this condition controls the sole exit, we must have an | ||||
11676 | // infinite loop. We can't have a (well defined) infinite loop per | ||||
11677 | // check just above. | ||||
11678 | // Note: The (Start - Stride) term is used to get the start' term from | ||||
11679 | // (start' + stride,+,stride). Remember that we only care about the | ||||
11680 | // result of this expression when stride == 0 at runtime. | ||||
11681 | auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride); | ||||
11682 | return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS); | ||||
11683 | }; | ||||
11684 | if (!wouldZeroStrideBeUB()) { | ||||
11685 | Stride = getUMaxExpr(Stride, getOne(Stride->getType())); | ||||
11686 | } | ||||
11687 | } | ||||
11688 | } else if (!Stride->isOne() && !NoWrap) { | ||||
11689 | auto isUBOnWrap = [&]() { | ||||
11690 | // Can we prove this loop *must* be UB if overflow of IV occurs? | ||||
11691 | // Reasoning goes as follows: | ||||
11692 | // * Suppose the IV did self wrap. | ||||
11693 | // * If Stride evenly divides the iteration space, then once wrap | ||||
11694 | // occurs, the loop must revisit the same values. | ||||
11695 | // * We know that RHS is invariant, and that none of those values | ||||
11696 | // caused this exit to be taken previously. Thus, this exit is | ||||
11697 | // dynamically dead. | ||||
11698 | // * If this is the sole exit, then a dead exit implies the loop | ||||
11699 | // must be infinite if there are no abnormal exits. | ||||
11700 | // * If the loop were infinite, then it must either not be mustprogress | ||||
11701 | // or have side effects. Otherwise, it must be UB. | ||||
11702 | // * It can't (by assumption), be UB so we have contradicted our | ||||
11703 | // premise and can conclude the IV did not in fact self-wrap. | ||||
11704 | // From no-self-wrap, we need to then prove no-(un)signed-wrap. This | ||||
11705 | // follows trivially from the fact that every (un)signed-wrapped, but | ||||
11706 | // not self-wrapped, value must be less than the last value before the | ||||
11707 | // (un)signed wrap. Since we know that last value didn't cause an exit, | ||||
11708 | // neither will any smaller one. | ||||
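      | // | ||||
      | // As a hypothetical illustration: for an 8-bit IV with Stride == 32 (a | ||||
      | // power of two), 256 is a multiple of 32, so eight increments return the | ||||
      | // IV exactly to Start; after a self-wrap the IV only revisits values | ||||
      | // that already failed to cause an exit. | ||||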
11709 | |||||
11710 | if (!isLoopInvariant(RHS, L)) | ||||
11711 | return false; | ||||
11712 | |||||
11713 | auto *StrideC = dyn_cast<SCEVConstant>(Stride); | ||||
11714 | if (!StrideC || !StrideC->getAPInt().isPowerOf2()) | ||||
11715 | return false; | ||||
11716 | |||||
11717 | if (!ControlsExit || !loopHasNoAbnormalExits(L)) | ||||
11718 | return false; | ||||
11719 | |||||
11720 | return loopIsFiniteByAssumption(L); | ||||
11721 | }; | ||||
11722 | |||||
11723 | // Avoid proven overflow cases: this will ensure that the backedge taken | ||||
11724 | // count will not generate any unsigned overflow. Relaxed no-overflow | ||||
11725 | // conditions exploit NoWrapFlags, allowing us to optimize in the | ||||
11726 | // presence of undefined behavior, as in the C language. | ||||
11727 | if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap()) | ||||
11728 | return getCouldNotCompute(); | ||||
11729 | } | ||||
11730 | |||||
11731 | // On all paths just preceding, we established the following invariant: | ||||
11732 | //   IV can be assumed not to overflow up to and including the exiting | ||||
11733 | //   iteration. We proved this in one of two ways: | ||||
11734 | //   1) We can show overflow doesn't occur before the exiting iteration | ||||
11735 | //      (1a: canIVOverflowOnLT returns false, or 1b: the step is one) | ||||
11736 | //   2) We can show that if overflow occurs, the loop must execute UB | ||||
11737 | //      before any possible exit. | ||||
11738 | // Note that we have not yet proved RHS invariant (in general). | ||||
11739 | |||||
11740 | const SCEV *Start = IV->getStart(); | ||||
11741 | |||||
11742 | // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond. | ||||
11743 | // Use integer-typed versions for actual computation. | ||||
11744 | const SCEV *OrigStart = Start; | ||||
11745 | const SCEV *OrigRHS = RHS; | ||||
11746 | if (Start->getType()->isPointerTy()) { | ||||
11747 | Start = getLosslessPtrToIntExpr(Start); | ||||
11748 | if (isa<SCEVCouldNotCompute>(Start)) | ||||
11749 | return Start; | ||||
11750 | } | ||||
11751 | if (RHS->getType()->isPointerTy()) { | ||||
11752 | RHS = getLosslessPtrToIntExpr(RHS); | ||||
11753 | if (isa<SCEVCouldNotCompute>(RHS)) | ||||
11754 | return RHS; | ||||
11755 | } | ||||
11756 | |||||
11757 | // When the RHS is not invariant, we do not know the end bound of the loop and | ||||
11758 | // cannot calculate the ExactBECount needed by ExitLimit. However, we can | ||||
11759 | // calculate the MaxBECount, given the start, stride and max value for the end | ||||
11760 | // bound of the loop (RHS), and the fact that IV does not overflow (which is | ||||
11761 | // checked above). | ||||
11762 | if (!isLoopInvariant(RHS, L)) { | ||||
11763 | const SCEV *MaxBECount = computeMaxBECountForLT( | ||||
11764 | Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); | ||||
11765 | return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount, | ||||
11766 | false /*MaxOrZero*/, Predicates); | ||||
11767 | } | ||||
11768 | |||||
11769 | // We use the expression (max(End,Start)-Start)/Stride to describe the | ||||
11770 | // backedge count: if the backedge is taken at least once, max(End,Start) | ||||
11771 | // is End and the result is as above; if not, max(End,Start) is Start and | ||||
11772 | // we get a backedge count of zero. | ||||
11773 | const SCEV *BECount = nullptr; | ||||
11774 | auto *StartMinusStride = getMinusSCEV(OrigStart, Stride); | ||||
11775 | // Can we prove max(RHS,Start) > Start - Stride? | ||||
11776 | if (isLoopEntryGuardedByCond(L, Cond, StartMinusStride, Start) && | ||||
11777 | isLoopEntryGuardedByCond(L, Cond, StartMinusStride, RHS)) { | ||||
11778 | // In this case, we can use a refined formula for computing backedge taken | ||||
11779 | // count. The general formula remains: | ||||
11780 | // "End-Start /uceiling Stride" where "End = max(RHS,Start)" | ||||
11781 | // We want to use the alternate formula: | ||||
11782 | // "((End - 1) - (Start - Stride)) /u Stride" | ||||
11783 | // Let's do a quick case analysis to show these are equivalent under | ||||
11784 | // our precondition that max(RHS,Start) > Start - Stride. | ||||
11785 | // * For RHS <= Start, the backedge-taken count must be zero. | ||||
11786 | // "((End - 1) - (Start - Stride)) /u Stride" reduces to | ||||
11787 | // "((Start - 1) - (Start - Stride)) /u Stride" which simplies to | ||||
11788 | // "Stride - 1 /u Stride" which is indeed zero for all non-zero values | ||||
11789 | // of Stride. For 0 stride, we've use umin(1,Stride) above, reducing | ||||
11790 | // this to the stride of 1 case. | ||||
11791 | // * For RHS >= Start, the backedge count must be "RHS-Start /uceil Stride". | ||||
11792 | // "((End - 1) - (Start - Stride)) /u Stride" reduces to | ||||
11793 | // "((RHS - 1) - (Start - Stride)) /u Stride" reassociates to | ||||
11794 | // "((RHS - (Start - Stride) - 1) /u Stride". | ||||
11795 | // Our preconditions trivially imply no overflow in that form. | ||||
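      | // As a quick numeric check of the alternate form (our own, signed | ||||
      | // example): Start = 2, Stride = 3, RHS = End = 10 gives | ||||
      | //   ((10 - 1) - (2 - 3)) /u 3 = 10 /u 3 = 3, | ||||
      | // matching the three backedges taken for i = 2, 5, 8. | ||||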
11796 | const SCEV *MinusOne = getMinusOne(Stride->getType()); | ||||
11797 | const SCEV *Numerator = | ||||
11798 | getMinusSCEV(getAddExpr(RHS, MinusOne), StartMinusStride); | ||||
11799 | if (!isa<SCEVCouldNotCompute>(Numerator)) { | ||||
11800 | BECount = getUDivExpr(Numerator, Stride); | ||||
11801 | } | ||||
11802 | } | ||||
11803 | |||||
11804 | const SCEV *BECountIfBackedgeTaken = nullptr; | ||||
11805 | if (!BECount) { | ||||
11806 | auto canProveRHSGreaterThanEqualStart = [&]() { | ||||
11807 | auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; | ||||
11808 | if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart)) | ||||
11809 | return true; | ||||
11810 | |||||
11811 | // (RHS > Start - 1) implies RHS >= Start. | ||||
11812 | // * "RHS >= Start" is trivially equivalent to "RHS > Start - 1" if | ||||
11813 | // "Start - 1" doesn't overflow. | ||||
11814 | // * For signed comparison, if Start - 1 does overflow, it's equal | ||||
11815 | // to INT_MAX, and "RHS >s INT_MAX" is trivially false. | ||||
11816 | // * For unsigned comparison, if Start - 1 does overflow, it's equal | ||||
11817 | // to UINT_MAX, and "RHS >u UINT_MAX" is trivially false. | ||||
11818 | // | ||||
11819 | // FIXME: Should isLoopEntryGuardedByCond do this for us? | ||||
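      | // (For example, unsigned with Start == 0: Start - 1 wraps to UINT_MAX, | ||||
      | // so "RHS >u UINT_MAX" can never be proven and we fail conservatively; | ||||
      | // whenever the guard is provable, RHS >= Start does follow.) | ||||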
11820 | auto CondGT = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; | ||||
11821 | auto *StartMinusOne = getAddExpr(OrigStart, | ||||
11822 | getMinusOne(OrigStart->getType())); | ||||
11823 | return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne); | ||||
11824 | }; | ||||
11825 | |||||
11826 | // If we know that RHS >= Start in the context of loop, then we know that | ||||
11827 | // max(RHS, Start) = RHS at this point. | ||||
11828 | const SCEV *End; | ||||
11829 | if (canProveRHSGreaterThanEqualStart()) { | ||||
11830 | End = RHS; | ||||
11831 | } else { | ||||
11832 | // If RHS < Start, the backedge will be taken zero times. So in | ||||
11833 | // general, we can write the backedge-taken count as: | ||||
11834 | // | ||||
11835 | // RHS >= Start ? ceil(RHS - Start) / Stride : 0 | ||||
11836 | // | ||||
11837 | // We convert it to the following to make it more convenient for SCEV: | ||||
11838 | // | ||||
11839 | // ceil(max(RHS, Start) - Start) / Stride | ||||
11840 | End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); | ||||
11841 | |||||
11842 | // See what would happen if we assume the backedge is taken. This is | ||||
11843 | // used to compute MaxBECount. | ||||
11844 | BECountIfBackedgeTaken = getUDivCeilSCEV(getMinusSCEV(RHS, Start), Stride); | ||||
11845 | } | ||||
11846 | |||||
11847 | // At this point, we know: | ||||
11848 | // | ||||
11849 | // 1. If IsSigned, Start <=s End; otherwise, Start <=u End | ||||
11850 | // 2. The index variable doesn't overflow. | ||||
11851 | // | ||||
11852 | // Therefore, we know N exists such that | ||||
11853 | // (Start + Stride * N) >= End, and computing "(Start + Stride * N)" | ||||
11854 | // doesn't overflow. | ||||
11855 | // | ||||
11856 | // Using this information, try to prove whether the addition in | ||||
11857 | // "(Start - End) + (Stride - 1)" has unsigned overflow. | ||||
11858 | const SCEV *One = getOne(Stride->getType()); | ||||
11859 | bool MayAddOverflow = [&] { | ||||
11860 | if (auto *StrideC = dyn_cast<SCEVConstant>(Stride)) { | ||||
11861 | if (StrideC->getAPInt().isPowerOf2()) { | ||||
11862 | // Suppose Stride is a power of two, and Start/End are unsigned | ||||
11863 | // integers. Let UMAX be the largest representable unsigned | ||||
11864 | // integer. | ||||
11865 | // | ||||
11866 | // By the preconditions of this function, we know | ||||
11867 | // "(Start + Stride * N) >= End", and this doesn't overflow. | ||||
11868 | // As a formula: | ||||
11869 | // | ||||
11870 | // End <= (Start + Stride * N) <= UMAX | ||||
11871 | // | ||||
11872 | // Subtracting Start from all the terms: | ||||
11873 | // | ||||
11874 | // End - Start <= Stride * N <= UMAX - Start | ||||
11875 | // | ||||
11876 | // Since Start is unsigned, UMAX - Start <= UMAX. Therefore: | ||||
11877 | // | ||||
11878 | // End - Start <= Stride * N <= UMAX | ||||
11879 | // | ||||
11880 | // Stride * N is a multiple of Stride. Therefore, | ||||
11881 | // | ||||
11882 | // End - Start <= Stride * N <= UMAX - (UMAX mod Stride) | ||||
11883 | // | ||||
11884 | // Since Stride is a power of two, UMAX + 1 is divisible by Stride. | ||||
11885 | // Therefore, UMAX mod Stride == Stride - 1. So we can write: | ||||
11886 | // | ||||
11887 | // End - Start <= Stride * N <= UMAX - Stride + 1 | ||||
11888 | // | ||||
11889 | // Dropping the middle term: | ||||
11890 | // | ||||
11891 | // End - Start <= UMAX - Stride + 1 | ||||
11892 | // | ||||
11893 | // Adding Stride - 1 to both sides: | ||||
11894 | // | ||||
11895 | // (End - Start) + (Stride - 1) <= UMAX | ||||
11896 | // | ||||
11897 | // In other words, the addition doesn't have unsigned overflow. | ||||
11898 | // | ||||
11899 | // A similar proof works if we treat Start/End as signed values. | ||||
11900 | // Just rewrite steps before "End - Start <= Stride * N <= UMAX" to | ||||
11901 | // use signed max instead of unsigned max. Note that we're trying | ||||
11902 | // to prove a lack of unsigned overflow in either case. | ||||
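      | // | ||||
      | // As a concrete instance (our numbers): with 8-bit unsigned values and | ||||
      | // Stride = 4, UMAX = 255 and UMAX mod 4 == 3, so Stride * N <= 252 == | ||||
      | // UMAX - Stride + 1, and (End - Start) + 3 <= 255 cannot wrap. | ||||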
11903 | return false; | ||||
11904 | } | ||||
11905 | } | ||||
11906 | if (Start == Stride || Start == getMinusSCEV(Stride, One)) { | ||||
11907 | // If Start is equal to Stride, (End - Start) + (Stride - 1) == End - 1. | ||||
11908 | // If !IsSigned, 0 <u Stride == Start <=u End; so 0 <u End - 1 <u End. | ||||
11909 | // If IsSigned, 0 <s Stride == Start <=s End; so 0 <s End - 1 <s End. | ||||
11910 | // | ||||
11911 | // If Start is equal to Stride - 1, (End - Start) + Stride - 1 == End. | ||||
11912 | return false; | ||||
11913 | } | ||||
11914 | return true; | ||||
11915 | }(); | ||||
11916 | |||||
11917 | const SCEV *Delta = getMinusSCEV(End, Start); | ||||
11918 | if (!MayAddOverflow) { | ||||
11919 | // floor((D + (S - 1)) / S) | ||||
11920 | // We prefer this formulation when it's legal because it needs fewer operations. | ||||
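      | // (The two agree whenever the addition cannot wrap; e.g. D = 7, S = 3: | ||||
      | // ceil(7 / 3) == 3 == floor((7 + 2) / 3).) | ||||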
11921 | BECount = | ||||
11922 | getUDivExpr(getAddExpr(Delta, getMinusSCEV(Stride, One)), Stride); | ||||
11923 | } else { | ||||
11924 | BECount = getUDivCeilSCEV(Delta, Stride); | ||||
11925 | } | ||||
11926 | } | ||||
11927 | |||||
11928 | const SCEV *MaxBECount; | ||||
11929 | bool MaxOrZero = false; | ||||
11930 | if (isa<SCEVConstant>(BECount)) { | ||||
11931 | MaxBECount = BECount; | ||||
11932 | } else if (BECountIfBackedgeTaken && | ||||
11933 | isa<SCEVConstant>(BECountIfBackedgeTaken)) { | ||||
11934 | // If we know exactly how many times the backedge will be taken if it's | ||||
11935 | // taken at least once, then the backedge count will either be that or | ||||
11936 | // zero. | ||||
11937 | MaxBECount = BECountIfBackedgeTaken; | ||||
11938 | MaxOrZero = true; | ||||
11939 | } else { | ||||
11940 | MaxBECount = computeMaxBECountForLT( | ||||
11941 | Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); | ||||
11942 | } | ||||
11943 | |||||
11944 | if (isa<SCEVCouldNotCompute>(MaxBECount) && | ||||
11945 | !isa<SCEVCouldNotCompute>(BECount)) | ||||
11946 | MaxBECount = getConstant(getUnsignedRangeMax(BECount)); | ||||
11947 | |||||
11948 | return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); | ||||
11949 | } | ||||
11950 | |||||
11951 | ScalarEvolution::ExitLimit | ||||
11952 | ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, | ||||
11953 | const Loop *L, bool IsSigned, | ||||
11954 | bool ControlsExit, bool AllowPredicates) { | ||||
11955 | SmallPtrSet<const SCEVPredicate *, 4> Predicates; | ||||
11956 | // We handle only IV > Invariant | ||||
11957 | if (!isLoopInvariant(RHS, L)) | ||||
11958 | return getCouldNotCompute(); | ||||
11959 | |||||
11960 | const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); | ||||
11961 | if (!IV && AllowPredicates) | ||||
11962 | // Try to make this an AddRec using runtime tests, in the first X | ||||
11963 | // iterations of this loop, where X is the SCEV expression found by the | ||||
11964 | // algorithm below. | ||||
11965 | IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); | ||||
11966 | |||||
11967 | // Avoid weird loops | ||||
11968 | if (!IV || IV->getLoop() != L || !IV->isAffine()) | ||||
11969 | return getCouldNotCompute(); | ||||
11970 | |||||
11971 | auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW; | ||||
11972 | bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType); | ||||
11973 | ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; | ||||
11974 | |||||
11975 | const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); | ||||
11976 | |||||
11977 | // Avoid negative or zero stride values | ||||
11978 | if (!isKnownPositive(Stride)) | ||||
11979 | return getCouldNotCompute(); | ||||
11980 | |||||
11981 | // Avoid proven overflow cases: this will ensure that the backedge taken count | ||||
11982 | // will not generate any unsigned overflow. Relaxed no-overflow conditions | ||||
11983 | // exploit NoWrapFlags, allowing us to optimize in the presence of | ||||
11984 | // undefined behavior, as in the C language. | ||||
11985 | if (!Stride->isOne() && !NoWrap) | ||||
11986 | if (canIVOverflowOnGT(RHS, Stride, IsSigned)) | ||||
11987 | return getCouldNotCompute(); | ||||
11988 | |||||
11989 | const SCEV *Start = IV->getStart(); | ||||
11990 | const SCEV *End = RHS; | ||||
11991 | if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) { | ||||
11992 | // If we know that Start >= RHS in the context of loop, then we know that | ||||
11993 | // min(RHS, Start) = RHS at this point. | ||||
11994 | if (isLoopEntryGuardedByCond( | ||||
11995 | L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS)) | ||||
11996 | End = RHS; | ||||
11997 | else | ||||
11998 | End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); | ||||
11999 | } | ||||
12000 | |||||
12001 | if (Start->getType()->isPointerTy()) { | ||||
12002 | Start = getLosslessPtrToIntExpr(Start); | ||||
12003 | if (isa<SCEVCouldNotCompute>(Start)) | ||||
12004 | return Start; | ||||
12005 | } | ||||
12006 | if (End->getType()->isPointerTy()) { | ||||
12007 | End = getLosslessPtrToIntExpr(End); | ||||
12008 | if (isa<SCEVCouldNotCompute>(End)) | ||||
12009 | return End; | ||||
12010 | } | ||||
12011 | |||||
12012 | // Compute ((Start - End) + (Stride - 1)) / Stride. | ||||
12013 | // FIXME: This can overflow. Holding off on fixing this for now; | ||||
12014 | // howManyGreaterThans will hopefully be gone soon. | ||||
12015 | const SCEV *One = getOne(Stride->getType()); | ||||
12016 | const SCEV *BECount = getUDivExpr( | ||||
12017 | getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride); | ||||
12018 | |||||
12019 | APInt MaxStart = IsSigned ? getSignedRangeMax(Start) | ||||
12020 | : getUnsignedRangeMax(Start); | ||||
12021 | |||||
12022 | APInt MinStride = IsSigned ? getSignedRangeMin(Stride) | ||||
12023 | : getUnsignedRangeMin(Stride); | ||||
12024 | |||||
12025 | unsigned BitWidth = getTypeSizeInBits(LHS->getType()); | ||||
12026 | APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) | ||||
12027 | : APInt::getMinValue(BitWidth) + (MinStride - 1); | ||||
12028 | |||||
12029 | // Although End can be a MIN expression, we estimate MinEnd considering only | ||||
12030 | // the case End = RHS. This is safe because in the other case (Start - End) | ||||
12031 | // is zero, leading to a zero maximum backedge taken count. | ||||
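      | // E.g. (our numbers, unsigned): MaxStart = 100, MinEnd = 10 and | ||||
      | // MinStride = 3 bound the maximum backedge-taken count by | ||||
      | // ceil(90 / 3) = 30. | ||||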
12032 | APInt MinEnd = | ||||
12033 | IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) | ||||
12034 | : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); | ||||
12035 | |||||
12036 | const SCEV *MaxBECount = isa<SCEVConstant>(BECount) | ||||
12037 | ? BECount | ||||
12038 | : getUDivCeilSCEV(getConstant(MaxStart - MinEnd), | ||||
12039 | getConstant(MinStride)); | ||||
12040 | |||||
12041 | if (isa<SCEVCouldNotCompute>(MaxBECount)) | ||||
12042 | MaxBECount = BECount; | ||||
12043 | |||||
12044 | return ExitLimit(BECount, MaxBECount, false, Predicates); | ||||
12045 | } | ||||
12046 | |||||
12047 | const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, | ||||
12048 | ScalarEvolution &SE) const { | ||||
12049 | if (Range.isFullSet()) // Infinite loop. | ||||
12050 | return SE.getCouldNotCompute(); | ||||
12051 | |||||
12052 | // If the start is a non-zero constant, shift the range to simplify things. | ||||
12053 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) | ||||
12054 | if (!SC->getValue()->isZero()) { | ||||
12055 | SmallVector<const SCEV *, 4> Operands(operands()); | ||||
12056 | Operands[0] = SE.getZero(SC->getType()); | ||||
12057 | const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), | ||||
12058 | getNoWrapFlags(FlagNW)); | ||||
12059 | if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) | ||||
12060 | return ShiftedAddRec->getNumIterationsInRange( | ||||
12061 | Range.subtract(SC->getAPInt()), SE); | ||||
12062 | // This is strange and shouldn't happen. | ||||
12063 | return SE.getCouldNotCompute(); | ||||
12064 | } | ||||
12065 | |||||
12066 | // The only time we can solve this is when we have all constant indices. | ||||
12067 | // Otherwise, we cannot determine the overflow conditions. | ||||
12068 | if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) | ||||
12069 | return SE.getCouldNotCompute(); | ||||
12070 | |||||
12071 | // Okay at this point we know that all elements of the chrec are constants and | ||||
12072 | // that the start element is zero. | ||||
12073 | |||||
12074 | // First check to see if the range contains zero. If not, the first | ||||
12075 | // iteration exits. | ||||
12076 | unsigned BitWidth = SE.getTypeSizeInBits(getType()); | ||||
12077 | if (!Range.contains(APInt(BitWidth, 0))) | ||||
12078 | return SE.getZero(getType()); | ||||
12079 | |||||
12080 | if (isAffine()) { | ||||
12081 | // If this is an affine expression then we have this situation: | ||||
12082 | // Solve {0,+,A} in Range === Ax in Range | ||||
12083 | |||||
12084 | // We know that zero is in the range. If A is positive then we know that | ||||
12085 | // the upper value of the range must be the first possible exit value. | ||||
12086 | // If A is negative then the lower end of the range is the last possible | ||||
12087 | // loop value. Also note that we already checked for a full range. | ||||
12088 | APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); | ||||
12089 | APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); | ||||
12090 | |||||
12091 | // The exit value should be (End+A)/A. | ||||
12092 | APInt ExitVal = (End + A).udiv(A); | ||||
12093 | ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); | ||||
12094 | |||||
12095 | // Evaluate at the exit value. If we really did fall out of the valid | ||||
12096 | // range, then we computed our trip count, otherwise wrap around or other | ||||
12097 | // things must have happened. | ||||
12098 | ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); | ||||
12099 | if (Range.contains(Val->getValue())) | ||||
12100 | return SE.getCouldNotCompute(); // Something strange happened | ||||
12101 | |||||
12102 | // Ensure that the previous value is in the range. This is a sanity check. | ||||
12103 | assert(Range.contains( | ||||
12104 |            EvaluateConstantChrecAtConstant(this, | ||||
12105 |                ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && | ||||
12106 |        "Linear scev computation is off in a bad way!"); | ||||
12107 | return SE.getConstant(ExitValue); | ||||
12108 | } | ||||
12109 | |||||
12110 | if (isQuadratic()) { | ||||
12111 | if (auto S = SolveQuadraticAddRecRange(this, Range, SE)) | ||||
12112 | return SE.getConstant(S.getValue()); | ||||
12113 | } | ||||
12114 | |||||
12115 | return SE.getCouldNotCompute(); | ||||
12116 | } | ||||
12117 | |||||
12118 | const SCEVAddRecExpr * | ||||
12119 | SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const { | ||||
12120 | assert(getNumOperands() > 1 && "AddRec with zero step?"); | ||||
12121 | // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)), | ||||
12122 | // but in this case we cannot guarantee that the value returned will be an | ||||
12123 | // AddRec because SCEV does not have a fixed point where it stops | ||||
12124 | // simplification: it is legal to return ({rec1} + {rec2}). For example, it | ||||
12125 | // may happen if we reach the arithmetic depth limit while simplifying. So | ||||
12126 | // we construct the returned value explicitly. | ||||
12127 | SmallVector<const SCEV *, 3> Ops; | ||||
12128 | // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and | ||||
12129 | // (this + Step) is {A+B,+,B+C,+...,+,N}. | ||||
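      | // For instance (our example), the post-increment form of {0,+,1,+,2} | ||||
      | // is {0+1,+,1+2,+,2} == {1,+,3,+,2}. | ||||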
12130 | for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i) | ||||
12131 | Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1))); | ||||
12132 | // We know that the last operand is not a constant zero (otherwise it would | ||||
12133 | // have been popped out earlier). This guarantees us that if the result has | ||||
12134 | // the same last operand, then it will also not be popped out, meaning that | ||||
12135 | // the returned value will be an AddRec. | ||||
12136 | const SCEV *Last = getOperand(getNumOperands() - 1); | ||||
12137 | assert(!Last->isZero() && "Recurrency with zero step?"); | ||||
12138 | Ops.push_back(Last); | ||||
12139 | return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(), | ||||
12140 | SCEV::FlagAnyWrap)); | ||||
12141 | } | ||||
12142 | |||||
12143 | // Return true when S contains at least one undef value. | ||||
12144 | static inline bool containsUndefs(const SCEV *S) { | ||||
12145 | return SCEVExprContains(S, [](const SCEV *S) { | ||||
12146 | if (const auto *SU = dyn_cast<SCEVUnknown>(S)) | ||||
12147 | return isa<UndefValue>(SU->getValue()); | ||||
12148 | return false; | ||||
12149 | }); | ||||
12150 | } | ||||
12151 | |||||
12152 | namespace { | ||||
12153 | |||||
12154 | // Collect all steps of SCEV expressions. | ||||
12155 | struct SCEVCollectStrides { | ||||
12156 | ScalarEvolution &SE; | ||||
12157 | SmallVectorImpl<const SCEV *> &Strides; | ||||
12158 | |||||
12159 | SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S) | ||||
12160 | : SE(SE), Strides(S) {} | ||||
12161 | |||||
12162 | bool follow(const SCEV *S) { | ||||
12163 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) | ||||
12164 | Strides.push_back(AR->getStepRecurrence(SE)); | ||||
12165 | return true; | ||||
12166 | } | ||||
12167 | |||||
12168 | bool isDone() const { return false; } | ||||
12169 | }; | ||||
12170 | |||||
12171 | // Collect all SCEVUnknown and SCEVMulExpr expressions. | ||||
12172 | struct SCEVCollectTerms { | ||||
12173 | SmallVectorImpl<const SCEV *> &Terms; | ||||
12174 | |||||
12175 | SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {} | ||||
12176 | |||||
12177 | bool follow(const SCEV *S) { | ||||
12178 | if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) || | ||||
12179 | isa<SCEVSignExtendExpr>(S)) { | ||||
12180 | if (!containsUndefs(S)) | ||||
12181 | Terms.push_back(S); | ||||
12182 | |||||
12183 | // Stop recursion: once we collected a term, do not walk its operands. | ||||
12184 | return false; | ||||
12185 | } | ||||
12186 | |||||
12187 | // Keep looking. | ||||
12188 | return true; | ||||
12189 | } | ||||
12190 | |||||
12191 | bool isDone() const { return false; } | ||||
12192 | }; | ||||
12193 | |||||
12194 | // Check if a SCEV contains an AddRecExpr. | ||||
12195 | struct SCEVHasAddRec { | ||||
12196 | bool &ContainsAddRec; | ||||
12197 | |||||
12198 | SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) { | ||||
12199 | ContainsAddRec = false; | ||||
12200 | } | ||||
12201 | |||||
12202 | bool follow(const SCEV *S) { | ||||
12203 | if (isa<SCEVAddRecExpr>(S)) { | ||||
12204 | ContainsAddRec = true; | ||||
12205 | |||||
12206 | // Stop recursion: once we collected a term, do not walk its operands. | ||||
12207 | return false; | ||||
12208 | } | ||||
12209 | |||||
12210 | // Keep looking. | ||||
12211 | return true; | ||||
12212 | } | ||||
12213 | |||||
12214 | bool isDone() const { return false; } | ||||
12215 | }; | ||||
12216 | |||||
12217 | // Find factors that are multiplied with an expression that (possibly as a | ||||
12218 | // subexpression) contains an AddRecExpr. In the expression: | ||||
12219 | // | ||||
12220 | // 8 * (100 + %p * %q * (%a + {0, +, 1}_loop)) | ||||
12221 | // | ||||
12222 | // "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)" | ||||
12223 | // that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size | ||||
12224 | // parameters as they form a product with an induction variable. | ||||
12225 | // | ||||
12226 | // This collector expects all array size parameters to be in the same MulExpr. | ||||
12227 | // It might be necessary to later add support for collecting parameters that are | ||||
12228 | // spread over different nested MulExpr. | ||||
12229 | struct SCEVCollectAddRecMultiplies { | ||||
12230 | SmallVectorImpl<const SCEV *> &Terms; | ||||
12231 | ScalarEvolution &SE; | ||||
12232 | |||||
12233 | SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T, ScalarEvolution &SE) | ||||
12234 | : Terms(T), SE(SE) {} | ||||
12235 | |||||
12236 | bool follow(const SCEV *S) { | ||||
12237 | if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) { | ||||
12238 | bool HasAddRec = false; | ||||
12239 | SmallVector<const SCEV *, 0> Operands; | ||||
12240 | for (auto Op : Mul->operands()) { | ||||
12241 | const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op); | ||||
12242 | if (Unknown && !isa<CallInst>(Unknown->getValue())) { | ||||
12243 | Operands.push_back(Op); | ||||
12244 | } else if (Unknown) { | ||||
12245 | HasAddRec = true; | ||||
12246 | } else { | ||||
12247 | bool ContainsAddRec = false; | ||||
12248 | SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec); | ||||
12249 | visitAll(Op, ContainsAddRecVisitor); | ||||
12250 | HasAddRec |= ContainsAddRec; | ||||
12251 | } | ||||
12252 | } | ||||
12253 | if (Operands.size() == 0) | ||||
12254 | return true; | ||||
12255 | |||||
12256 | if (!HasAddRec) | ||||
12257 | return false; | ||||
12258 | |||||
12259 | Terms.push_back(SE.getMulExpr(Operands)); | ||||
12260 | // Stop recursion: once we collected a term, do not walk its operands. | ||||
12261 | return false; | ||||
12262 | } | ||||
12263 | |||||
12264 | // Keep looking. | ||||
12265 | return true; | ||||
12266 | } | ||||
12267 | |||||
12268 | bool isDone() const { return false; } | ||||
12269 | }; | ||||
12270 | |||||
12271 | } // end anonymous namespace | ||||
12272 | |||||
12273 | /// Find parametric terms in this SCEVAddRecExpr. We first look for parameters in | ||||
12274 | /// two places: | ||||
12275 | /// 1) The strides of AddRec expressions. | ||||
12276 | /// 2) Unknowns that are multiplied with AddRec expressions. | ||||
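      | /// | ||||
      | /// E.g. (illustrative): for {%a,+,(8 * %m)}<%L>, the stride collector | ||||
      | /// finds (8 * %m), and the term collector records that MulExpr as a | ||||
      | /// parametric term. | ||||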
12277 | void ScalarEvolution::collectParametricTerms(const SCEV *Expr, | ||||
12278 | SmallVectorImpl<const SCEV *> &Terms) { | ||||
12279 | SmallVector<const SCEV *, 4> Strides; | ||||
12280 | SCEVCollectStrides StrideCollector(*this, Strides); | ||||
12281 | visitAll(Expr, StrideCollector); | ||||
12282 | |||||
12283 | LLVM_DEBUG({ | ||||
12284 |   dbgs() << "Strides:\n"; | ||||
12285 |   for (const SCEV *S : Strides) | ||||
12286 |     dbgs() << *S << "\n"; | ||||
12287 | }); | ||||
12288 | |||||
12289 | for (const SCEV *S : Strides) { | ||||
12290 | SCEVCollectTerms TermCollector(Terms); | ||||
12291 | visitAll(S, TermCollector); | ||||
12292 | } | ||||
12293 | |||||
12294 | LLVM_DEBUG({ | ||||
12295 |   dbgs() << "Terms:\n"; | ||||
12296 |   for (const SCEV *T : Terms) | ||||
12297 |     dbgs() << *T << "\n"; | ||||
12298 | }); | ||||
12299 | |||||
12300 | SCEVCollectAddRecMultiplies MulCollector(Terms, *this); | ||||
12301 | visitAll(Expr, MulCollector); | ||||
12302 | } | ||||
12303 | |||||
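      | // Illustrative walk-through (hypothetical names): with Terms = {%m * %o, | ||||
      | // %o}, the last term %o divides both, leaving {%m, 1}; the constant is | ||||
      | // erased, the recursive call pushes %m, and %o is pushed on return, so | ||||
      | // Sizes = {%m, %o}. | ||||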
12304 | static bool findArrayDimensionsRec(ScalarEvolution &SE, | ||||
12305 | SmallVectorImpl<const SCEV *> &Terms, | ||||
12306 | SmallVectorImpl<const SCEV *> &Sizes) { | ||||
12307 | int Last = Terms.size() - 1; | ||||
12308 | const SCEV *Step = Terms[Last]; | ||||
12309 | |||||
12310 | // End of recursion. | ||||
12311 | if (Last == 0) { | ||||
12312 | if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { | ||||
12313 | SmallVector<const SCEV *, 2> Qs; | ||||
12314 | for (const SCEV *Op : M->operands()) | ||||
12315 | if (!isa<SCEVConstant>(Op)) | ||||
12316 | Qs.push_back(Op); | ||||
12317 | |||||
12318 | Step = SE.getMulExpr(Qs); | ||||
12319 | } | ||||
12320 | |||||
12321 | Sizes.push_back(Step); | ||||
12322 | return true; | ||||
12323 | } | ||||
12324 | |||||
12325 | for (const SCEV *&Term : Terms) { | ||||
12326 | // Normalize the terms before the next call to findArrayDimensionsRec. | ||||
12327 | const SCEV *Q, *R; | ||||
12328 | SCEVDivision::divide(SE, Term, Step, &Q, &R); | ||||
12329 | |||||
12330 | // Bail out when GCD does not evenly divide one of the terms. | ||||
12331 | if (!R->isZero()) | ||||
12332 | return false; | ||||
12333 | |||||
12334 | Term = Q; | ||||
12335 | } | ||||
12336 | |||||
12337 | // Remove all SCEVConstants. | ||||
12338 | erase_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }); | ||||
12339 | |||||
12340 | if (Terms.size() > 0) | ||||
12341 | if (!findArrayDimensionsRec(SE, Terms, Sizes)) | ||||
12342 | return false; | ||||
12343 | |||||
12344 | Sizes.push_back(Step); | ||||
12345 | return true; | ||||
12346 | } | ||||
12347 | |||||
12348 | // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. | ||||
12349 | static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { | ||||
12350 | for (const SCEV *T : Terms) | ||||
12351 | if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); })) | ||||
12352 | return true; | ||||
12353 | |||||
12354 | return false; | ||||
12355 | } | ||||
12356 | |||||
12357 | // Return the number of product terms in S. | ||||
12358 | static inline int numberOfTerms(const SCEV *S) { | ||||
12359 | if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) | ||||
12360 | return Expr->getNumOperands(); | ||||
12361 | return 1; | ||||
12362 | } | ||||
12363 | |||||
12364 | static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { | ||||
12365 | if (isa<SCEVConstant>(T)) | ||||
12366 | return nullptr; | ||||
12367 | |||||
12368 | if (isa<SCEVUnknown>(T)) | ||||
12369 | return T; | ||||
12370 | |||||
12371 | if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { | ||||
12372 | SmallVector<const SCEV *, 2> Factors; | ||||
12373 | for (const SCEV *Op : M->operands()) | ||||
12374 | if (!isa<SCEVConstant>(Op)) | ||||
12375 | Factors.push_back(Op); | ||||
12376 | |||||
12377 | return SE.getMulExpr(Factors); | ||||
12378 | } | ||||
12379 | |||||
12380 | return T; | ||||
12381 | } | ||||
12382 | |||||
12383 | /// Return the size of an element read or written by Inst. | ||||
12384 | const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { | ||||
12385 | Type *Ty; | ||||
12386 | if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) | ||||
12387 | Ty = Store->getValueOperand()->getType(); | ||||
12388 | else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) | ||||
12389 | Ty = Load->getType(); | ||||
12390 | else | ||||
12391 | return nullptr; | ||||
12392 | |||||
12393 | Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); | ||||
12394 | return getSizeOfExpr(ETy, Ty); | ||||
12395 | } | ||||
12396 | |||||
12397 | void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, | ||||
12398 | SmallVectorImpl<const SCEV *> &Sizes, | ||||
12399 | const SCEV *ElementSize) { | ||||
12400 | if (Terms.size() < 1 || !ElementSize) | ||||
12401 | return; | ||||
12402 | |||||
12403 | // Early return when Terms do not contain parameters: we do not delinearize | ||||
12404 | // non-parametric SCEVs. | ||||
12405 | if (!containsParameters(Terms)) | ||||
12406 | return; | ||||
12407 | |||||
12408 | LLVM_DEBUG({ | ||||
12409 |   dbgs() << "Terms:\n"; | ||||
12410 |   for (const SCEV *T : Terms) | ||||
12411 |     dbgs() << *T << "\n"; | ||||
12412 | }); | ||||
12413 | |||||
12414 | // Remove duplicates. | ||||
12415 | array_pod_sort(Terms.begin(), Terms.end()); | ||||
12416 | Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); | ||||
12417 | |||||
12418 | // Put larger terms first. | ||||
12419 | llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) { | ||||
12420 | return numberOfTerms(LHS) > numberOfTerms(RHS); | ||||
12421 | }); | ||||
12422 | |||||
12423 | // Try to divide all terms by the element size. If a term is not divisible | ||||
12424 | // by the element size, proceed with the original term. | ||||
12425 | for (const SCEV *&Term : Terms) { | ||||
12426 | const SCEV *Q, *R; | ||||
12427 | SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); | ||||
12428 | if (!Q->isZero()) | ||||
12429 | Term = Q; | ||||
12430 | } | ||||
12431 | |||||
12432 | SmallVector<const SCEV *, 4> NewTerms; | ||||
12433 | |||||
12434 | // Remove constant factors. | ||||
12435 | for (const SCEV *T : Terms) | ||||
12436 | if (const SCEV *NewT = removeConstantFactors(*this, T)) | ||||
12437 | NewTerms.push_back(NewT); | ||||
12438 | |||||
12439 | LLVM_DEBUG({ | ||||
12440 |   dbgs() << "Terms after sorting:\n"; | ||||
12441 |   for (const SCEV *T : NewTerms) | ||||
12442 |     dbgs() << *T << "\n"; | ||||
12443 | }); | ||||
12444 | |||||
12445 | if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { | ||||
12446 | Sizes.clear(); | ||||
12447 | return; | ||||
12448 | } | ||||
12449 | |||||
12450 | // The last element to be pushed into Sizes is the size of an element. | ||||
12451 | Sizes.push_back(ElementSize); | ||||
12452 | |||||
12453 | LLVM_DEBUG({ | ||||
12454 |   dbgs() << "Sizes:\n"; | ||||
12455 |   for (const SCEV *S : Sizes) | ||||
12456 |     dbgs() << *S << "\n"; | ||||
12457 | }); | ||||
12458 | } | ||||
12459 | |||||
12460 | void ScalarEvolution::computeAccessFunctions( | ||||
12461 | const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, | ||||
12462 | SmallVectorImpl<const SCEV *> &Sizes) { | ||||
12463 | // Early exit in case this SCEV is not an affine multivariate function. | ||||
12464 | if (Sizes.empty()) | ||||
12465 | return; | ||||
12466 | |||||
12467 | if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) | ||||
12468 | if (!AR->isAffine()) | ||||
12469 | return; | ||||
12470 | |||||
12471 | const SCEV *Res = Expr; | ||||
12472 | int Last = Sizes.size() - 1; | ||||
12473 | for (int i = Last; i >= 0; i--) { | ||||
12474 | const SCEV *Q, *R; | ||||
12475 | SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); | ||||
12476 | |||||
12477 | LLVM_DEBUG({ | ||||
12478 |   dbgs() << "Res: " << *Res << "\n"; | ||||
12479 |   dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; | ||||
12480 |   dbgs() << "Res divided by Sizes[i]:\n"; | ||||
12481 |   dbgs() << "Quotient: " << *Q << "\n"; | ||||
12482 |   dbgs() << "Remainder: " << *R << "\n"; | ||||
12483 | }); | ||||
12484 | |||||
12485 | Res = Q; | ||||
12486 | |||||
12487 | // Do not record the last subscript corresponding to the size of elements in | ||||
12488 | // the array. | ||||
12489 | if (i == Last) { | ||||
12490 | |||||
12491 | // Bail out if the remainder is too complex. | ||||
12492 | if (isa<SCEVAddRecExpr>(R)) { | ||||
12493 | Subscripts.clear(); | ||||
12494 | Sizes.clear(); | ||||
12495 | return; | ||||
12496 | } | ||||
12497 | |||||
12498 | continue; | ||||
12499 | } | ||||
12500 | |||||
12501 | // Record the access function for the current subscript. | ||||
12502 | Subscripts.push_back(R); | ||||
12503 | } | ||||
12504 | |||||
12505 | // Also push in last position the remainder of the last division: it will be | ||||
12506 | // the access function of the innermost dimension. | ||||
12507 | Subscripts.push_back(Res); | ||||
12508 | |||||
12509 | std::reverse(Subscripts.begin(), Subscripts.end()); | ||||
12510 | |||||
12511 | LLVM_DEBUG({ | ||||
12512 |   dbgs() << "Subscripts:\n"; | ||||
12513 |   for (const SCEV *S : Subscripts) | ||||
12514 |     dbgs() << *S << "\n"; | ||||
12515 | }); | ||||
12516 | } | ||||
12517 | |||||
12518 | /// Splits the SCEV into two vectors of SCEVs representing the subscripts and | ||||
12519 | /// sizes of an array access. Returns the remainder of the delinearization that | ||||
12520 | /// is the offset start of the array. The SCEV->delinearize algorithm computes | ||||
12521 | /// the multiples of SCEV coefficients: that is a pattern matching of sub | ||||
12522 | /// expressions in the stride and base of a SCEV corresponding to the | ||||
12523 | /// computation of a GCD (greatest common divisor) of base and stride. When | ||||
12524 | /// SCEV->delinearize fails, it returns the SCEV unchanged. | ||||
12525 | /// | ||||
12526 | /// For example: when analyzing the memory access A[i][j][k] in this loop nest | ||||
12527 | /// | ||||
12528 | /// void foo(long n, long m, long o, double A[n][m][o]) { | ||||
12529 | /// | ||||
12530 | /// for (long i = 0; i < n; i++) | ||||
12531 | /// for (long j = 0; j < m; j++) | ||||
12532 | /// for (long k = 0; k < o; k++) | ||||
12533 | /// A[i][j][k] = 1.0; | ||||
12534 | /// } | ||||
12535 | /// | ||||
12536 | /// the delinearization input is the following AddRec SCEV: | ||||
12537 | /// | ||||
12538 | /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> | ||||
12539 | /// | ||||
12540 | /// From this SCEV, we are able to say that the base offset of the access is %A | ||||
12541 | /// because it appears as an offset that does not divide any of the strides in | ||||
12542 | /// the loops: | ||||
12543 | /// | ||||
12544 | /// CHECK: Base offset: %A | ||||
12545 | /// | ||||
12546 | /// and then SCEV->delinearize determines the size of some of the dimensions of | ||||
12547 | /// the array as these are the multiples by which the strides are happening: | ||||
12548 | /// | ||||
12549 | /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes. | ||||
12550 | /// | ||||
12551 | /// Note that the outermost dimension remains of UnknownSize because there are | ||||
12552 | /// no strides that would help identify the size of the last dimension: when | ||||
12553 | /// the array has been statically allocated, one could compute the size of that | ||||
12554 | /// dimension by dividing the overall size of the array by the size of the known | ||||
12555 | /// dimensions: %m * %o * 8. | ||||
12556 | /// | ||||
12557 | /// Finally delinearize provides the access functions for the array reference | ||||
12558 | /// that does correspond to A[i][j][k] of the above C testcase: | ||||
12559 | /// | ||||
12560 | /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] | ||||
12561 | /// | ||||
12562 | /// The testcases check the output of a function pass, | ||||
12563 | /// DelinearizationPass, which walks through all loads and stores of a function | ||||
12564 | /// asking for the SCEV of the memory access with respect to all enclosing | ||||
12565 | /// loops, calling SCEV->delinearize on that and printing the results. | ||||
12566 | void ScalarEvolution::delinearize(const SCEV *Expr, | ||||
12567 | SmallVectorImpl<const SCEV *> &Subscripts, | ||||
12568 | SmallVectorImpl<const SCEV *> &Sizes, | ||||
12569 | const SCEV *ElementSize) { | ||||
12570 | // First step: collect parametric terms. | ||||
12571 | SmallVector<const SCEV *, 4> Terms; | ||||
12572 | collectParametricTerms(Expr, Terms); | ||||
12573 | |||||
12574 | if (Terms.empty()) | ||||
12575 | return; | ||||
12576 | |||||
12577 | // Second step: find subscript sizes. | ||||
12578 | findArrayDimensions(Terms, Sizes, ElementSize); | ||||
12579 | |||||
12580 | if (Sizes.empty()) | ||||
12581 | return; | ||||
12582 | |||||
12583 | // Third step: compute the access functions for each subscript. | ||||
12584 | computeAccessFunctions(Expr, Subscripts, Sizes); | ||||
12585 | |||||
12586 | if (Subscripts.empty()) | ||||
12587 | return; | ||||
12588 | |||||
12589 | LLVM_DEBUG({ | ||||
12590 |   dbgs() << "succeeded to delinearize " << *Expr << "\n"; | ||||
12591 |   dbgs() << "ArrayDecl[UnknownSize]"; | ||||
12592 |   for (const SCEV *S : Sizes) | ||||
12593 |     dbgs() << "[" << *S << "]"; | ||||
12594 | | ||||
12595 |   dbgs() << "\nArrayRef"; | ||||
12596 |   for (const SCEV *S : Subscripts) | ||||
12597 |     dbgs() << "[" << *S << "]"; | ||||
12598 |   dbgs() << "\n"; | ||||
12599 | }); | ||||
12600 | } | ||||
12601 | |||||
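      | // Illustrative example (hypothetical types): for a GEP into | ||||
      | // [8 x [16 x i32]] with indices (0, %i, %j), the leading zero index is | ||||
      | // dropped, Subscripts becomes {%i, %j}, and Sizes becomes {16}; the | ||||
      | // outermost dimension's element count (8) is not recorded. | ||||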
12602 | bool ScalarEvolution::getIndexExpressionsFromGEP( | ||||
12603 | const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts, | ||||
12604 | SmallVectorImpl<int> &Sizes) { | ||||
12605 | assert(Subscripts.empty() && Sizes.empty() && | ||||
12606 |        "Expected output lists to be empty on entry to this function."); | ||||
12607 | assert(GEP && "getIndexExpressionsFromGEP called with a null GEP"); | ||||
12608 | Type *Ty = nullptr; | ||||
12609 | bool DroppedFirstDim = false; | ||||
12610 | for (unsigned i = 1; i < GEP->getNumOperands(); i++) { | ||||
12611 | const SCEV *Expr = getSCEV(GEP->getOperand(i)); | ||||
12612 | if (i == 1) { | ||||
12613 | Ty = GEP->getSourceElementType(); | ||||
12614 | if (auto *Const = dyn_cast<SCEVConstant>(Expr)) | ||||
12615 | if (Const->getValue()->isZero()) { | ||||
12616 | DroppedFirstDim = true; | ||||
12617 | continue; | ||||
12618 | } | ||||
12619 | Subscripts.push_back(Expr); | ||||
12620 | continue; | ||||
12621 | } | ||||
12622 | |||||
12623 | auto *ArrayTy = dyn_cast<ArrayType>(Ty); | ||||
12624 | if (!ArrayTy) { | ||||
12625 | Subscripts.clear(); | ||||
12626 | Sizes.clear(); | ||||
12627 | return false; | ||||
12628 | } | ||||
12629 | |||||
12630 | Subscripts.push_back(Expr); | ||||
12631 | if (!(DroppedFirstDim && i == 2)) | ||||
12632 | Sizes.push_back(ArrayTy->getNumElements()); | ||||
12633 | |||||
12634 | Ty = ArrayTy->getElementType(); | ||||
12635 | } | ||||
12636 | return !Subscripts.empty(); | ||||
12637 | } | ||||
12638 | |||||
12639 | //===----------------------------------------------------------------------===// | ||||
12640 | // SCEVCallbackVH Class Implementation | ||||
12641 | //===----------------------------------------------------------------------===// | ||||
12642 | |||||
12643 | void ScalarEvolution::SCEVCallbackVH::deleted() { | ||||
12644 | assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); | ||||
12645 | if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) | ||||
12646 | SE->ConstantEvolutionLoopExitValue.erase(PN); | ||||
12647 | SE->eraseValueFromMap(getValPtr()); | ||||
12648 | // this now dangles! | ||||
12649 | } | ||||
12650 | |||||
12651 | void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { | ||||
12652 | assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); | ||||
12653 | |||||
12654 | // Forget all the expressions associated with users of the old value, | ||||
12655 | // so that future queries will recompute the expressions using the new | ||||
12656 | // value. | ||||
12657 | Value *Old = getValPtr(); | ||||
12658 | SmallVector<User *, 16> Worklist(Old->users()); | ||||
12659 | SmallPtrSet<User *, 8> Visited; | ||||
12660 | while (!Worklist.empty()) { | ||||
12661 | User *U = Worklist.pop_back_val(); | ||||
12662 | // Deleting the Old value will cause this to dangle. Postpone | ||||
12663 | // that until everything else is done. | ||||
12664 | if (U == Old) | ||||
12665 | continue; | ||||
12666 | if (!Visited.insert(U).second) | ||||
12667 | continue; | ||||
12668 | if (PHINode *PN = dyn_cast<PHINode>(U)) | ||||
12669 | SE->ConstantEvolutionLoopExitValue.erase(PN); | ||||
12670 | SE->eraseValueFromMap(U); | ||||
12671 | llvm::append_range(Worklist, U->users()); | ||||
12672 | } | ||||
12673 | // Delete the Old value. | ||||
12674 | if (PHINode *PN = dyn_cast<PHINode>(Old)) | ||||
12675 | SE->ConstantEvolutionLoopExitValue.erase(PN); | ||||
12676 | SE->eraseValueFromMap(Old); | ||||
12677 | // this now dangles! | ||||
12678 | } | ||||
12679 | |||||
12680 | ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) | ||||
12681 | : CallbackVH(V), SE(se) {} | ||||
12682 | |||||
12683 | //===----------------------------------------------------------------------===// | ||||
12684 | // ScalarEvolution Class Implementation | ||||
12685 | //===----------------------------------------------------------------------===// | ||||
12686 | |||||
12687 | ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, | ||||
12688 | AssumptionCache &AC, DominatorTree &DT, | ||||
12689 | LoopInfo &LI) | ||||
12690 | : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), | ||||
12691 | CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), | ||||
12692 | LoopDispositions(64), BlockDispositions(64) { | ||||
12693 | // To use guards for proving predicates, we need to scan every instruction in | ||||
12694 | // relevant basic blocks, and not just terminators. Doing this is a waste of | ||||
12695 | // time if the IR does not actually contain any calls to | ||||
12696 | // @llvm.experimental.guard, so do a quick check and remember this beforehand. | ||||
12697 | // | ||||
12698 | // This pessimizes the case where a pass that preserves ScalarEvolution wants | ||||
12699 | // to _add_ guards to the module when there weren't any before, and wants | ||||
12700 | // ScalarEvolution to optimize based on those guards. For now we prefer to be | ||||
12701 | // efficient in lieu of being smart in that rather obscure case. | ||||
12702 | |||||
12703 | auto *GuardDecl = F.getParent()->getFunction( | ||||
12704 | Intrinsic::getName(Intrinsic::experimental_guard)); | ||||
12705 | HasGuards = GuardDecl && !GuardDecl->use_empty(); | ||||
12706 | } | ||||
12707 | |||||
12708 | ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) | ||||
12709 | : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), | ||||
12710 | LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), | ||||
12711 | ValueExprMap(std::move(Arg.ValueExprMap)), | ||||
12712 | PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), | ||||
12713 | PendingPhiRanges(std::move(Arg.PendingPhiRanges)), | ||||
12714 | PendingMerges(std::move(Arg.PendingMerges)), | ||||
12715 | MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), | ||||
12716 | BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), | ||||
12717 | PredicatedBackedgeTakenCounts( | ||||
12718 | std::move(Arg.PredicatedBackedgeTakenCounts)), | ||||
12719 | ConstantEvolutionLoopExitValue( | ||||
12720 | std::move(Arg.ConstantEvolutionLoopExitValue)), | ||||
12721 | ValuesAtScopes(std::move(Arg.ValuesAtScopes)), | ||||
12722 | LoopDispositions(std::move(Arg.LoopDispositions)), | ||||
12723 | LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), | ||||
12724 | BlockDispositions(std::move(Arg.BlockDispositions)), | ||||
12725 | UnsignedRanges(std::move(Arg.UnsignedRanges)), | ||||
12726 | SignedRanges(std::move(Arg.SignedRanges)), | ||||
12727 | UniqueSCEVs(std::move(Arg.UniqueSCEVs)), | ||||
12728 | UniquePreds(std::move(Arg.UniquePreds)), | ||||
12729 | SCEVAllocator(std::move(Arg.SCEVAllocator)), | ||||
12730 | LoopUsers(std::move(Arg.LoopUsers)), | ||||
12731 | PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), | ||||
12732 | FirstUnknown(Arg.FirstUnknown) { | ||||
12733 | Arg.FirstUnknown = nullptr; | ||||
12734 | } | ||||
12735 | |||||
12736 | ScalarEvolution::~ScalarEvolution() { | ||||
12737 | // Iterate through all the SCEVUnknown instances and call their | ||||
12738 | // destructors, so that they release their references to their values. | ||||
12739 | for (SCEVUnknown *U = FirstUnknown; U;) { | ||||
12740 | SCEVUnknown *Tmp = U; | ||||
12741 | U = U->Next; | ||||
12742 | Tmp->~SCEVUnknown(); | ||||
12743 | } | ||||
12744 | FirstUnknown = nullptr; | ||||
12745 | |||||
12746 | ExprValueMap.clear(); | ||||
12747 | ValueExprMap.clear(); | ||||
12748 | HasRecMap.clear(); | ||||
12749 | BackedgeTakenCounts.clear(); | ||||
12750 | PredicatedBackedgeTakenCounts.clear(); | ||||
12751 | |||||
12752 | assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); | ||||
12753 | assert(PendingPhiRanges.empty() && "getRangeRef garbage"); | ||||
12754 | assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); | ||||
12755 | assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); | ||||
12756 | assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); | ||||
12757 | } | ||||
12758 | |||||
12759 | bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { | ||||
12760 | return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); | ||||
12761 | } | ||||
12762 | |||||
12763 | static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, | ||||
12764 | const Loop *L) { | ||||
12765 | // Print all inner loops first | ||||
12766 | for (Loop *I : *L) | ||||
12767 | PrintLoopInfo(OS, SE, I); | ||||
12768 | |||||
12769 | OS << "Loop "; | ||||
12770 | L->getHeader()->printAsOperand(OS, /*PrintType=*/false); | ||||
12771 | OS << ": "; | ||||
12772 | |||||
12773 | SmallVector<BasicBlock *, 8> ExitingBlocks; | ||||
12774 | L->getExitingBlocks(ExitingBlocks); | ||||
12775 | if (ExitingBlocks.size() != 1) | ||||
12776 | OS << "<multiple exits> "; | ||||
12777 | |||||
12778 | if (SE->hasLoopInvariantBackedgeTakenCount(L)) | ||||
12779 | OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; | ||||
12780 | else | ||||
12781 | OS << "Unpredictable backedge-taken count.\n"; | ||||
12782 | |||||
12783 | if (ExitingBlocks.size() > 1) | ||||
12784 | for (BasicBlock *ExitingBlock : ExitingBlocks) { | ||||
12785 | OS << " exit count for " << ExitingBlock->getName() << ": " | ||||
12786 | << *SE->getExitCount(L, ExitingBlock) << "\n"; | ||||
12787 | } | ||||
12788 | |||||
12789 | OS << "Loop "; | ||||
12790 | L->getHeader()->printAsOperand(OS, /*PrintType=*/false); | ||||
12791 | OS << ": "; | ||||
12792 | |||||
12793 | if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) { | ||||
12794 | OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); | ||||
12795 | if (SE->isBackedgeTakenCountMaxOrZero(L)) | ||||
12796 | OS << ", actual taken count either this or zero."; | ||||
12797 | } else { | ||||
12798 | OS << "Unpredictable max backedge-taken count. "; | ||||
12799 | } | ||||
12800 | |||||
12801 | OS << "\n" | ||||
12802 | "Loop "; | ||||
12803 | L->getHeader()->printAsOperand(OS, /*PrintType=*/false); | ||||
12804 | OS << ": "; | ||||
12805 | |||||
12806 | SCEVUnionPredicate Pred; | ||||
12807 | auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); | ||||
12808 | if (!isa<SCEVCouldNotCompute>(PBT)) { | ||||
12809 | OS << "Predicated backedge-taken count is " << *PBT << "\n"; | ||||
12810 | OS << " Predicates:\n"; | ||||
12811 | Pred.print(OS, 4); | ||||
12812 | } else { | ||||
12813 | OS << "Unpredictable predicated backedge-taken count. "; | ||||
12814 | } | ||||
12815 | OS << "\n"; | ||||
12816 | |||||
12817 | if (SE->hasLoopInvariantBackedgeTakenCount(L)) { | ||||
12818 | OS << "Loop "; | ||||
12819 | L->getHeader()->printAsOperand(OS, /*PrintType=*/false); | ||||
12820 | OS << ": "; | ||||
12821 | OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; | ||||
12822 | } | ||||
12823 | } | ||||
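// For orientation, the output of PrintLoopInfo looks roughly like the
// following; the exact expressions depend entirely on the input IR:
//
//   Loop %for.body: backedge-taken count is (-1 + %n)
//   Loop %for.body: max backedge-taken count is -2
//   Loop %for.body: Predicated backedge-taken count is (-1 + %n)
//    Predicates:
//   Loop %for.body: Trip multiple is 1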
12824 | |||||
12825 | static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { | ||||
12826 | switch (LD) { | ||||
12827 | case ScalarEvolution::LoopVariant: | ||||
12828 | return "Variant"; | ||||
12829 | case ScalarEvolution::LoopInvariant: | ||||
12830 | return "Invariant"; | ||||
12831 | case ScalarEvolution::LoopComputable: | ||||
12832 | return "Computable"; | ||||
12833 | } | ||||
12834 |   llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!");
12835 | } | ||||
12836 | |||||
12837 | void ScalarEvolution::print(raw_ostream &OS) const { | ||||
12838 | // ScalarEvolution's implementation of the print method is to print | ||||
12839 | // out SCEV values of all instructions that are interesting. Doing | ||||
12840 | // this potentially causes it to create new SCEV objects though, | ||||
12841 | // which technically conflicts with the const qualifier. This isn't | ||||
12842 | // observable from outside the class though, so casting away the | ||||
12843 | // const isn't dangerous. | ||||
12844 | ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); | ||||
12845 | |||||
12846 | if (ClassifyExpressions) { | ||||
12847 | OS << "Classifying expressions for: "; | ||||
12848 | F.printAsOperand(OS, /*PrintType=*/false); | ||||
12849 | OS << "\n"; | ||||
12850 | for (Instruction &I : instructions(F)) | ||||
12851 | if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { | ||||
12852 | OS << I << '\n'; | ||||
12853 | OS << " --> "; | ||||
12854 | const SCEV *SV = SE.getSCEV(&I); | ||||
12855 | SV->print(OS); | ||||
12856 | if (!isa<SCEVCouldNotCompute>(SV)) { | ||||
12857 | OS << " U: "; | ||||
12858 | SE.getUnsignedRange(SV).print(OS); | ||||
12859 | OS << " S: "; | ||||
12860 | SE.getSignedRange(SV).print(OS); | ||||
12861 | } | ||||
12862 | |||||
12863 | const Loop *L = LI.getLoopFor(I.getParent()); | ||||
12864 | |||||
12865 | const SCEV *AtUse = SE.getSCEVAtScope(SV, L); | ||||
12866 | if (AtUse != SV) { | ||||
12867 | OS << " --> "; | ||||
12868 | AtUse->print(OS); | ||||
12869 | if (!isa<SCEVCouldNotCompute>(AtUse)) { | ||||
12870 | OS << " U: "; | ||||
12871 | SE.getUnsignedRange(AtUse).print(OS); | ||||
12872 | OS << " S: "; | ||||
12873 | SE.getSignedRange(AtUse).print(OS); | ||||
12874 | } | ||||
12875 | } | ||||
12876 | |||||
12877 | if (L) { | ||||
12878 | OS << "\t\t" "Exits: "; | ||||
12879 | const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); | ||||
12880 | if (!SE.isLoopInvariant(ExitValue, L)) { | ||||
12881 | OS << "<<Unknown>>"; | ||||
12882 | } else { | ||||
12883 | OS << *ExitValue; | ||||
12884 | } | ||||
12885 | |||||
12886 | bool First = true; | ||||
12887 | for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { | ||||
12888 | if (First) { | ||||
12889 | OS << "\t\t" "LoopDispositions: { "; | ||||
12890 | First = false; | ||||
12891 | } else { | ||||
12892 | OS << ", "; | ||||
12893 | } | ||||
12894 | |||||
12895 | Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); | ||||
12896 | OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); | ||||
12897 | } | ||||
12898 | |||||
12899 | for (auto *InnerL : depth_first(L)) { | ||||
12900 | if (InnerL == L) | ||||
12901 | continue; | ||||
12902 | if (First) { | ||||
12903 | OS << "\t\t" "LoopDispositions: { "; | ||||
12904 | First = false; | ||||
12905 | } else { | ||||
12906 | OS << ", "; | ||||
12907 | } | ||||
12908 | |||||
12909 | InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); | ||||
12910 | OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); | ||||
12911 | } | ||||
12912 | |||||
12913 | OS << " }"; | ||||
12914 | } | ||||
12915 | |||||
12916 | OS << "\n"; | ||||
12917 | } | ||||
12918 | } | ||||
12919 | |||||
12920 | OS << "Determining loop execution counts for: "; | ||||
12921 | F.printAsOperand(OS, /*PrintType=*/false); | ||||
12922 | OS << "\n"; | ||||
12923 | for (Loop *I : LI) | ||||
12924 | PrintLoopInfo(OS, &SE, I); | ||||
12925 | } | ||||
12926 | |||||
12927 | ScalarEvolution::LoopDisposition | ||||
12928 | ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { | ||||
12929 | auto &Values = LoopDispositions[S]; | ||||
12930 | for (auto &V : Values) { | ||||
12931 | if (V.getPointer() == L) | ||||
12932 | return V.getInt(); | ||||
12933 | } | ||||
12934 | Values.emplace_back(L, LoopVariant); | ||||
12935 | LoopDisposition D = computeLoopDisposition(S, L); | ||||
12936 | auto &Values2 = LoopDispositions[S]; | ||||
12937 | for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { | ||||
12938 | if (V.getPointer() == L) { | ||||
12939 | V.setInt(D); | ||||
12940 | break; | ||||
12941 | } | ||||
12942 | } | ||||
12943 | return D; | ||||
12944 | } | ||||
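// Note on the memoization pattern above: a placeholder entry (L, LoopVariant)
// is inserted *before* calling computeLoopDisposition, so that a recursive
// query on the same (S, L) pair terminates (conservatively answering
// LoopVariant) instead of recursing forever. The cache is looked up again
// afterwards (Values2) because computeLoopDisposition may have grown
// LoopDispositions and invalidated the first reference; the reverse scan then
// patches the most recently inserted entry for L with the computed result.
// getBlockDisposition below uses the identical scheme.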
12945 | |||||
12946 | ScalarEvolution::LoopDisposition | ||||
12947 | ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { | ||||
12948 | switch (S->getSCEVType()) { | ||||
12949 | case scConstant: | ||||
12950 | return LoopInvariant; | ||||
12951 | case scPtrToInt: | ||||
12952 | case scTruncate: | ||||
12953 | case scZeroExtend: | ||||
12954 | case scSignExtend: | ||||
12955 | return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); | ||||
12956 | case scAddRecExpr: { | ||||
12957 | const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); | ||||
12958 | |||||
12959 | // If L is the addrec's loop, it's computable. | ||||
12960 | if (AR->getLoop() == L) | ||||
12961 | return LoopComputable; | ||||
12962 | |||||
12963 | // Add recurrences are never invariant in the function-body (null loop). | ||||
12964 | if (!L) | ||||
12965 | return LoopVariant; | ||||
12966 | |||||
12967 | // Everything that is not defined at loop entry is variant. | ||||
12968 | if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) | ||||
12969 | return LoopVariant; | ||||
12970 |     assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
12971 |            " dominate the contained loop's header?");
12972 | |||||
12973 | // This recurrence is invariant w.r.t. L if AR's loop contains L. | ||||
12974 | if (AR->getLoop()->contains(L)) | ||||
12975 | return LoopInvariant; | ||||
12976 | |||||
12977 | // This recurrence is variant w.r.t. L if any of its operands | ||||
12978 | // are variant. | ||||
12979 | for (auto *Op : AR->operands()) | ||||
12980 | if (!isLoopInvariant(Op, L)) | ||||
12981 | return LoopVariant; | ||||
12982 | |||||
12983 | // Otherwise it's loop-invariant. | ||||
12984 | return LoopInvariant; | ||||
12985 | } | ||||
12986 | case scAddExpr: | ||||
12987 | case scMulExpr: | ||||
12988 | case scUMaxExpr: | ||||
12989 | case scSMaxExpr: | ||||
12990 | case scUMinExpr: | ||||
12991 | case scSMinExpr: { | ||||
12992 | bool HasVarying = false; | ||||
12993 | for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { | ||||
12994 | LoopDisposition D = getLoopDisposition(Op, L); | ||||
12995 | if (D == LoopVariant) | ||||
12996 | return LoopVariant; | ||||
12997 | if (D == LoopComputable) | ||||
12998 | HasVarying = true; | ||||
12999 | } | ||||
13000 | return HasVarying ? LoopComputable : LoopInvariant; | ||||
13001 | } | ||||
13002 | case scUDivExpr: { | ||||
13003 | const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); | ||||
13004 | LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); | ||||
13005 | if (LD == LoopVariant) | ||||
13006 | return LoopVariant; | ||||
13007 | LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); | ||||
13008 | if (RD == LoopVariant) | ||||
13009 | return LoopVariant; | ||||
13010 | return (LD == LoopInvariant && RD == LoopInvariant) ? | ||||
13011 | LoopInvariant : LoopComputable; | ||||
13012 | } | ||||
13013 | case scUnknown: | ||||
13014 | // All non-instruction values are loop invariant. All instructions are loop | ||||
13015 | // invariant if they are not contained in the specified loop. | ||||
13016 | // Instructions are never considered invariant in the function body | ||||
13017 | // (null loop) because they are defined within the "loop". | ||||
13018 | if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) | ||||
13019 | return (L && !L->contains(I)) ? LoopInvariant : LoopVariant; | ||||
13020 | return LoopInvariant; | ||||
13021 | case scCouldNotCompute: | ||||
13022 |     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
13023 | } | ||||
13024 |   llvm_unreachable("Unknown SCEV kind!");
13025 | } | ||||
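// A concrete illustration of the three dispositions: in a loop L with
// canonical induction variable modeled as {0,+,1}<L>, and %x defined outside
// L:
//   - %x                      -> LoopInvariant  w.r.t. L (scUnknown case)
//   - {0,+,1}<L>              -> LoopComputable w.r.t. L (scAddRecExpr case)
//   - (%x umax {0,+,1}<L>)    -> LoopComputable, per the n-ary case above: no
//     operand is variant and at least one is computable.
// Relative to an inner loop L2 nested inside L, {0,+,1}<L> is LoopInvariant,
// because AR->getLoop() (= L) contains L2.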
13026 | |||||
13027 | bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { | ||||
13028 | return getLoopDisposition(S, L) == LoopInvariant; | ||||
13029 | } | ||||
13030 | |||||
13031 | bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { | ||||
13032 | return getLoopDisposition(S, L) == LoopComputable; | ||||
13033 | } | ||||
13034 | |||||
13035 | ScalarEvolution::BlockDisposition | ||||
13036 | ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { | ||||
13037 | auto &Values = BlockDispositions[S]; | ||||
13038 | for (auto &V : Values) { | ||||
13039 | if (V.getPointer() == BB) | ||||
13040 | return V.getInt(); | ||||
13041 | } | ||||
13042 | Values.emplace_back(BB, DoesNotDominateBlock); | ||||
13043 | BlockDisposition D = computeBlockDisposition(S, BB); | ||||
13044 | auto &Values2 = BlockDispositions[S]; | ||||
13045 | for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { | ||||
13046 | if (V.getPointer() == BB) { | ||||
13047 | V.setInt(D); | ||||
13048 | break; | ||||
13049 | } | ||||
13050 | } | ||||
13051 | return D; | ||||
13052 | } | ||||
13053 | |||||
13054 | ScalarEvolution::BlockDisposition | ||||
13055 | ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { | ||||
13056 | switch (S->getSCEVType()) { | ||||
13057 | case scConstant: | ||||
13058 | return ProperlyDominatesBlock; | ||||
13059 | case scPtrToInt: | ||||
13060 | case scTruncate: | ||||
13061 | case scZeroExtend: | ||||
13062 | case scSignExtend: | ||||
13063 | return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); | ||||
13064 | case scAddRecExpr: { | ||||
13065 | // This uses a "dominates" query instead of "properly dominates" query | ||||
13066 | // to test for proper dominance too, because the instruction which | ||||
13067 | // produces the addrec's value is a PHI, and a PHI effectively properly | ||||
13068 | // dominates its entire containing block. | ||||
13069 | const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); | ||||
13070 | if (!DT.dominates(AR->getLoop()->getHeader(), BB)) | ||||
13071 | return DoesNotDominateBlock; | ||||
13072 | |||||
13073 | // Fall through into SCEVNAryExpr handling. | ||||
13074 |     LLVM_FALLTHROUGH;
13075 | } | ||||
13076 | case scAddExpr: | ||||
13077 | case scMulExpr: | ||||
13078 | case scUMaxExpr: | ||||
13079 | case scSMaxExpr: | ||||
13080 | case scUMinExpr: | ||||
13081 | case scSMinExpr: { | ||||
13082 | const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); | ||||
13083 | bool Proper = true; | ||||
13084 | for (const SCEV *NAryOp : NAry->operands()) { | ||||
13085 | BlockDisposition D = getBlockDisposition(NAryOp, BB); | ||||
13086 | if (D == DoesNotDominateBlock) | ||||
13087 | return DoesNotDominateBlock; | ||||
13088 | if (D == DominatesBlock) | ||||
13089 | Proper = false; | ||||
13090 | } | ||||
13091 | return Proper ? ProperlyDominatesBlock : DominatesBlock; | ||||
13092 | } | ||||
13093 | case scUDivExpr: { | ||||
13094 | const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); | ||||
13095 | const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); | ||||
13096 | BlockDisposition LD = getBlockDisposition(LHS, BB); | ||||
13097 | if (LD == DoesNotDominateBlock) | ||||
13098 | return DoesNotDominateBlock; | ||||
13099 | BlockDisposition RD = getBlockDisposition(RHS, BB); | ||||
13100 | if (RD == DoesNotDominateBlock) | ||||
13101 | return DoesNotDominateBlock; | ||||
13102 | return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? | ||||
13103 | ProperlyDominatesBlock : DominatesBlock; | ||||
13104 | } | ||||
13105 | case scUnknown: | ||||
13106 | if (Instruction *I = | ||||
13107 | dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { | ||||
13108 | if (I->getParent() == BB) | ||||
13109 | return DominatesBlock; | ||||
13110 | if (DT.properlyDominates(I->getParent(), BB)) | ||||
13111 | return ProperlyDominatesBlock; | ||||
13112 | return DoesNotDominateBlock; | ||||
13113 | } | ||||
13114 | return ProperlyDominatesBlock; | ||||
13115 | case scCouldNotCompute: | ||||
13116 |     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
13117 | } | ||||
13118 |   llvm_unreachable("Unknown SCEV kind!");
13119 | } | ||||
13120 | |||||
13121 | bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { | ||||
13122 | return getBlockDisposition(S, BB) >= DominatesBlock; | ||||
13123 | } | ||||
13124 | |||||
13125 | bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { | ||||
13126 | return getBlockDisposition(S, BB) == ProperlyDominatesBlock; | ||||
13127 | } | ||||
13128 | |||||
13129 | bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { | ||||
13130 | return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; }); | ||||
13131 | } | ||||
13132 | |||||
13133 | void | ||||
13134 | ScalarEvolution::forgetMemoizedResults(const SCEV *S) { | ||||
13135 | ValuesAtScopes.erase(S); | ||||
13136 | LoopDispositions.erase(S); | ||||
13137 | BlockDispositions.erase(S); | ||||
13138 | UnsignedRanges.erase(S); | ||||
13139 | SignedRanges.erase(S); | ||||
13140 | ExprValueMap.erase(S); | ||||
13141 | HasRecMap.erase(S); | ||||
13142 | MinTrailingZerosCache.erase(S); | ||||
13143 | |||||
13144 | for (auto I = PredicatedSCEVRewrites.begin(); | ||||
13145 | I != PredicatedSCEVRewrites.end();) { | ||||
13146 | std::pair<const SCEV *, const Loop *> Entry = I->first; | ||||
13147 | if (Entry.first == S) | ||||
13148 | PredicatedSCEVRewrites.erase(I++); | ||||
13149 | else | ||||
13150 | ++I; | ||||
13151 | } | ||||
13152 | |||||
13153 | auto RemoveSCEVFromBackedgeMap = | ||||
13154 | [S](DenseMap<const Loop *, BackedgeTakenInfo> &Map) { | ||||
13155 | for (auto I = Map.begin(), E = Map.end(); I != E;) { | ||||
13156 | BackedgeTakenInfo &BEInfo = I->second; | ||||
13157 | if (BEInfo.hasOperand(S)) | ||||
13158 | Map.erase(I++); | ||||
13159 | else | ||||
13160 | ++I; | ||||
13161 | } | ||||
13162 | }; | ||||
13163 | |||||
13164 | RemoveSCEVFromBackedgeMap(BackedgeTakenCounts); | ||||
13165 | RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts); | ||||
13166 | } | ||||
13167 | |||||
13168 | void | ||||
13169 | ScalarEvolution::getUsedLoops(const SCEV *S, | ||||
13170 | SmallPtrSetImpl<const Loop *> &LoopsUsed) { | ||||
13171 | struct FindUsedLoops { | ||||
13172 | FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed) | ||||
13173 | : LoopsUsed(LoopsUsed) {} | ||||
13174 | SmallPtrSetImpl<const Loop *> &LoopsUsed; | ||||
13175 | bool follow(const SCEV *S) { | ||||
13176 | if (auto *AR = dyn_cast<SCEVAddRecExpr>(S)) | ||||
13177 | LoopsUsed.insert(AR->getLoop()); | ||||
13178 | return true; | ||||
13179 | } | ||||
13180 | |||||
13181 | bool isDone() const { return false; } | ||||
13182 | }; | ||||
13183 | |||||
13184 | FindUsedLoops F(LoopsUsed); | ||||
13185 | SCEVTraversal<FindUsedLoops>(F).visitAll(S); | ||||
13186 | } | ||||
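// FindUsedLoops above follows the generic SCEVTraversal visitor protocol: the
// traversal calls follow() on every sub-expression and stops early once
// isDone() returns true. A minimal sketch of the same pattern, where
// CountAddRecs is a hypothetical visitor and Expr a SCEV in scope:
//
//   struct CountAddRecs {
//     unsigned N = 0;
//     bool follow(const SCEV *S) {          // invoked per sub-expression
//       N += isa<SCEVAddRecExpr>(S);
//       return true;                        // true = keep descending
//     }
//     bool isDone() const { return false; } // never cut the walk short
//   };
//   CountAddRecs Counter;
//   SCEVTraversal<CountAddRecs>(Counter).visitAll(Expr);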
13187 | |||||
13188 | void ScalarEvolution::addToLoopUseLists(const SCEV *S) { | ||||
13189 | SmallPtrSet<const Loop *, 8> LoopsUsed; | ||||
13190 | getUsedLoops(S, LoopsUsed); | ||||
13191 | for (auto *L : LoopsUsed) | ||||
13192 | LoopUsers[L].push_back(S); | ||||
13193 | } | ||||
13194 | |||||
13195 | void ScalarEvolution::verify() const { | ||||
13196 | ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); | ||||
13197 | ScalarEvolution SE2(F, TLI, AC, DT, LI); | ||||
13198 | |||||
13199 | SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end()); | ||||
13200 | |||||
13201 |   // Maps SCEV expressions from one ScalarEvolution "universe" to another.
13202 | struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> { | ||||
13203 | SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {} | ||||
13204 | |||||
13205 | const SCEV *visitConstant(const SCEVConstant *Constant) { | ||||
13206 | return SE.getConstant(Constant->getAPInt()); | ||||
13207 | } | ||||
13208 | |||||
13209 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { | ||||
13210 | return SE.getUnknown(Expr->getValue()); | ||||
13211 | } | ||||
13212 | |||||
13213 | const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { | ||||
13214 | return SE.getCouldNotCompute(); | ||||
13215 | } | ||||
13216 | }; | ||||
13217 | |||||
13218 | SCEVMapper SCM(SE2); | ||||
13219 | |||||
13220 | while (!LoopStack.empty()) { | ||||
13221 | auto *L = LoopStack.pop_back_val(); | ||||
13222 | llvm::append_range(LoopStack, *L); | ||||
13223 | |||||
13224 | auto *CurBECount = SCM.visit( | ||||
13225 | const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L)); | ||||
13226 | auto *NewBECount = SE2.getBackedgeTakenCount(L); | ||||
13227 | |||||
13228 | if (CurBECount == SE2.getCouldNotCompute() || | ||||
13229 | NewBECount == SE2.getCouldNotCompute()) { | ||||
13230 |       // NB! This situation is legal, but is very suspicious -- whatever pass
13231 |       // changed the loop to make the trip count go from "could not compute"
13232 |       // to computable (or vice versa) *should have* invalidated SCEV.
13233 |       // However, we choose not to assert here (for now) since we don't want
13234 |       // false positives.
13235 | continue; | ||||
13236 | } | ||||
13237 | |||||
13238 | if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) { | ||||
13239 | // SCEV treats "undef" as an unknown but consistent value (i.e. it does | ||||
13240 | // not propagate undef aggressively). This means we can (and do) fail | ||||
13241 | // verification in cases where a transform makes the trip count of a loop | ||||
13242 | // go from "undef" to "undef+1" (say). The transform is fine, since in | ||||
13243 | // both cases the loop iterates "undef" times, but SCEV thinks we | ||||
13244 | // increased the trip count of the loop by 1 incorrectly. | ||||
13245 | continue; | ||||
13246 | } | ||||
13247 | |||||
13248 | if (SE.getTypeSizeInBits(CurBECount->getType()) > | ||||
13249 | SE.getTypeSizeInBits(NewBECount->getType())) | ||||
13250 | NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType()); | ||||
13251 | else if (SE.getTypeSizeInBits(CurBECount->getType()) < | ||||
13252 | SE.getTypeSizeInBits(NewBECount->getType())) | ||||
13253 | CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType()); | ||||
13254 | |||||
13255 | const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount); | ||||
13256 | |||||
13257 | // Unless VerifySCEVStrict is set, we only compare constant deltas. | ||||
13258 | if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) { | ||||
13259 | dbgs() << "Trip Count for " << *L << " Changed!\n"; | ||||
13260 | dbgs() << "Old: " << *CurBECount << "\n"; | ||||
13261 | dbgs() << "New: " << *NewBECount << "\n"; | ||||
13262 | dbgs() << "Delta: " << *Delta << "\n"; | ||||
13263 | std::abort(); | ||||
13264 | } | ||||
13265 | } | ||||
13266 | |||||
13267 | // Collect all valid loops currently in LoopInfo. | ||||
13268 | SmallPtrSet<Loop *, 32> ValidLoops; | ||||
13269 | SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end()); | ||||
13270 | while (!Worklist.empty()) { | ||||
13271 | Loop *L = Worklist.pop_back_val(); | ||||
13272 | if (ValidLoops.contains(L)) | ||||
13273 | continue; | ||||
13274 | ValidLoops.insert(L); | ||||
13275 | Worklist.append(L->begin(), L->end()); | ||||
13276 | } | ||||
13277 | // Check for SCEV expressions referencing invalid/deleted loops. | ||||
13278 | for (auto &KV : ValueExprMap) { | ||||
13279 | auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second); | ||||
13280 | if (!AR) | ||||
13281 | continue; | ||||
13282 |     assert(ValidLoops.contains(AR->getLoop()) &&
13283 |            "AddRec references invalid loop");
13284 | } | ||||
13285 | } | ||||
13286 | |||||
13287 | bool ScalarEvolution::invalidate( | ||||
13288 | Function &F, const PreservedAnalyses &PA, | ||||
13289 | FunctionAnalysisManager::Invalidator &Inv) { | ||||
13290 | // Invalidate the ScalarEvolution object whenever it isn't preserved or one | ||||
13291 | // of its dependencies is invalidated. | ||||
13292 | auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); | ||||
13293 | return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || | ||||
13294 | Inv.invalidate<AssumptionAnalysis>(F, PA) || | ||||
13295 | Inv.invalidate<DominatorTreeAnalysis>(F, PA) || | ||||
13296 | Inv.invalidate<LoopAnalysis>(F, PA); | ||||
13297 | } | ||||
13298 | |||||
13299 | AnalysisKey ScalarEvolutionAnalysis::Key; | ||||
13300 | |||||
13301 | ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, | ||||
13302 | FunctionAnalysisManager &AM) { | ||||
13303 | return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), | ||||
13304 | AM.getResult<AssumptionAnalysis>(F), | ||||
13305 | AM.getResult<DominatorTreeAnalysis>(F), | ||||
13306 | AM.getResult<LoopAnalysis>(F)); | ||||
13307 | } | ||||
13308 | |||||
13309 | PreservedAnalyses | ||||
13310 | ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { | ||||
13311 | AM.getResult<ScalarEvolutionAnalysis>(F).verify(); | ||||
13312 | return PreservedAnalyses::all(); | ||||
13313 | } | ||||
13314 | |||||
13315 | PreservedAnalyses | ||||
13316 | ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { | ||||
13317 |   // For compatibility with opt's -analyze feature under the legacy pass
13318 |   // manager, which was not ported to NPM. This keeps tests using
13319 |   // update_analyze_test_checks.py working.
13320 | OS << "Printing analysis 'Scalar Evolution Analysis' for function '" | ||||
13321 | << F.getName() << "':\n"; | ||||
13322 | AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); | ||||
13323 | return PreservedAnalyses::all(); | ||||
13324 | } | ||||
13325 | |||||
13326 | INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
13327 |                       "Scalar Evolution Analysis", false, true)
13328 | INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
13329 | INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
13330 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
13331 | INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
13332 | INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
13333 |                     "Scalar Evolution Analysis", false, true)
13334 | |||||
13335 | char ScalarEvolutionWrapperPass::ID = 0; | ||||
13336 | |||||
13337 | ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { | ||||
13338 | initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); | ||||
13339 | } | ||||
13340 | |||||
13341 | bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { | ||||
13342 | SE.reset(new ScalarEvolution( | ||||
13343 | F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), | ||||
13344 | getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), | ||||
13345 | getAnalysis<DominatorTreeWrapperPass>().getDomTree(), | ||||
13346 | getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); | ||||
13347 | return false; | ||||
13348 | } | ||||
13349 | |||||
13350 | void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } | ||||
13351 | |||||
13352 | void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { | ||||
13353 | SE->print(OS); | ||||
13354 | } | ||||
13355 | |||||
13356 | void ScalarEvolutionWrapperPass::verifyAnalysis() const { | ||||
13357 | if (!VerifySCEV) | ||||
13358 | return; | ||||
13359 | |||||
13360 | SE->verify(); | ||||
13361 | } | ||||
13362 | |||||
13363 | void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { | ||||
13364 | AU.setPreservesAll(); | ||||
13365 | AU.addRequiredTransitive<AssumptionCacheTracker>(); | ||||
13366 | AU.addRequiredTransitive<LoopInfoWrapperPass>(); | ||||
13367 | AU.addRequiredTransitive<DominatorTreeWrapperPass>(); | ||||
13368 | AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); | ||||
13369 | } | ||||
13370 | |||||
13371 | const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, | ||||
13372 | const SCEV *RHS) { | ||||
13373 | FoldingSetNodeID ID; | ||||
13374 |   assert(LHS->getType() == RHS->getType() &&
13375 |          "Type mismatch between LHS and RHS");
13376 | // Unique this node based on the arguments | ||||
13377 | ID.AddInteger(SCEVPredicate::P_Equal); | ||||
13378 | ID.AddPointer(LHS); | ||||
13379 | ID.AddPointer(RHS); | ||||
13380 | void *IP = nullptr; | ||||
13381 | if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) | ||||
13382 | return S; | ||||
13383 | SCEVEqualPredicate *Eq = new (SCEVAllocator) | ||||
13384 | SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); | ||||
13385 | UniquePreds.InsertNode(Eq, IP); | ||||
13386 | return Eq; | ||||
13387 | } | ||||
13388 | |||||
13389 | const SCEVPredicate *ScalarEvolution::getWrapPredicate( | ||||
13390 | const SCEVAddRecExpr *AR, | ||||
13391 | SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { | ||||
13392 | FoldingSetNodeID ID; | ||||
13393 | // Unique this node based on the arguments | ||||
13394 | ID.AddInteger(SCEVPredicate::P_Wrap); | ||||
13395 | ID.AddPointer(AR); | ||||
13396 | ID.AddInteger(AddedFlags); | ||||
13397 | void *IP = nullptr; | ||||
13398 | if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) | ||||
13399 | return S; | ||||
13400 | auto *OF = new (SCEVAllocator) | ||||
13401 | SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); | ||||
13402 | UniquePreds.InsertNode(OF, IP); | ||||
13403 | return OF; | ||||
13404 | } | ||||
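// Both predicate factories above use LLVM's standard FoldingSet uniquing
// idiom: profile the node into a FoldingSetNodeID, probe with
// FindNodeOrInsertPos, and only allocate from the BumpPtrAllocator on a miss.
// Schematically (Kind, Payload, and NodeTy stand in for the concrete
// arguments of each factory):
//
//   FoldingSetNodeID ID;
//   ID.AddInteger(Kind);
//   ID.AddPointer(Payload);
//   void *IP = nullptr;
//   if (const auto *Existing = UniquePreds.FindNodeOrInsertPos(ID, IP))
//     return Existing;                    // cache hit: reuse the node
//   auto *New = new (SCEVAllocator) NodeTy(ID.Intern(SCEVAllocator), ...);
//   UniquePreds.InsertNode(New, IP);      // cache miss: intern the new node
//
// The payoff is that predicate equality reduces to pointer equality,
// mirroring how SCEV expressions themselves are uniqued.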
13405 | |||||
13406 | namespace { | ||||
13407 | |||||
13408 | class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { | ||||
13409 | public: | ||||
13410 | |||||
13411 | /// Rewrites \p S in the context of a loop L and the SCEV predication | ||||
13412 | /// infrastructure. | ||||
13413 | /// | ||||
13414 | /// If \p Pred is non-null, the SCEV expression is rewritten to respect the | ||||
13415 | /// equivalences present in \p Pred. | ||||
13416 | /// | ||||
13417 | /// If \p NewPreds is non-null, rewrite is free to add further predicates to | ||||
13418 | /// \p NewPreds such that the result will be an AddRecExpr. | ||||
13419 | static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, | ||||
13420 | SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, | ||||
13421 | SCEVUnionPredicate *Pred) { | ||||
13422 | SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); | ||||
13423 | return Rewriter.visit(S); | ||||
13424 | } | ||||
13425 | |||||
13426 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { | ||||
13427 | if (Pred) { | ||||
13428 | auto ExprPreds = Pred->getPredicatesForExpr(Expr); | ||||
13429 | for (auto *Pred : ExprPreds) | ||||
13430 | if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred)) | ||||
13431 | if (IPred->getLHS() == Expr) | ||||
13432 | return IPred->getRHS(); | ||||
13433 | } | ||||
13434 | return convertToAddRecWithPreds(Expr); | ||||
13435 | } | ||||
13436 | |||||
13437 | const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { | ||||
13438 | const SCEV *Operand = visit(Expr->getOperand()); | ||||
13439 | const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); | ||||
13440 | if (AR && AR->getLoop() == L && AR->isAffine()) { | ||||
13441 | // This couldn't be folded because the operand didn't have the nuw | ||||
13442 | // flag. Add the nusw flag as an assumption that we could make. | ||||
13443 | const SCEV *Step = AR->getStepRecurrence(SE); | ||||
13444 | Type *Ty = Expr->getType(); | ||||
13445 | if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) | ||||
13446 | return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), | ||||
13447 | SE.getSignExtendExpr(Step, Ty), L, | ||||
13448 | AR->getNoWrapFlags()); | ||||
13449 | } | ||||
13450 | return SE.getZeroExtendExpr(Operand, Expr->getType()); | ||||
13451 | } | ||||
13452 | |||||
13453 | const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { | ||||
13454 | const SCEV *Operand = visit(Expr->getOperand()); | ||||
13455 | const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); | ||||
13456 | if (AR && AR->getLoop() == L && AR->isAffine()) { | ||||
13457 | // This couldn't be folded because the operand didn't have the nsw | ||||
13458 | // flag. Add the nssw flag as an assumption that we could make. | ||||
13459 | const SCEV *Step = AR->getStepRecurrence(SE); | ||||
13460 | Type *Ty = Expr->getType(); | ||||
13461 | if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) | ||||
13462 | return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), | ||||
13463 | SE.getSignExtendExpr(Step, Ty), L, | ||||
13464 | AR->getNoWrapFlags()); | ||||
13465 | } | ||||
13466 | return SE.getSignExtendExpr(Operand, Expr->getType()); | ||||
13467 | } | ||||
13468 | |||||
13469 | private: | ||||
13470 | explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, | ||||
13471 | SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, | ||||
13472 | SCEVUnionPredicate *Pred) | ||||
13473 | : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} | ||||
13474 | |||||
13475 | bool addOverflowAssumption(const SCEVPredicate *P) { | ||||
13476 | if (!NewPreds) { | ||||
13477 | // Check if we've already made this assumption. | ||||
13478 | return Pred && Pred->implies(P); | ||||
13479 | } | ||||
13480 | NewPreds->insert(P); | ||||
13481 | return true; | ||||
13482 | } | ||||
13483 | |||||
13484 | bool addOverflowAssumption(const SCEVAddRecExpr *AR, | ||||
13485 | SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { | ||||
13486 | auto *A = SE.getWrapPredicate(AR, AddedFlags); | ||||
13487 | return addOverflowAssumption(A); | ||||
13488 | } | ||||
13489 | |||||
13490 | // If \p Expr represents a PHINode, we try to see if it can be represented | ||||
13491 | // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible | ||||
13492 | // to add this predicate as a runtime overflow check, we return the AddRec. | ||||
13493 | // If \p Expr does not meet these conditions (is not a PHI node, or we | ||||
13494 | // couldn't create an AddRec for it, or couldn't add the predicate), we just | ||||
13495 | // return \p Expr. | ||||
13496 | const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { | ||||
13497 | if (!isa<PHINode>(Expr->getValue())) | ||||
13498 | return Expr; | ||||
13499 | Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> | ||||
13500 | PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); | ||||
13501 | if (!PredicatedRewrite) | ||||
13502 | return Expr; | ||||
13503 |     for (auto *P : PredicatedRewrite->second) {
13504 | // Wrap predicates from outer loops are not supported. | ||||
13505 | if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) { | ||||
13506 | auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr()); | ||||
13507 | if (L != AR->getLoop()) | ||||
13508 | return Expr; | ||||
13509 | } | ||||
13510 | if (!addOverflowAssumption(P)) | ||||
13511 | return Expr; | ||||
13512 | } | ||||
13513 | return PredicatedRewrite->first; | ||||
13514 | } | ||||
13515 | |||||
13516 | SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; | ||||
13517 | SCEVUnionPredicate *Pred; | ||||
13518 | const Loop *L; | ||||
13519 | }; | ||||
13520 | |||||
13521 | } // end anonymous namespace | ||||
13522 | |||||
13523 | const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, | ||||
13524 | SCEVUnionPredicate &Preds) { | ||||
13525 | return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); | ||||
13526 | } | ||||
13527 | |||||
13528 | const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( | ||||
13529 | const SCEV *S, const Loop *L, | ||||
13530 | SmallPtrSetImpl<const SCEVPredicate *> &Preds) { | ||||
13531 | SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; | ||||
13532 | S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); | ||||
13533 | auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); | ||||
13534 | |||||
13535 | if (!AddRec) | ||||
13536 | return nullptr; | ||||
13537 | |||||
13538 | // Since the transformation was successful, we can now transfer the SCEV | ||||
13539 | // predicates. | ||||
13540 | for (auto *P : TransformPreds) | ||||
13541 | Preds.insert(P); | ||||
13542 | |||||
13543 | return AddRec; | ||||
13544 | } | ||||
13545 | |||||
13546 | /// SCEV predicates | ||||
13547 | SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, | ||||
13548 | SCEVPredicateKind Kind) | ||||
13549 | : FastID(ID), Kind(Kind) {} | ||||
13550 | |||||
13551 | SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, | ||||
13552 | const SCEV *LHS, const SCEV *RHS) | ||||
13553 | : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) { | ||||
13554 |   assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
13555 |   assert(LHS != RHS && "LHS and RHS are the same SCEV");
13556 | } | ||||
13557 | |||||
13558 | bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { | ||||
13559 | const auto *Op = dyn_cast<SCEVEqualPredicate>(N); | ||||
13560 | |||||
13561 | if (!Op) | ||||
13562 | return false; | ||||
13563 | |||||
13564 | return Op->LHS == LHS && Op->RHS == RHS; | ||||
13565 | } | ||||
13566 | |||||
13567 | bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } | ||||
13568 | |||||
13569 | const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } | ||||
13570 | |||||
13571 | void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { | ||||
13572 | OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; | ||||
13573 | } | ||||
13574 | |||||
13575 | SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, | ||||
13576 | const SCEVAddRecExpr *AR, | ||||
13577 | IncrementWrapFlags Flags) | ||||
13578 | : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} | ||||
13579 | |||||
13580 | const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } | ||||
13581 | |||||
13582 | bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { | ||||
13583 | const auto *Op = dyn_cast<SCEVWrapPredicate>(N); | ||||
13584 | |||||
13585 | return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; | ||||
13586 | } | ||||
13587 | |||||
13588 | bool SCEVWrapPredicate::isAlwaysTrue() const { | ||||
13589 | SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); | ||||
13590 | IncrementWrapFlags IFlags = Flags; | ||||
13591 | |||||
13592 | if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) | ||||
13593 | IFlags = clearFlags(IFlags, IncrementNSSW); | ||||
13594 | |||||
13595 | return IFlags == IncrementAnyWrap; | ||||
13596 | } | ||||
13597 | |||||
13598 | void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { | ||||
13599 | OS.indent(Depth) << *getExpr() << " Added Flags: "; | ||||
13600 | if (SCEVWrapPredicate::IncrementNUSW & getFlags()) | ||||
13601 | OS << "<nusw>"; | ||||
13602 | if (SCEVWrapPredicate::IncrementNSSW & getFlags()) | ||||
13603 | OS << "<nssw>"; | ||||
13604 | OS << "\n"; | ||||
13605 | } | ||||
13606 | |||||
13607 | SCEVWrapPredicate::IncrementWrapFlags | ||||
13608 | SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, | ||||
13609 | ScalarEvolution &SE) { | ||||
13610 | IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; | ||||
13611 | SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); | ||||
13612 | |||||
13613 | // We can safely transfer the NSW flag as NSSW. | ||||
13614 | if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) | ||||
13615 | ImpliedFlags = IncrementNSSW; | ||||
13616 | |||||
13617 | if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { | ||||
13618 | // If the increment is positive, the SCEV NUW flag will also imply the | ||||
13619 | // WrapPredicate NUSW flag. | ||||
13620 | if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) | ||||
13621 | if (Step->getValue()->getValue().isNonNegative()) | ||||
13622 | ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); | ||||
13623 | } | ||||
13624 | |||||
13625 | return ImpliedFlags; | ||||
13626 | } | ||||
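// Summary of the implication computed above: the AddRec's static no-wrap
// flags entail runtime wrap predicates as follows --
//   FlagNSW                              => IncrementNSSW
//   FlagNUW and constant step >= 0       => IncrementNUSW (in addition)
// Callers can therefore drop any requested predicate bits that are already
// established statically; see setNoOverflow / hasNoOverflow below.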
13627 | |||||
13628 | /// Union predicates don't get cached, so create a dummy set ID for them.
13629 | SCEVUnionPredicate::SCEVUnionPredicate() | ||||
13630 | : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {} | ||||
13631 | |||||
13632 | bool SCEVUnionPredicate::isAlwaysTrue() const { | ||||
13633 | return all_of(Preds, | ||||
13634 | [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); | ||||
13635 | } | ||||
13636 | |||||
13637 | ArrayRef<const SCEVPredicate *> | ||||
13638 | SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) { | ||||
13639 | auto I = SCEVToPreds.find(Expr); | ||||
13640 | if (I == SCEVToPreds.end()) | ||||
13641 | return ArrayRef<const SCEVPredicate *>(); | ||||
13642 | return I->second; | ||||
13643 | } | ||||
13644 | |||||
13645 | bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { | ||||
13646 | if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) | ||||
13647 | return all_of(Set->Preds, | ||||
13648 | [this](const SCEVPredicate *I) { return this->implies(I); }); | ||||
13649 | |||||
13650 | auto ScevPredsIt = SCEVToPreds.find(N->getExpr()); | ||||
13651 | if (ScevPredsIt == SCEVToPreds.end()) | ||||
13652 | return false; | ||||
13653 | auto &SCEVPreds = ScevPredsIt->second; | ||||
13654 | |||||
13655 | return any_of(SCEVPreds, | ||||
13656 | [N](const SCEVPredicate *I) { return I->implies(N); }); | ||||
13657 | } | ||||
13658 | |||||
13659 | const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; } | ||||
13660 | |||||
13661 | void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { | ||||
13662 | for (auto Pred : Preds) | ||||
13663 | Pred->print(OS, Depth); | ||||
13664 | } | ||||
13665 | |||||
13666 | void SCEVUnionPredicate::add(const SCEVPredicate *N) { | ||||
13667 | if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { | ||||
13668 | for (auto Pred : Set->Preds) | ||||
13669 | add(Pred); | ||||
13670 | return; | ||||
13671 | } | ||||
13672 | |||||
13673 | if (implies(N)) | ||||
13674 | return; | ||||
13675 | |||||
13676 | const SCEV *Key = N->getExpr(); | ||||
13677 |   assert(Key && "Only SCEVUnionPredicate doesn't have an"
13678 |                 " associated expression!");
13679 | |||||
13680 | SCEVToPreds[Key].push_back(N); | ||||
13681 | Preds.push_back(N); | ||||
13682 | } | ||||
13683 | |||||
13684 | PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, | ||||
13685 | Loop &L) | ||||
13686 | : SE(SE), L(L) {} | ||||
13687 | |||||
13688 | const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { | ||||
13689 | const SCEV *Expr = SE.getSCEV(V); | ||||
13690 | RewriteEntry &Entry = RewriteMap[Expr]; | ||||
13691 | |||||
13692 | // If we already have an entry and the version matches, return it. | ||||
13693 | if (Entry.second && Generation == Entry.first) | ||||
13694 | return Entry.second; | ||||
13695 | |||||
13696 | // We found an entry but it's stale. Rewrite the stale entry | ||||
13697 | // according to the current predicate. | ||||
13698 | if (Entry.second) | ||||
13699 | Expr = Entry.second; | ||||
13700 | |||||
13701 | const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds); | ||||
13702 | Entry = {Generation, NewSCEV}; | ||||
13703 | |||||
13704 | return NewSCEV; | ||||
13705 | } | ||||
13706 | |||||
13707 | const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { | ||||
13708 | if (!BackedgeCount) { | ||||
13709 | SCEVUnionPredicate BackedgePred; | ||||
13710 | BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred); | ||||
13711 | addPredicate(BackedgePred); | ||||
13712 | } | ||||
13713 | return BackedgeCount; | ||||
13714 | } | ||||
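// A minimal client-side sketch of the PredicatedScalarEvolution flow, in the
// style of its users such as the loop vectorizer; `SE` and `L` are assumed to
// be in scope:
//
//   PredicatedScalarEvolution PSE(SE, *L);
//   const SCEV *BTC = PSE.getBackedgeTakenCount();
//   if (!isa<SCEVCouldNotCompute>(BTC)) {
//     // BTC is valid only under PSE.getUnionPredicate(); before relying on
//     // it, a transform must emit the corresponding runtime checks (for
//     // example via SCEVExpander).
//   }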
13715 | |||||
13716 | void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { | ||||
13717 | if (Preds.implies(&Pred)) | ||||
13718 | return; | ||||
13719 | Preds.add(&Pred); | ||||
13720 | updateGeneration(); | ||||
13721 | } | ||||
13722 | |||||
13723 | const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const { | ||||
13724 | return Preds; | ||||
13725 | } | ||||
13726 | |||||
13727 | void PredicatedScalarEvolution::updateGeneration() { | ||||
13728 |   // If the generation number wrapped, recompute everything.
13729 | if (++Generation == 0) { | ||||
13730 | for (auto &II : RewriteMap) { | ||||
13731 | const SCEV *Rewritten = II.second.second; | ||||
13732 | II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)}; | ||||
13733 | } | ||||
13734 | } | ||||
13735 | } | ||||
13736 | |||||
13737 | void PredicatedScalarEvolution::setNoOverflow( | ||||
13738 | Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { | ||||
13739 | const SCEV *Expr = getSCEV(V); | ||||
13740 | const auto *AR = cast<SCEVAddRecExpr>(Expr); | ||||
13741 | |||||
13742 | auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); | ||||
13743 | |||||
13744 | // Clear the statically implied flags. | ||||
13745 | Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); | ||||
13746 | addPredicate(*SE.getWrapPredicate(AR, Flags)); | ||||
13747 | |||||
13748 | auto II = FlagsMap.insert({V, Flags}); | ||||
13749 | if (!II.second) | ||||
13750 | II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); | ||||
13751 | } | ||||
13752 | |||||
13753 | bool PredicatedScalarEvolution::hasNoOverflow( | ||||
13754 | Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { | ||||
13755 | const SCEV *Expr = getSCEV(V); | ||||
13756 | const auto *AR = cast<SCEVAddRecExpr>(Expr); | ||||
13757 | |||||
13758 | Flags = SCEVWrapPredicate::clearFlags( | ||||
13759 | Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); | ||||
13760 | |||||
13761 | auto II = FlagsMap.find(V); | ||||
13762 | |||||
13763 | if (II != FlagsMap.end()) | ||||
13764 | Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); | ||||
13765 | |||||
13766 | return Flags == SCEVWrapPredicate::IncrementAnyWrap; | ||||
13767 | } | ||||
13768 | |||||
13769 | const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { | ||||
13770 | const SCEV *Expr = this->getSCEV(V); | ||||
13771 | SmallPtrSet<const SCEVPredicate *, 4> NewPreds; | ||||
13772 | auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds); | ||||
13773 | |||||
13774 | if (!New) | ||||
13775 | return nullptr; | ||||
13776 | |||||
13777 | for (auto *P : NewPreds) | ||||
13778 | Preds.add(P); | ||||
13779 | |||||
13780 | updateGeneration(); | ||||
13781 | RewriteMap[SE.getSCEV(V)] = {Generation, New}; | ||||
13782 | return New; | ||||
13783 | } | ||||
13784 | |||||
13785 | PredicatedScalarEvolution::PredicatedScalarEvolution( | ||||
13786 | const PredicatedScalarEvolution &Init) | ||||
13787 | : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds), | ||||
13788 | Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { | ||||
13789 | for (auto I : Init.FlagsMap) | ||||
13790 | FlagsMap.insert(I); | ||||
13791 | } | ||||
13792 | |||||
13793 | void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { | ||||
13794 | // For each block. | ||||
13795 | for (auto *BB : L.getBlocks()) | ||||
13796 | for (auto &I : *BB) { | ||||
13797 | if (!SE.isSCEVable(I.getType())) | ||||
13798 | continue; | ||||
13799 | |||||
13800 | auto *Expr = SE.getSCEV(&I); | ||||
13801 | auto II = RewriteMap.find(Expr); | ||||
13802 | |||||
13803 | if (II == RewriteMap.end()) | ||||
13804 | continue; | ||||
13805 | |||||
13806 | // Don't print things that are not interesting. | ||||
13807 | if (II->second.second == Expr) | ||||
13808 | continue; | ||||
13809 | |||||
13810 | OS.indent(Depth) << "[PSE]" << I << ":\n"; | ||||
13811 | OS.indent(Depth + 2) << *Expr << "\n"; | ||||
13812 | OS.indent(Depth + 2) << "--> " << *II->second.second << "\n"; | ||||
13813 | } | ||||
13814 | } | ||||
13815 | |||||
13816 | // Match the mathematical pattern A - (A / B) * B, where A and B can be | ||||
13817 | // arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used | ||||
13818 | // for URem with constant power-of-2 second operands. | ||||
13819 | // It's not always easy, as A and B can be folded (imagine A is X / 2, and B is | ||||
13820 | // 4, A / B becomes X / 8). | ||||
13821 | bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS, | ||||
13822 | const SCEV *&RHS) { | ||||
13823 | // Try to match 'zext (trunc A to iB) to iY', which is used | ||||
13824 | // for URem with constant power-of-2 second operands. Make sure the size of | ||||
13825 | // the operand A matches the size of the whole expressions. | ||||
13826 | if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr)) | ||||
13827 | if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) { | ||||
13828 | LHS = Trunc->getOperand(); | ||||
13829 | // Bail out if the type of the LHS is larger than the type of the | ||||
13830 | // expression for now. | ||||
13831 | if (getTypeSizeInBits(LHS->getType()) > | ||||
13832 | getTypeSizeInBits(Expr->getType())) | ||||
13833 | return false; | ||||
13834 | if (LHS->getType() != Expr->getType()) | ||||
13835 | LHS = getZeroExtendExpr(LHS, Expr->getType()); | ||||
13836 | RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1) | ||||
13837 | << getTypeSizeInBits(Trunc->getType())); | ||||
13838 | return true; | ||||
13839 | } | ||||
13840 | const auto *Add = dyn_cast<SCEVAddExpr>(Expr); | ||||
13841 | if (Add == nullptr || Add->getNumOperands() != 2) | ||||
13842 | return false; | ||||
13843 | |||||
13844 | const SCEV *A = Add->getOperand(1); | ||||
13845 | const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0)); | ||||
13846 | |||||
13847 | if (Mul == nullptr) | ||||
13848 | return false; | ||||
13849 | |||||
13850 | const auto MatchURemWithDivisor = [&](const SCEV *B) { | ||||
13851 | // (SomeExpr + (-(SomeExpr / B) * B)). | ||||
13852 | if (Expr == getURemExpr(A, B)) { | ||||
13853 | LHS = A; | ||||
13854 | RHS = B; | ||||
13855 | return true; | ||||
13856 | } | ||||
13857 | return false; | ||||
13858 | }; | ||||
13859 | |||||
13860 | // (SomeExpr + (-1 * (SomeExpr / B) * B)). | ||||
13861 | if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0))) | ||||
13862 | return MatchURemWithDivisor(Mul->getOperand(1)) || | ||||
13863 | MatchURemWithDivisor(Mul->getOperand(2)); | ||||
13864 | |||||
13865 | // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)). | ||||
13866 | if (Mul->getNumOperands() == 2) | ||||
13867 | return MatchURemWithDivisor(Mul->getOperand(1)) || | ||||
13868 | MatchURemWithDivisor(Mul->getOperand(0)) || | ||||
13869 | MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) || | ||||
13870 | MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0))); | ||||
13871 | return false; | ||||
13872 | } | ||||
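// A worked instance of the patterns matched above. For a power-of-two
// divisor, "urem i32 %a, 8" is folded to a mask of the low three bits, which
// SCEV models with a trunc/zext pair:
//
//   %r = urem i32 %a, 8   -->   (zext i3 (trunc i32 %a to i3) to i32)
//
// so the first pattern recovers LHS = %a (zero-extended to i32) and
// RHS = 1 << 3 = 8. For a general divisor B, "A urem B" is only visible in
// the folded form A - (A /u B) * B, i.e. an add of A and a negated multiply;
// MatchURemWithDivisor verifies a candidate B by rebuilding getURemExpr(A, B)
// and comparing pointers, which is sound because SCEVs are uniqued.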
13873 | |||||
13874 | const SCEV * | ||||
13875 | ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) { | ||||
13876 | SmallVector<BasicBlock*, 16> ExitingBlocks; | ||||
13877 | L->getExitingBlocks(ExitingBlocks); | ||||
13878 | |||||
13879 | // Form an expression for the maximum exit count possible for this loop. We | ||||
13880 | // merge the max and exact information to approximate a version of | ||||
13881 | // getConstantMaxBackedgeTakenCount which isn't restricted to just constants. | ||||
13882 | SmallVector<const SCEV*, 4> ExitCounts; | ||||
13883 | for (BasicBlock *ExitingBB : ExitingBlocks) { | ||||
13884 | const SCEV *ExitCount = getExitCount(L, ExitingBB); | ||||
13885 | if (isa<SCEVCouldNotCompute>(ExitCount)) | ||||
13886 | ExitCount = getExitCount(L, ExitingBB, | ||||
13887 | ScalarEvolution::ConstantMaximum); | ||||
13888 | if (!isa<SCEVCouldNotCompute>(ExitCount)) { | ||||
13889 |       assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
13890 |              "We should only have known counts for exiting blocks that "
13891 |              "dominate latch!");
13892 | ExitCounts.push_back(ExitCount); | ||||
13893 | } | ||||
13894 | } | ||||
13895 | if (ExitCounts.empty()) | ||||
13896 | return getCouldNotCompute(); | ||||
13897 | return getUMinFromMismatchedTypes(ExitCounts); | ||||
13898 | } | ||||
13899 | |||||
13900 | /// This rewriter is similar to SCEVParameterRewriter (it replaces SCEVUnknown | ||||
13901 | /// components following the Map (Value -> SCEV)), but skips AddRecExpr because | ||||
13902 | /// we cannot guarantee that the replacement is loop invariant in the loop of | ||||
13903 | /// the AddRec. | ||||
13904 | class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> { | ||||
13905 |   ValueToSCEVMapTy &Map;
13906 | |||||
13907 | public: | ||||
13908 | SCEVLoopGuardRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M) | ||||
13909 | : SCEVRewriteVisitor(SE), Map(M) {} | ||||
13910 | |||||
13911 | const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; } | ||||
13912 | |||||
13913 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { | ||||
13914 | auto I = Map.find(Expr->getValue()); | ||||
13915 | if (I == Map.end()) | ||||
13916 | return Expr; | ||||
13917 | return I->second; | ||||
13918 | } | ||||
13919 | }; | ||||
13920 | |||||
13921 | const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) { | ||||
13922 | auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS, | ||||
13923 | const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) { | ||||
13924 |     // If we have LHS == 0, check if LHS computes a property of some unknown
13925 |     // SCEV %v, and if so rewrite %v to express that property explicitly.
13926 | const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS); | ||||
13927 | if (Predicate == CmpInst::ICMP_EQ && RHSC && | ||||
13928 | RHSC->getValue()->isNullValue()) { | ||||
13929 | // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to | ||||
13930 | // explicitly express that. | ||||
13931 | const SCEV *URemLHS = nullptr; | ||||
13932 | const SCEV *URemRHS = nullptr; | ||||
13933 | if (matchURem(LHS, URemLHS, URemRHS)) { | ||||
13934 | if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) { | ||||
13935 | Value *V = LHSUnknown->getValue(); | ||||
13936 | auto Multiple = | ||||
13937 | getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS, | ||||
13938 | (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); | ||||
13939 | RewriteMap[V] = Multiple; | ||||
13940 | return; | ||||
13941 | } | ||||
13942 | } | ||||
13943 | } | ||||
13944 | |||||
13945 | if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) { | ||||
13946 | std::swap(LHS, RHS); | ||||
13947 | Predicate = CmpInst::getSwappedPredicate(Predicate); | ||||
13948 | } | ||||
13949 | |||||
13950 | // Check for a condition of the form (-C1 + X < C2). InstCombine will | ||||
13951 | // create this form when combining two checks of the form (X u< C2 + C1) and | ||||
13952 | // (X >=u C1). | ||||
    auto MatchRangeCheckIdiom = [this, Predicate, LHS, RHS, &RewriteMap]() {
      auto *AddExpr = dyn_cast<SCEVAddExpr>(LHS);
      if (!AddExpr || AddExpr->getNumOperands() != 2)
        return false;

      auto *C1 = dyn_cast<SCEVConstant>(AddExpr->getOperand(0));
      auto *LHSUnknown = dyn_cast<SCEVUnknown>(AddExpr->getOperand(1));
      auto *C2 = dyn_cast<SCEVConstant>(RHS);
      if (!C1 || !C2 || !LHSUnknown)
        return false;

      auto ExactRegion =
          ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt())
              .sub(C1->getAPInt());

      // Bail out, unless we have a non-wrapping, monotonic range.
      if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
        return false;
      auto I = RewriteMap.find(LHSUnknown->getValue());
      const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHSUnknown;
      RewriteMap[LHSUnknown->getValue()] = getUMaxExpr(
          getConstant(ExactRegion.getUnsignedMin()),
          getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax())));
      return true;
    };
    if (MatchRangeCheckIdiom())
      return;

    // For now, limit to conditions that provide information about unknown
    // expressions. RHS also cannot contain add recurrences.
    auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS);
    if (!LHSUnknown || containsAddRecurrence(RHS))
      return;

    // Check whether LHS has already been rewritten. In that case we want to
    // chain further rewrites onto the already rewritten value.
    auto I = RewriteMap.find(LHSUnknown->getValue());
    const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;
    const SCEV *RewrittenRHS = nullptr;
    switch (Predicate) {
    case CmpInst::ICMP_ULT:
      RewrittenRHS =
          getUMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_SLT:
      RewrittenRHS =
          getSMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_ULE:
      RewrittenRHS = getUMinExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_SLE:
      RewrittenRHS = getSMinExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_UGT:
      RewrittenRHS =
          getUMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_SGT:
      RewrittenRHS =
          getSMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_UGE:
      RewrittenRHS = getUMaxExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_SGE:
      RewrittenRHS = getSMaxExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_EQ:
      if (isa<SCEVConstant>(RHS))
        RewrittenRHS = RHS;
      break;
    case CmpInst::ICMP_NE:
      if (isa<SCEVConstant>(RHS) &&
          cast<SCEVConstant>(RHS)->getValue()->isNullValue())
        RewrittenRHS = getUMaxExpr(RewrittenLHS, getOne(RHS->getType()));
      break;
    default:
      break;
    }
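    // For example (illustrative guards): `%x u< 10` produces umin(%x, 9) for
    // %x, and `%x != 0` produces umax(%x, 1).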

    if (RewrittenRHS)
      RewriteMap[LHSUnknown->getValue()] = RewrittenRHS;
  };
  // Starting at the loop predecessor, climb up the predecessor chain as long
  // as we can find predecessors that have a unique successor leading to the
  // original header.
  // TODO: share this logic with isLoopEntryGuardedByCond.
  ValueToSCEVMapTy RewriteMap;
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
           L->getLoopPredecessor(), L->getHeader());
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
      continue;

    bool EnterIfTrue = LoopEntryPredicate->getSuccessor(0) == Pair.second;
    SmallVector<Value *, 8> Worklist;
    SmallPtrSet<Value *, 8> Visited;
    Worklist.push_back(LoopEntryPredicate->getCondition());
    while (!Worklist.empty()) {
      Value *Cond = Worklist.pop_back_val();
      if (!Visited.insert(Cond).second)
        continue;

      if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
        auto Predicate =
            EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
        CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
                         getSCEV(Cmp->getOperand(1)), RewriteMap);
        continue;
      }

      Value *L, *R;
      if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R)))
                      : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) {
        Worklist.push_back(L);
        Worklist.push_back(R);
      }
    }
  }

  // Also collect information from assumptions dominating the loop.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *AssumeI = cast<CallInst>(AssumeVH);
    auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0));
    if (!Cmp || !DT.dominates(AssumeI, L->getHeader()))
      continue;
    CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)),
                     getSCEV(Cmp->getOperand(1)), RewriteMap);
  }

  if (RewriteMap.empty())
    return Expr;
  SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
  return Rewriter.visit(Expr);
}
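
// Illustrative effect (hypothetical input, not part of the original file):
// for a loop guarded by `if (N > 0)`, the ICMP_SGT case above records
// %N -> smax(%N, 1), so applyLoopGuards(getSCEV(N), L) returns smax(%N, 1)
// and downstream trip-count logic can rely on the entry guard.
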
//===- Optional.h - Simple variant for passing optional values --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides Optional, a template class modeled in the spirit of
// OCaml's 'opt' variant. The idea is to strongly type whether or not a value
// may be absent.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_OPTIONAL_H
#define LLVM_ADT_OPTIONAL_H

#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/type_traits.h"
#include <cassert>
#include <memory>
#include <new>
#include <utility>

namespace llvm {

class raw_ostream;

namespace optional_detail {

/// Storage for any type.
//
// The specialization condition intentionally uses
// llvm::is_trivially_copy_constructible instead of
// std::is_trivially_copy_constructible. GCC versions prior to 7.4 may
// instantiate the copy constructor of `T` when
// std::is_trivially_copy_constructible is instantiated. This causes
// compilation to fail if we query the trivially copy constructible property of
// a class which is not copy constructible.
//
// The current implementation of OptionalStorage insists that in order to use
// the trivial specialization, the value_type must be trivially copy
// constructible and trivially copy assignable due to =default implementations
// of the copy/move constructor/assignment. It does not follow that this is
// necessarily the case when std::is_trivially_copyable is true (hence the
// expanded specialization condition).
//
// The move constructible / assignable conditions emulate the remaining
// behavior of std::is_trivially_copyable.
template <typename T, bool = (llvm::is_trivially_copy_constructible<T>::value &&
                              std::is_trivially_copy_assignable<T>::value &&
                              (std::is_trivially_move_constructible<T>::value ||
                               !std::is_move_constructible<T>::value) &&
                              (std::is_trivially_move_assignable<T>::value ||
                               !std::is_move_assignable<T>::value))>
class OptionalStorage {
  union {
    char empty;
    T value;
  };
  bool hasVal;

public:
  ~OptionalStorage() { reset(); }

  constexpr OptionalStorage() noexcept : empty(), hasVal(false) {}

  constexpr OptionalStorage(OptionalStorage const &other) : OptionalStorage() {
    if (other.hasValue()) {
      emplace(other.value);
    }
  }
  constexpr OptionalStorage(OptionalStorage &&other) : OptionalStorage() {
    if (other.hasValue()) {
      emplace(std::move(other.value));
    }
  }

  template <class... Args>
  constexpr explicit OptionalStorage(in_place_t, Args &&... args)
      : value(std::forward<Args>(args)...), hasVal(true) {}

  void reset() noexcept {
    if (hasVal) {
      value.~T();
      hasVal = false;
    }
  }

  constexpr bool hasValue() const noexcept { return hasVal; }

  T &getValue() LLVM_LVALUE_FUNCTION noexcept {
    assert(hasVal);
    return value;
  }
  constexpr T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
    assert(hasVal);
    return value;
  }
#if LLVM_HAS_RVALUE_REFERENCE_THIS
  T &&getValue() && noexcept {
    assert(hasVal);
    return std::move(value);
  }
#endif

  template <class... Args> void emplace(Args &&... args) {
    reset();
    ::new ((void *)std::addressof(value)) T(std::forward<Args>(args)...);
    hasVal = true;
  }

  OptionalStorage &operator=(T const &y) {
    if (hasValue()) {
      value = y;
    } else {
      ::new ((void *)std::addressof(value)) T(y);
      hasVal = true;
    }
    return *this;
  }
  OptionalStorage &operator=(T &&y) {
    if (hasValue()) {
      value = std::move(y);
    } else {
      ::new ((void *)std::addressof(value)) T(std::move(y));
      hasVal = true;
    }
    return *this;
  }

  OptionalStorage &operator=(OptionalStorage const &other) {
    if (other.hasValue()) {
      if (hasValue()) {
        value = other.value;
      } else {
        ::new ((void *)std::addressof(value)) T(other.value);
        hasVal = true;
      }
    } else {
      reset();
    }
    return *this;
  }

  OptionalStorage &operator=(OptionalStorage &&other) {
    if (other.hasValue()) {
      if (hasValue()) {
        value = std::move(other.value);
      } else {
        ::new ((void *)std::addressof(value)) T(std::move(other.value));
        hasVal = true;
      }
    } else {
      reset();
    }
    return *this;
  }
};
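
// Illustrative note (not part of the original header): a type such as `int`
// satisfies all four trait conditions in the default template argument above,
// so OptionalStorage<int> selects the trivial specialization below, whose
// copy/move operations are =default. A hypothetical
//   struct Pinned { Pinned(const Pinned &); };
// has a user-provided copy constructor, fails the first condition, and falls
// back to the generic storage above.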

template <typename T> class OptionalStorage<T, true> {
  union {
    char empty;
    T value;
  };
  bool hasVal = false;

public:
  ~OptionalStorage() = default;

  constexpr OptionalStorage() noexcept : empty{} {}

  constexpr OptionalStorage(OptionalStorage const &other) = default;
  constexpr OptionalStorage(OptionalStorage &&other) = default;

  OptionalStorage &operator=(OptionalStorage const &other) = default;
  OptionalStorage &operator=(OptionalStorage &&other) = default;

  template <class... Args>
  constexpr explicit OptionalStorage(in_place_t, Args &&... args)
      : value(std::forward<Args>(args)...), hasVal(true) {}

  void reset() noexcept {
    if (hasVal) {
      value.~T();
      hasVal = false;
    }
  }

  constexpr bool hasValue() const noexcept { return hasVal; }

  T &getValue() LLVM_LVALUE_FUNCTION noexcept {
    assert(hasVal);
    return value;
  }
  constexpr T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
    assert(hasVal);
    return value;
  }
#if LLVM_HAS_RVALUE_REFERENCE_THIS
  T &&getValue() && noexcept {
    assert(hasVal);
    return std::move(value);
  }
#endif

  template <class... Args> void emplace(Args &&... args) {
    reset();
    ::new ((void *)std::addressof(value)) T(std::forward<Args>(args)...);
    hasVal = true;
  }

  OptionalStorage &operator=(T const &y) {
    if (hasValue()) {
      value = y;
    } else {
      ::new ((void *)std::addressof(value)) T(y);
      hasVal = true;
    }
    return *this;
  }
  OptionalStorage &operator=(T &&y) {
    if (hasValue()) {
      value = std::move(y);
    } else {
      ::new ((void *)std::addressof(value)) T(std::move(y));
      hasVal = true;
    }
    return *this;
  }
};

} // namespace optional_detail

template <typename T> class Optional {
  optional_detail::OptionalStorage<T> Storage;

public:
  using value_type = T;

  constexpr Optional() {}
  constexpr Optional(NoneType) {}

  constexpr Optional(const T &y) : Storage(in_place, y) {}
  constexpr Optional(const Optional &O) = default;

  constexpr Optional(T &&y) : Storage(in_place, std::move(y)) {}
  constexpr Optional(Optional &&O) = default;

  template <typename... ArgTypes>
  constexpr Optional(in_place_t, ArgTypes &&...Args)
      : Storage(in_place, std::forward<ArgTypes>(Args)...) {}

  Optional &operator=(T &&y) {
    Storage = std::move(y);
    return *this;
  }
  Optional &operator=(Optional &&O) = default;

  /// Create a new object by constructing it in place with the given arguments.
  template <typename... ArgTypes> void emplace(ArgTypes &&... Args) {
    Storage.emplace(std::forward<ArgTypes>(Args)...);
  }

  static constexpr Optional create(const T *y) {
    return y ? Optional(*y) : Optional();
  }

  Optional &operator=(const T &y) {
    Storage = y;
    return *this;
  }
  Optional &operator=(const Optional &O) = default;

  void reset() { Storage.reset(); }

  constexpr const T *getPointer() const { return &Storage.getValue(); }
  T *getPointer() { return &Storage.getValue(); }
  constexpr const T &getValue() const LLVM_LVALUE_FUNCTION {
    return Storage.getValue();
  }
  T &getValue() LLVM_LVALUE_FUNCTION { return Storage.getValue(); }

  constexpr explicit operator bool() const { return hasValue(); }
  constexpr bool hasValue() const { return Storage.hasValue(); }
  constexpr const T *operator->() const { return getPointer(); }
  T *operator->() { return getPointer(); }
  constexpr const T &operator*() const LLVM_LVALUE_FUNCTION {
    return getValue();
  }
  T &operator*() LLVM_LVALUE_FUNCTION { return getValue(); }

  template <typename U>
  constexpr T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION {
    return hasValue() ? getValue() : std::forward<U>(value);
  }

  /// Apply a function to the value if present; otherwise return None.
  template <class Function>
  auto map(const Function &F) const LLVM_LVALUE_FUNCTION
      -> Optional<decltype(F(getValue()))> {
    if (*this)
      return F(getValue());
    return None;
  }

#if LLVM_HAS_RVALUE_REFERENCE_THIS
  T &&getValue() && { return std::move(Storage.getValue()); }
  T &&operator*() && { return std::move(Storage.getValue()); }

  template <typename U> T getValueOr(U &&value) && {
    return hasValue() ? std::move(getValue()) : std::forward<U>(value);
  }

  /// Apply a function to the value if present; otherwise return None.
  template <class Function>
  auto map(const Function &F) &&
      -> Optional<decltype(F(std::move(*this).getValue()))> {
    if (*this)
      return F(std::move(*this).getValue());
    return None;
  }
#endif
};
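
// A minimal usage sketch (illustrative; `parsePort` and its bounds are
// hypothetical, not part of this header):
//
//   llvm::Optional<unsigned> parsePort(llvm::StringRef S) {
//     unsigned V;
//     if (S.getAsInteger(10, V) || V > 65535)
//       return llvm::None; // absence is expressed in the return type
//     return V;            // implicit Optional(const T &)
//   }
//
//   unsigned P = parsePort(Arg).getValueOr(8080u); // default on failure
//   auto Doubled = parsePort(Arg).map([](unsigned X) { return 2 * X; });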

template <class T> llvm::hash_code hash_value(const Optional<T> &O) {
  return O ? hash_combine(true, *O) : hash_value(false);
}

template <typename T, typename U>
constexpr bool operator==(const Optional<T> &X, const Optional<U> &Y) {
  if (X && Y)
    return *X == *Y;
  return X.hasValue() == Y.hasValue();
}

template <typename T, typename U>
constexpr bool operator!=(const Optional<T> &X, const Optional<U> &Y) {
  return !(X == Y);
}

template <typename T, typename U>
constexpr bool operator<(const Optional<T> &X, const Optional<U> &Y) {
  if (X && Y)
    return *X < *Y;
  return X.hasValue() < Y.hasValue();
}

template <typename T, typename U>
constexpr bool operator<=(const Optional<T> &X, const Optional<U> &Y) {
  return !(Y < X);
}

template <typename T, typename U>
constexpr bool operator>(const Optional<T> &X, const Optional<U> &Y) {
  return Y < X;
}

template <typename T, typename U>
constexpr bool operator>=(const Optional<T> &X, const Optional<U> &Y) {
  return !(X < Y);
}

template <typename T>
constexpr bool operator==(const Optional<T> &X, NoneType) {
  return !X;
}

template <typename T>
constexpr bool operator==(NoneType, const Optional<T> &X) {
  return X == None;
}

template <typename T>
constexpr bool operator!=(const Optional<T> &X, NoneType) {
  return !(X == None);
}

template <typename T>
constexpr bool operator!=(NoneType, const Optional<T> &X) {
  return X != None;
}

template <typename T> constexpr bool operator<(const Optional<T> &, NoneType) {
  return false;
}

template <typename T> constexpr bool operator<(NoneType, const Optional<T> &X) {
  return X.hasValue();
}

template <typename T>
constexpr bool operator<=(const Optional<T> &X, NoneType) {
  return !(None < X);
}

template <typename T>
constexpr bool operator<=(NoneType, const Optional<T> &X) {
  return !(X < None);
}

template <typename T> constexpr bool operator>(const Optional<T> &X, NoneType) {
  return None < X;
}

template <typename T> constexpr bool operator>(NoneType, const Optional<T> &X) {
  return X < None;
}

template <typename T>
constexpr bool operator>=(const Optional<T> &X, NoneType) {
  return None <= X;
}

template <typename T>
constexpr bool operator>=(NoneType, const Optional<T> &X) {
  return X <= None;
}

template <typename T>
constexpr bool operator==(const Optional<T> &X, const T &Y) {
  return X && *X == Y;
}

template <typename T>
constexpr bool operator==(const T &X, const Optional<T> &Y) {
  return Y && X == *Y;
}

template <typename T>
constexpr bool operator!=(const Optional<T> &X, const T &Y) {
  return !(X == Y);
}

template <typename T>
constexpr bool operator!=(const T &X, const Optional<T> &Y) {
  return !(X == Y);
}

template <typename T>
constexpr bool operator<(const Optional<T> &X, const T &Y) {
  return !X || *X < Y;
}

template <typename T>
constexpr bool operator<(const T &X, const Optional<T> &Y) {
  return Y && X < *Y;
}

template <typename T>
constexpr bool operator<=(const Optional<T> &X, const T &Y) {
  return !(Y < X);
}

template <typename T>
constexpr bool operator<=(const T &X, const Optional<T> &Y) {
  return !(Y < X);
}

template <typename T>
constexpr bool operator>(const Optional<T> &X, const T &Y) {
  return Y < X;
}

template <typename T>
constexpr bool operator>(const T &X, const Optional<T> &Y) {
  return Y < X;
}

template <typename T>
constexpr bool operator>=(const Optional<T> &X, const T &Y) {
  return !(X < Y);
}

template <typename T>
constexpr bool operator>=(const T &X, const Optional<T> &Y) {
  return !(X < Y);
}

raw_ostream &operator<<(raw_ostream &OS, NoneType);

template <typename T, typename = decltype(std::declval<raw_ostream &>()
                                              << std::declval<const T &>())>
raw_ostream &operator<<(raw_ostream &OS, const Optional<T> &O) {
  if (O)
    OS << *O;
  else
    OS << None;
  return OS;
}

} // end namespace llvm

#endif // LLVM_ADT_OPTIONAL_H