File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Analysis/ScalarEvolution.cpp
Warning: line 3923, column 7: Called C++ object pointer is null
//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//     Chains of recurrences -- a method to expedite the evaluation
//     of closed-form functions
//     Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//     On computational properties of chains of recurrences
//     Eugene V. Zima
//
//     Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//     Robert A. van Engelen
//
//     Efficient Symbolic Analysis for Optimizing Compilers
//     Robert A. van Engelen
//
//     Using the chains of recurrences algebra for data dependence testing and
//     induction variable substitution
//     MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
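
// A minimal standalone sketch (hypothetical helper, not part of the original
// file) of the idea behind chains of recurrences referenced above: the
// simplest chrec is the affine {A,+,B}, which describes the induction
// variable of `for (i = A; ...; i += B)`. Its value at iteration It has the
// closed form A + B*It, in the same wrap-around modular arithmetic SCEV uses;
// the higher-degree generalization is SCEVAddRecExpr::evaluateAtIteration
// further below.
static unsigned evaluateAffineChrec(unsigned A, unsigned B, unsigned It) {
  // {A,+,B} at iteration It: the start value plus It steps of B.
  return A + B * It;
}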

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::ZeroOrMore,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<bool>
ClassifyExpressions("scalar-evolution-classify-expressions",
    cl::Hidden, cl::init(true),
    cl::desc("When printing analysis, include information on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
    const SCEV *Op = PtrToInt->getOperand();
    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";
    return;
  }
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    default:
      llvm_unreachable("There are no other nary expression types.");
    }
    OS << "(";
    ListSeparator LS(OpStr);
    for (const SCEV *Op : NAry->operands())
      OS << LS << *Op;
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
    return cast<SCEVAddRecExpr>(this)->getType();
  case scMulExpr:
    return cast<SCEVMulExpr>(this)->getType();
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVMinMaxExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<GEPOperator>(CE)->getSourceElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}
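
// The three recognizers above pick apart the "GEP off null" constant
// expressions that front ends classically emit for type metrics. As a rough
// sketch of the correspondence (LLVM IR shapes on the left; these are exactly
// the forms the checks above look for):
//
//   ptrtoint(gep T, T* null, i32 1)                       -> sizeof(T)
//   ptrtoint(gep {i1, T}, {i1, T}* null, i32 0, i32 1)    -> alignof(T)
//   ptrtoint(gep S, S* null, i32 0, i32 FieldNo)          -> offsetof(S, FieldNo)
//
// The alignof trick works because in the non-packed struct {i1, T} the T
// member is placed at the first suitably aligned offset after the i1.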

//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCacheValue is a set of pairs of values
/// that have been previously deemed to be "equally complex" by this routine.
/// It is intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}
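
// A standalone sketch of the memoization idea used by CompareValueComplexity;
// the node type and function below are hypothetical and unused. A recursive
// structural comparison over a DAG caches pairs it has already resolved, so a
// node reachable along many paths is examined only once instead of once per
// path, which is exactly what breaks the exponential blowup in the doc
// comment's example.
namespace {
struct ExprNode {
  int Tag;
  const ExprNode *L = nullptr, *R = nullptr;
};
} // end anonymous namespace

static bool structurallyEqual(
    const ExprNode *A, const ExprNode *B,
    std::map<std::pair<const ExprNode *, const ExprNode *>, bool> &Memo) {
  if (A == B)
    return true;
  if (!A || !B || A->Tag != B->Tag)
    return false;
  auto Key = std::make_pair(A, B);
  auto It = Memo.find(Key);
  if (It != Memo.end())
    return It->second;
  // Unlike this sketch, the real code uses EquivalenceClasses and records
  // only pairs proven equal, since it stops recursing on the first mismatch.
  bool Result = structurallyEqual(A->L, B->L, Memo) &&
                structurallyEqual(A->R, B->R, Memo);
  Memo[Key] = Result;
  return Result;
}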

// Return negative, zero, or positive if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// more efficient.
// If the max analysis depth was reached, return None, as we do not know for
// sure that the two operands are equivalent.
static Optional<int>
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      EquivalenceClasses<const Value *> &EqCacheValue,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;

  if (Depth > MaxSCEVCompareDepth)
    return None;

  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LA->getOperand(i), RA->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LC->getOperand(i), RC->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                   RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    auto X =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(),
                              RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;

  // Whether LHS has provably less complexity than RHS.
  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
    auto Complexity =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
    return Complexity && *Complexity < 0;
  };
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (IsLessComplex(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return IsLessComplex(LHS, RHS);
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
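
// A standalone analogue (hypothetical, unused) of the grouping loop above,
// with ints standing in for SCEVs and parity standing in for getSCEVType():
// stable-sort by the coarse key, then pull duplicates of each element forward
// so equal values become adjacent, never consulting object addresses, which
// keeps the result deterministic across runs.
static void groupDuplicates(SmallVectorImpl<int> &Ops) {
  if (Ops.size() < 2)
    return;
  llvm::stable_sort(Ops, [](int L, int R) { return (L & 1) < (R & 1); });
  for (unsigned i = 0, e = Ops.size(); i + 2 <= e; ++i) {
    int S = Ops[i];
    // Scan the rest of this key's run; move any duplicate of S to the slot
    // right after the duplicates of S gathered so far.
    for (unsigned j = i + 1; j != e && (Ops[j] & 1) == (S & 1); ++j)
      if (Ops[j] == S)
        std::swap(Ops[++i], Ops[j]);
  }
}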

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must ensure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}
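
// A standalone numeric check (hypothetical helper, unused) of the scheme
// above for K = 3 and W = 32. Here K! = 6 = 2^T * OddFactorial with T = 1 and
// OddFactorial = 3, and 0xAAAAAAABu is the multiplicative inverse of 3 modulo
// 2^32, so the division by the odd part is an exact multiply.
static uint32_t binomialCoeff3(uint32_t It) {
  // It*(It-1)*(It-2) is divisible by 3! = 6. Ordinary 64-bit wrap-around
  // multiplication keeps the bottom W + T = 33 bits of the product exact,
  // which is all the right shift by T below needs. The subtractions are done
  // at the input width, as in the code above.
  uint64_t Prod = (uint64_t)It * (uint32_t)(It - 1);
  Prod *= (uint32_t)(It - 2);
  // Divide by 2^T, truncate to W bits, then do the exact odd division.
  return (uint32_t)(Prod >> 1) * 0xAAAAAAABu;
}
// For example, binomialCoeff3(5) == 10 == C(5,3), and the result stays
// correct modulo 2^32 even when the intermediate product wraps.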

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  return evaluateAtIteration(makeArrayRef(op_begin(), op_end()), It, SE);
}

const SCEV *
SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                    const SCEV *It, ScalarEvolution &SE) {
  assert(Operands.size() > 0);
  const SCEV *Result = Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
  }
  return Result;
}
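
// A standalone check (hypothetical helpers, unused) of the evaluation rule
// above on the quadratic chrec {A,+,B,+,C}: the closed form
// A + B*BC(N,1) + C*BC(N,2) must agree with stepping the recurrence N times,
// including when the unsigned arithmetic wraps.
static uint64_t bc2(uint64_t N) {
  // BC(N,2) = N*(N-1)/2, computed without losing a bit by halving whichever
  // of the two consecutive factors is even.
  return (N & 1) ? N * ((N - 1) / 2) : (N / 2) * (N - 1);
}

static uint64_t evalQuadChrec(uint64_t A, uint64_t B, uint64_t C, uint64_t N) {
  return A + B * N + C * bc2(N);
}

static uint64_t stepQuadChrec(uint64_t A, uint64_t B, uint64_t C, uint64_t N) {
  // {A,+,B,+,C}: the value advances by the current step each iteration, and
  // the step itself advances by C.
  uint64_t Val = A, Step = B;
  for (uint64_t i = 0; i != N; ++i) {
    Val += Step;
    Step += C;
  }
  return Val;
}
// For example, evalQuadChrec(0, 1, 1, 5) == stepQuadChrec(0, 1, 1, 5) == 15.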

//===----------------------------------------------------------------------===//
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
                                                     unsigned Depth) {
  assert(Depth <= 1 &&
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return Op;

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // It isn't legal for optimizations to construct new ptrtoint expressions
  // for non-integral pointers.
  if (getDataLayout().isNonIntegralPointerType(Op->getType()))
    return getCouldNotCompute();

  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());

  // We can only trivially model ptrtoint if SCEV's effective (integer) type
  // is sufficiently wide to represent all possible pointer values.
  // We could theoretically teach SCEV to truncate wider pointers, but
  // that isn't implemented for now.
  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
      getDataLayout().getTypeSizeInBits(IntPtrTy))
    return getCouldNotCompute();

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    // Perform some basic constant folding. If the operand of the ptr2int cast
    // is a null pointer, don't create a ptr2int SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but lack motivational cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(IntPtrTy);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression; we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      assert(Expr->getType()->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return IntOp;
}
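
// Sketch of the rewrite performed above, with the SCEV written in its print
// syntax (%p is a hypothetical pointer-typed SCEVUnknown on a target where
// pointers are 64 bits wide):
//
//   ptrtoint of (4 + %p)  becomes  (4 + (ptrtoint i8* %p to i64))
//
// i.e. the cast is pushed down until it wraps only SCEVUnknowns, and all the
// surrounding arithmetic is re-expressed on integers.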
1175 | ||||
1176 | const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) { | |||
1177 | assert(Ty->isIntegerTy() && "Target type must be an integer type!")((void)0); | |||
1178 | ||||
1179 | const SCEV *IntOp = getLosslessPtrToIntExpr(Op); | |||
1180 | if (isa<SCEVCouldNotCompute>(IntOp)) | |||
1181 | return IntOp; | |||
1182 | ||||
1183 | return getTruncateOrZeroExtend(IntOp, Ty); | |||
1184 | } | |||
1185 | ||||
1186 | const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty, | |||
1187 | unsigned Depth) { | |||
1188 | assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&((void)0) | |||
1189 | "This is not a truncating conversion!")((void)0); | |||
1190 | assert(isSCEVable(Ty) &&((void)0) | |||
1191 | "This is not a conversion to a SCEVable type!")((void)0); | |||
1192 | assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!")((void)0); | |||
1193 | Ty = getEffectiveSCEVType(Ty); | |||
1194 | ||||
1195 | FoldingSetNodeID ID; | |||
1196 | ID.AddInteger(scTruncate); | |||
1197 | ID.AddPointer(Op); | |||
1198 | ID.AddPointer(Ty); | |||
1199 | void *IP = nullptr; | |||
1200 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; | |||
1201 | ||||
1202 | // Fold if the operand is constant. | |||
1203 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) | |||
1204 | return getConstant( | |||
1205 | cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty))); | |||
1206 | ||||
1207 | // trunc(trunc(x)) --> trunc(x) | |||
1208 | if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) | |||
1209 | return getTruncateExpr(ST->getOperand(), Ty, Depth + 1); | |||
1210 | ||||
1211 | // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing | |||
1212 | if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) | |||
1213 | return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1); | |||
1214 | ||||
1215 | // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing | |||
1216 | if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) | |||
1217 | return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1); | |||
1218 | ||||
1219 | if (Depth > MaxCastDepth) { | |||
1220 | SCEV *S = | |||
1221 | new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty); | |||
1222 | UniqueSCEVs.InsertNode(S, IP); | |||
1223 | addToLoopUseLists(S); | |||
1224 | return S; | |||
1225 | } | |||
1226 | ||||
1227 | // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and | |||
1228 | // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN), | |||
1229 | // if after transforming we have at most one truncate, not counting truncates | |||
1230 | // that replace other casts. | |||
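  // Illustrative instance (not in the original source): truncating
  // i64 (zext(a) + zext(b)) to i32 distributes to trunc(zext(a)) +
  // trunc(zext(b)); each of those re-folds through the zext rule above into
  // a plain zext or a no-op, so no new truncates survive and the transform
  // is a clear win.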
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked at the beginning that ID is not in the cache, it is
    // possible that, during the recursion and other modifications above, an
    // entry for ID was inserted into the cache. So if we find it, just
    // return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
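// Worked instance (illustrative): for an 8-bit recurrence whose step has a
// signed-range maximum of 4, the limit is SINT_MIN - 4, which wraps to 124,
// with Pred = ICMP_SLT: any value s< 124 can grow by at most 4 without
// exceeding SINT_MAX = 127.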
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
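// Worked instance (illustrative): at 8 bits with an unsigned-range maximum
// of 4 for Step, the limit is 0 - 4 = 252 in wrapping arithmetic, with
// ICMP_ULT: any value u< 252 can grow by at most 4 without wrapping past
// UINT_MAX = 255.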
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//   {S,+,X} == {S-T,+,X} + T
//    => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
// If ({S-T,+,X} + T) does not overflow ... (1)
//
//   RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
// If {S-T,+,X} does not overflow ... (2)
//
//   RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//       == {Ext(S-T)+Ext(T),+,Ext(X)}
//
// If (S-T)+T does not overflow ... (3)
//
//   RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//       == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
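// Worked instance (illustrative): in (5 + 4*x + 8*y) the non-constant
// operands share TZ = 2 trailing zero bits, so D = 5 mod 4 = 1 and the
// expression splits as 1 + (4 + 4*x + 8*y); the inner sum stays a multiple
// of 4, so adding 1 back can never carry out of the bit width.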
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt &C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}

// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
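// Worked instance (illustrative): for {13,+,4*x} the step keeps TZ = 2
// trailing zero bits, so D = 13 mod 4 = 1 and the recurrence splits as
// 1 + {12,+,4*x}, whose values all remain multiples of 4.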
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const APInt &ConstantStart,
                                            const SCEV *Step) {
  const unsigned BitWidth = ConstantStart.getBitWidth();
  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
  if (TZ)
    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
                         : ConstantStart;
  return APInt(BitWidth, 0);
}

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
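    // Illustrative case (not in the original source): if x is an i32 with
    // unsigned range [0, 200], truncating to i8 drops only zero bits, so
    // zext(trunc(x)) is just zext(x); the containment check below proves
    // exactly this.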
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty, Depth);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop. The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow. Use this fact to avoid
      // doing extra work that may not pay off.
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {

        auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
        if (AR->hasNoUnsignedWrap()) {
          // Same as the nuw case above - duplicated here to avoid a compile
          // time issue. It's not clear that the order of checks does matter,
          // but it's one of two possible causes for a change which was
          // reverted. Be conservative for the moment.
          return getAddRecExpr(
              getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                       Depth + 1),
              getZeroExtendExpr(Step, Ty, Depth + 1), L,
              AR->getNoWrapFlags());
        }

        // For a negative step, we can extend the operands iff doing so only
        // traverses values in the range zext([0,UINT_MAX]).
        if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRangeMin(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
            // Cache knowledge of AR NW, which is propagated to this
            // AddRec. Negative step causes unsigned wrap, but it
            // still can't self-wrap.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not unsigned wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
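      // For instance (illustrative): zext({5,+,4}) becomes
      // (zext(1) + zext({4,+,4}))<nuw><nsw>, since every value of {4,+,4}
      // is a multiple of 4 and adding 1 back cannot wrap.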
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SZExtD, SZExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // zext(A % B) --> zext(A) % zext(B)
  {
    const SCEV *LHS;
    const SCEV *RHS;
    if (matchURem(Op, LHS, RHS))
      return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
                         getZeroExtendExpr(RHS, Ty, Depth + 1));
  }

  // zext(A / B) --> zext(A) / zext(B).
  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
    return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
                       getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
    if (SA->hasNoUnsignedWrap()) {
      // If the addition does not unsign overflow then we can, by definition,
      // commute the zero extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // Often address arithmetics contain expressions like
    // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
    // This transformation is useful while proving that such expressions are
    // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
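    // Following that instance (illustrative): zext(5 + 4*X) splits into
    // zext(1) + zext(4 + 4*X), and zext(6 + 4*X) into zext(2) + zext(4 + 4*X),
    // making their difference visibly the constant 1.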
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SZExtD, SZExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }

  if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
    // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
    if (SM->hasNoUnsignedWrap()) {
      // If the multiply does not unsign overflow then we can, by definition,
      // commute the zero extension with the multiply operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SM->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(2^K * (trunc X to iN)) to iM ->
    // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
    //
    // Proof:
    //
    //     zext(2^K * (trunc X to iN)) to iM
    //   = zext((trunc X to iN) << K) to iM
    //   = zext((trunc X to i{N-K}) << K)<nuw> to iM
    //     (because shl removes the top K bits)
    //   = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
    //   = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
    //
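    // Concrete instance (illustrative): zext(4 * (trunc i64 %X to i32)) to
    // i64 becomes (4 * (zext (trunc %X to i30) to i64))<nuw>, with K = 2 and
    // a new truncate width of 32 - 2 = 30 bits.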
    if (SM->getNumOperands() == 2)
      if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
        if (MulLHS->getAPInt().isPowerOf2())
          if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
            int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
                               MulLHS->getAPInt().logBase2();
            Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
            return getMulExpr(
                getZeroExtendExpr(MulLHS, Ty),
                getZeroExtendExpr(
                    getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
                SCEV::FlagNUW, Depth + 1);
          }
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *
ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);

  // sext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Limit recursion depth.
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // sext(trunc(x)) --> sext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all sign bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty, Depth);
  }

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
    if (SA->hasNoSignedWrap()) {
      // If the addition does not sign overflow then we can, by definition,
      // commute the sign extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
    }

    // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not signed wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // For instance, this will bring two seemingly different expressions:
    //     1 + sext(5 + 20 * %x + 24 * %y)  and
    //         sext(6 + 20 * %x + 24 * %y)
    // to the same form:
    //     2 + sext(4 + 20 * %x + 24 * %y)
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SSExtD, SSExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoSignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            // => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      auto NewFlags = proveNoSignedWrapViaInduction(AR);
      setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      if (AR->hasNoSignedWrap()) {
        // Same as the nsw case above - duplicated here to avoid a compile
        // time issue. It's not clear that the order of checks does matter,
        // but it's one of two possible causes for a change which was
        // reverted. Be conservative for the moment.
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }

      // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not signed wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SSExtD, SSExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // If the input value is provably positive and we could not simplify
  // away the sext, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty, Depth + 1);

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, update the given map. This is a helper function for
/// getAddRecExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddRecExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddRecExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getAPInt();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
            CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                         Add->op_begin(), Add->getNumOperands(),
                                         NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands()));
        const SCEV *Key = SE.getMulExpr(MulOps);
        auto Pair = M.insert({Key, NewScale});
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
                                      const SCEV *LHS, const SCEV *RHS) {
  const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
                                            SCEV::NoWrapFlags, unsigned);
  switch (BinOp) {
  default:
    llvm_unreachable("Unsupported binary op");
  case Instruction::Add:
    Operation = &ScalarEvolution::getAddExpr;
    break;
  case Instruction::Sub:
    Operation = &ScalarEvolution::getMinusSCEV;
    break;
  case Instruction::Mul:
    Operation = &ScalarEvolution::getMulExpr;
    break;
  }

  const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) =
      Signed ? &ScalarEvolution::getSignExtendExpr
             : &ScalarEvolution::getZeroExtendExpr;

  // Check ext(LHS op RHS) == ext(LHS) op ext(RHS)
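  // E.g. (illustrative): for an i8 unsigned add whose operands are both
  // known to fit in 7 bits, zext(LHS + RHS) to i16 and zext(LHS) + zext(RHS)
  // can fold to the same SCEV, in which case the narrow add provably cannot
  // wrap.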
  auto *NarrowTy = cast<IntegerType>(LHS->getType());
  auto *WideTy =
      IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);

  const SCEV *A = (this->*Extension)(
      (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0);
  const SCEV *B = (this->*Operation)((this->*Extension)(LHS, WideTy, 0),
                                     (this->*Extension)(RHS, WideTy, 0),
                                     SCEV::FlagAnyWrap, 0);
  return A == B;
}

std::pair<SCEV::NoWrapFlags, bool /*Deduced*/>
ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
    const OverflowingBinaryOperator *OBO) {
  SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap;

  if (OBO->hasNoUnsignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  if (OBO->hasNoSignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);

  bool Deduced = false;

  if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap())
    return {Flags, Deduced};

  if (OBO->getOpcode() != Instruction::Add &&
      OBO->getOpcode() != Instruction::Sub &&
      OBO->getOpcode() != Instruction::Mul)
    return {Flags, Deduced};

  const SCEV *LHS = getSCEV(OBO->getOperand(0));
  const SCEV *RHS = getSCEV(OBO->getOperand(1));

  if (!OBO->hasNoUnsignedWrap() &&
      willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
                      /* Signed */ false, LHS, RHS)) {
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    Deduced = true;
  }

  if (!OBO->hasNoSignedWrap() &&
      willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
                      /* Signed */ true, LHS, RHS)) {
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
    Deduced = true;
  }

  return {Flags, Deduced};
}

// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `Flags' as can't-wrap behavior. Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
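// For example (illustrative): an add known <nsw> whose operands are all
// non-negative is also <nuw>, and (1 + x) gains <nuw> whenever x's unsigned
// range stays below UINT_MAX, per the constant-range checks below.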
2331 | static SCEV::NoWrapFlags | |||
2332 | StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, | |||
2333 | const ArrayRef<const SCEV *> Ops, | |||
2334 | SCEV::NoWrapFlags Flags) { | |||
2335 | using namespace std::placeholders; | |||
2336 | ||||
2337 | using OBO = OverflowingBinaryOperator; | |||
2338 | ||||
2339 | bool CanAnalyze = | |||
2340 | Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; | |||
2341 | (void)CanAnalyze; | |||
2342 | assert(CanAnalyze && "don't call from other places!")((void)0); | |||
2343 | ||||
2344 | int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; | |||
2345 | SCEV::NoWrapFlags SignOrUnsignWrap = | |||
2346 | ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); | |||
2347 | ||||
2348 | // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. | |||
2349 | auto IsKnownNonNegative = [&](const SCEV *S) { | |||
2350 | return SE->isKnownNonNegative(S); | |||
2351 | }; | |||
2352 | ||||
2353 | if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) | |||
2354 | Flags = | |||
2355 | ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); | |||
2356 | ||||
2357 | SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); | |||
2358 | ||||
2359 | if (SignOrUnsignWrap != SignOrUnsignMask && | |||
2360 | (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && | |||
2361 | isa<SCEVConstant>(Ops[0])) { | |||
2362 | ||||
2363 | auto Opcode = [&] { | |||
2364 | switch (Type) { | |||
2365 | case scAddExpr: | |||
2366 | return Instruction::Add; | |||
2367 | case scMulExpr: | |||
2368 | return Instruction::Mul; | |||
2369 | default: | |||
2370 | llvm_unreachable("Unexpected SCEV op."); | |||
2371 | } | |||
2372 | }(); | |||
2373 | ||||
2374 | const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); | |||
2375 | ||||
2376 | // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow. | |||
2377 | if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { | |||
2378 | auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( | |||
2379 | Opcode, C, OBO::NoSignedWrap); | |||
2380 | if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) | |||
2381 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); | |||
2382 | } | |||
2383 | ||||
2384 | // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsigned-overflow. | |||
2385 | if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { | |||
2386 | auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( | |||
2387 | Opcode, C, OBO::NoUnsignedWrap); | |||
2388 | if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) | |||
2389 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); | |||
2390 | } | |||
2391 | } | |||
2392 | ||||
2393 | return Flags; | |||
2394 | } | |||
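     | // Editor's sketch of the region check above (assuming the documented | |||
     | // ConstantRange semantics): | |||
     | //   auto R = ConstantRange::makeGuaranteedNoWrapRegion( | |||
     | //       Instruction::Add, APInt(8, 100), OBO::NoSignedWrap); | |||
     | // R holds exactly those i8 values V for which V + 100 cannot sign-wrap, | |||
     | // so FlagNSW is safe whenever getSignedRange(Ops[1]) lies inside R. | |||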
2395 | ||||
2396 | bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { | |||
2397 | return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); | |||
2398 | } | |||
2399 | ||||
2400 | /// Get a canonical add expression, or something simpler if possible. | |||
2401 | const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, | |||
2402 | SCEV::NoWrapFlags OrigFlags, | |||
2403 | unsigned Depth) { | |||
2404 | assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && | |||
2405 | "only nuw or nsw allowed"); | |||
2406 | assert(!Ops.empty() && "Cannot get empty add!"); | |||
2407 | if (Ops.size() == 1) return Ops[0]; | |||
2408 | #ifndef NDEBUG | |||
2409 | Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); | |||
2410 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) | |||
2411 | assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && | |||
2412 | "SCEVAddExpr operand types don't match!"); | |||
2413 | unsigned NumPtrs = count_if( | |||
2414 | Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); }); | |||
2415 | assert(NumPtrs <= 1 && "add has at most one pointer operand"); | |||
2416 | #endif | |||
2417 | ||||
2418 | // Sort by complexity, this groups all similar expression types together. | |||
2419 | GroupByComplexity(Ops, &LI, DT); | |||
2420 | ||||
2421 | // If there are any constants, fold them together. | |||
2422 | unsigned Idx = 0; | |||
2423 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { | |||
2424 | ++Idx; | |||
2425 | assert(Idx < Ops.size()); | |||
2426 | while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { | |||
2427 | // We found two constants, fold them together! | |||
2428 | Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); | |||
2429 | if (Ops.size() == 2) return Ops[0]; | |||
2430 | Ops.erase(Ops.begin()+1); // Erase the folded element | |||
2431 | LHSC = cast<SCEVConstant>(Ops[0]); | |||
2432 | } | |||
2433 | ||||
2434 | // If we are left with a constant zero being added, strip it off. | |||
2435 | if (LHSC->getValue()->isZero()) { | |||
2436 | Ops.erase(Ops.begin()); | |||
2437 | --Idx; | |||
2438 | } | |||
2439 | ||||
2440 | if (Ops.size() == 1) return Ops[0]; | |||
2441 | } | |||
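     | // e.g., (3 + 5 + %x) folds above to (8 + %x); a resulting zero constant | |||
     | // would be stripped entirely. | |||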
2442 | ||||
2443 | // Delay expensive flag strengthening until necessary. | |||
2444 | auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) { | |||
2445 | return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags); | |||
2446 | }; | |||
2447 | ||||
2448 | // Limit recursion calls depth. | |||
2449 | if (Depth > MaxArithDepth || hasHugeExpression(Ops)) | |||
2450 | return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); | |||
2451 | ||||
2452 | if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) { | |||
2453 | // Don't strengthen flags if we have no new information. | |||
2454 | SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S); | |||
2455 | if (Add->getNoWrapFlags(OrigFlags) != OrigFlags) | |||
2456 | Add->setNoWrapFlags(ComputeFlags(Ops)); | |||
2457 | return S; | |||
2458 | } | |||
2459 | ||||
2460 | // Okay, check to see if the same value occurs in the operand list more than | |||
2461 | // once. If so, merge them together into a multiply expression. Since we | |||
2462 | // sorted the list, these values are required to be adjacent. | |||
2463 | Type *Ty = Ops[0]->getType(); | |||
2464 | bool FoundMatch = false; | |||
2465 | for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) | |||
2466 | if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 | |||
2467 | // Scan ahead to count how many equal operands there are. | |||
2468 | unsigned Count = 2; | |||
2469 | while (i+Count != e && Ops[i+Count] == Ops[i]) | |||
2470 | ++Count; | |||
2471 | // Merge the values into a multiply. | |||
2472 | const SCEV *Scale = getConstant(Ty, Count); | |||
2473 | const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1); | |||
2474 | if (Ops.size() == Count) | |||
2475 | return Mul; | |||
2476 | Ops[i] = Mul; | |||
2477 | Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); | |||
2478 | --i; e -= Count - 1; | |||
2479 | FoundMatch = true; | |||
2480 | } | |||
2481 | if (FoundMatch) | |||
2482 | return getAddExpr(Ops, OrigFlags, Depth + 1); | |||
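     | // e.g., (%x + %y + %y + %y) is rewritten to (%x + 3*%y) and then | |||
     | // re-canonicalized by the recursive call. | |||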
2483 | ||||
2484 | // Check for truncates. If all the operands are truncated from the same | |||
2485 | // type, see if factoring out the truncate would permit the result to be | |||
2486 | // folded. E.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y) | |||
2487 | // if the contents of the resulting outer trunc fold to something simple. | |||
2488 | auto FindTruncSrcType = [&]() -> Type * { | |||
2489 | // We're ultimately looking to fold an addrec of truncs and muls of only | |||
2490 | // constants and truncs, so if we find any other types of SCEV | |||
2491 | // as operands of the addrec then we bail and return nullptr here. | |||
2492 | // Otherwise, we return the type of the operand of a trunc that we find. | |||
2493 | if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx])) | |||
2494 | return T->getOperand()->getType(); | |||
2495 | if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { | |||
2496 | const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1); | |||
2497 | if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp)) | |||
2498 | return T->getOperand()->getType(); | |||
2499 | } | |||
2500 | return nullptr; | |||
2501 | }; | |||
2502 | if (auto *SrcType = FindTruncSrcType()) { | |||
2503 | SmallVector<const SCEV *, 8> LargeOps; | |||
2504 | bool Ok = true; | |||
2505 | // Check all the operands to see if they can be represented in the | |||
2506 | // source type of the truncate. | |||
2507 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) { | |||
2508 | if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { | |||
2509 | if (T->getOperand()->getType() != SrcType) { | |||
2510 | Ok = false; | |||
2511 | break; | |||
2512 | } | |||
2513 | LargeOps.push_back(T->getOperand()); | |||
2514 | } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { | |||
2515 | LargeOps.push_back(getAnyExtendExpr(C, SrcType)); | |||
2516 | } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { | |||
2517 | SmallVector<const SCEV *, 8> LargeMulOps; | |||
2518 | for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { | |||
2519 | if (const SCEVTruncateExpr *T = | |||
2520 | dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { | |||
2521 | if (T->getOperand()->getType() != SrcType) { | |||
2522 | Ok = false; | |||
2523 | break; | |||
2524 | } | |||
2525 | LargeMulOps.push_back(T->getOperand()); | |||
2526 | } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { | |||
2527 | LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); | |||
2528 | } else { | |||
2529 | Ok = false; | |||
2530 | break; | |||
2531 | } | |||
2532 | } | |||
2533 | if (Ok) | |||
2534 | LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); | |||
2535 | } else { | |||
2536 | Ok = false; | |||
2537 | break; | |||
2538 | } | |||
2539 | } | |||
2540 | if (Ok) { | |||
2541 | // Evaluate the expression in the larger type. | |||
2542 | const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1); | |||
2543 | // If it folds to something simple, use it. Otherwise, don't. | |||
2544 | if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) | |||
2545 | return getTruncateExpr(Fold, Ty); | |||
2546 | } | |||
2547 | } | |||
2548 | ||||
2549 | if (Ops.size() == 2) { | |||
2550 | // Check if we have an expression of the form ((X + C1) - C2), where C1 and | |||
2551 | // C2 can be folded in a way that allows retaining wrapping flags of (X + | |||
2552 | // C1). | |||
2553 | const SCEV *A = Ops[0]; | |||
2554 | const SCEV *B = Ops[1]; | |||
2555 | auto *AddExpr = dyn_cast<SCEVAddExpr>(B); | |||
2556 | auto *C = dyn_cast<SCEVConstant>(A); | |||
2557 | if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) { | |||
2558 | auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt(); | |||
2559 | auto C2 = C->getAPInt(); | |||
2560 | SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap; | |||
2561 | ||||
2562 | APInt ConstAdd = C1 + C2; | |||
2563 | auto AddFlags = AddExpr->getNoWrapFlags(); | |||
2564 | // Adding a smaller constant is NUW if the original AddExpr was NUW. | |||
2565 | if (ScalarEvolution::maskFlags(AddFlags, SCEV::FlagNUW) == | |||
2566 | SCEV::FlagNUW && | |||
2567 | ConstAdd.ule(C1)) { | |||
2568 | PreservedFlags = | |||
2569 | ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW); | |||
2570 | } | |||
2571 | ||||
2572 | // Adding a constant with the same sign and small magnitude is NSW, if the | |||
2573 | // original AddExpr was NSW. | |||
2574 | if (ScalarEvolution::maskFlags(AddFlags, SCEV::FlagNSW) == | |||
2575 | SCEV::FlagNSW && | |||
2576 | C1.isSignBitSet() == ConstAdd.isSignBitSet() && | |||
2577 | ConstAdd.abs().ule(C1.abs())) { | |||
2578 | PreservedFlags = | |||
2579 | ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW); | |||
2580 | } | |||
2581 | ||||
2582 | if (PreservedFlags != SCEV::FlagAnyWrap) { | |||
2583 | SmallVector<const SCEV *, 4> NewOps(AddExpr->op_begin(), | |||
2584 | AddExpr->op_end()); | |||
2585 | NewOps[0] = getConstant(ConstAdd); | |||
2586 | return getAddExpr(NewOps, PreservedFlags); | |||
2587 | } | |||
2588 | } | |||
2589 | } | |||
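     | // e.g., (-3 + (10 + %x)<nuw>): ConstAdd = 7 satisfies ConstAdd.ule(C1=10), | |||
     | // so the inner add's NUW is retained; the result is (7 + %x)<nuw>. | |||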
2590 | ||||
2591 | // Skip past any other cast SCEVs. | |||
2592 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) | |||
2593 | ++Idx; | |||
2594 | ||||
2595 | // If there are add operands they would be next. | |||
2596 | if (Idx < Ops.size()) { | |||
2597 | bool DeletedAdd = false; | |||
2598 | // If the original flags and all inlined SCEVAddExprs are NUW, use the | |||
2599 | // common NUW flag for the expression after inlining. Other flags cannot be | |||
2600 | // preserved, because they may depend on the original order of operations. | |||
2601 | SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW); | |||
2602 | while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { | |||
2603 | if (Ops.size() > AddOpsInlineThreshold || | |||
2604 | Add->getNumOperands() > AddOpsInlineThreshold) | |||
2605 | break; | |||
2606 | // If we have an add, expand the add operands onto the end of the operands | |||
2607 | // list. | |||
2608 | Ops.erase(Ops.begin()+Idx); | |||
2609 | Ops.append(Add->op_begin(), Add->op_end()); | |||
2610 | DeletedAdd = true; | |||
2611 | CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags()); | |||
2612 | } | |||
2613 | ||||
2614 | // If we deleted at least one add, we added operands to the end of the list, | |||
2615 | // and they are not necessarily sorted. Recurse to resort and resimplify | |||
2616 | // any operands we just acquired. | |||
2617 | if (DeletedAdd) | |||
2618 | return getAddExpr(Ops, CommonFlags, Depth + 1); | |||
2619 | } | |||
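     | // e.g., ((%a + %b) + %c) is flattened to (%a + %b + %c); per the note | |||
     | // above, only a NUW flag common to all the adds survives. | |||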
2620 | ||||
2621 | // Skip over the add expression until we get to a multiply. | |||
2622 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) | |||
2623 | ++Idx; | |||
2624 | ||||
2625 | // Check to see if there are any folding opportunities present with | |||
2626 | // operands multiplied by constant values. | |||
2627 | if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { | |||
2628 | uint64_t BitWidth = getTypeSizeInBits(Ty); | |||
2629 | DenseMap<const SCEV *, APInt> M; | |||
2630 | SmallVector<const SCEV *, 8> NewOps; | |||
2631 | APInt AccumulatedConstant(BitWidth, 0); | |||
2632 | if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, | |||
2633 | Ops.data(), Ops.size(), | |||
2634 | APInt(BitWidth, 1), *this)) { | |||
2635 | struct APIntCompare { | |||
2636 | bool operator()(const APInt &LHS, const APInt &RHS) const { | |||
2637 | return LHS.ult(RHS); | |||
2638 | } | |||
2639 | }; | |||
2640 | ||||
2641 | // An interesting folding opportunity is present, so it's worthwhile to | |||
2642 | // re-generate the operands list. Group the operands by constant scale, | |||
2643 | // to avoid multiplying by the same constant scale multiple times. | |||
2644 | std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; | |||
2645 | for (const SCEV *NewOp : NewOps) | |||
2646 | MulOpLists[M.find(NewOp)->second].push_back(NewOp); | |||
2647 | // Re-generate the operands list. | |||
2648 | Ops.clear(); | |||
2649 | if (AccumulatedConstant != 0) | |||
2650 | Ops.push_back(getConstant(AccumulatedConstant)); | |||
2651 | for (auto &MulOp : MulOpLists) { | |||
2652 | if (MulOp.first == 1) { | |||
2653 | Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1)); | |||
2654 | } else if (MulOp.first != 0) { | |||
2655 | Ops.push_back(getMulExpr( | |||
2656 | getConstant(MulOp.first), | |||
2657 | getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), | |||
2658 | SCEV::FlagAnyWrap, Depth + 1)); | |||
2659 | } | |||
2660 | } | |||
2661 | if (Ops.empty()) | |||
2662 | return getZero(Ty); | |||
2663 | if (Ops.size() == 1) | |||
2664 | return Ops[0]; | |||
2665 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | |||
2666 | } | |||
2667 | } | |||
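     | // e.g., (1 + 2*%x + 4*%x): the scale map accumulates %x -> 6, so the | |||
     | // operand list is regenerated as (1 + 6*%x). | |||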
2668 | ||||
2669 | // If we are adding something to a multiply expression, make sure the | |||
2670 | // something is not already an operand of the multiply. If so, merge it into | |||
2671 | // the multiply. | |||
2672 | for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { | |||
2673 | const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); | |||
2674 | for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { | |||
2675 | const SCEV *MulOpSCEV = Mul->getOperand(MulOp); | |||
2676 | if (isa<SCEVConstant>(MulOpSCEV)) | |||
2677 | continue; | |||
2678 | for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) | |||
2679 | if (MulOpSCEV == Ops[AddOp]) { | |||
2680 | // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) | |||
2681 | const SCEV *InnerMul = Mul->getOperand(MulOp == 0); | |||
2682 | if (Mul->getNumOperands() != 2) { | |||
2683 | // If the multiply has more than two operands, we must get the | |||
2684 | // Y*Z term. | |||
2685 | SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), | |||
2686 | Mul->op_begin()+MulOp); | |||
2687 | MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); | |||
2688 | InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); | |||
2689 | } | |||
2690 | SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; | |||
2691 | const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); | |||
2692 | const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, | |||
2693 | SCEV::FlagAnyWrap, Depth + 1); | |||
2694 | if (Ops.size() == 2) return OuterMul; | |||
2695 | if (AddOp < Idx) { | |||
2696 | Ops.erase(Ops.begin()+AddOp); | |||
2697 | Ops.erase(Ops.begin()+Idx-1); | |||
2698 | } else { | |||
2699 | Ops.erase(Ops.begin()+Idx); | |||
2700 | Ops.erase(Ops.begin()+AddOp-1); | |||
2701 | } | |||
2702 | Ops.push_back(OuterMul); | |||
2703 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | |||
2704 | } | |||
2705 | ||||
2706 | // Check this multiply against other multiplies being added together. | |||
2707 | for (unsigned OtherMulIdx = Idx+1; | |||
2708 | OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); | |||
2709 | ++OtherMulIdx) { | |||
2710 | const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); | |||
2711 | // If MulOp occurs in OtherMul, we can fold the two multiplies | |||
2712 | // together. | |||
2713 | for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); | |||
2714 | OMulOp != e; ++OMulOp) | |||
2715 | if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { | |||
2716 | // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) | |||
2717 | const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); | |||
2718 | if (Mul->getNumOperands() != 2) { | |||
2719 | SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), | |||
2720 | Mul->op_begin()+MulOp); | |||
2721 | MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); | |||
2722 | InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); | |||
2723 | } | |||
2724 | const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); | |||
2725 | if (OtherMul->getNumOperands() != 2) { | |||
2726 | SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), | |||
2727 | OtherMul->op_begin()+OMulOp); | |||
2728 | MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); | |||
2729 | InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); | |||
2730 | } | |||
2731 | SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; | |||
2732 | const SCEV *InnerMulSum = | |||
2733 | getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); | |||
2734 | const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, | |||
2735 | SCEV::FlagAnyWrap, Depth + 1); | |||
2736 | if (Ops.size() == 2) return OuterMul; | |||
2737 | Ops.erase(Ops.begin()+Idx); | |||
2738 | Ops.erase(Ops.begin()+OtherMulIdx-1); | |||
2739 | Ops.push_back(OuterMul); | |||
2740 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | |||
2741 | } | |||
2742 | } | |||
2743 | } | |||
2744 | } | |||
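     | // e.g., (%x + %x*%y) folds above to %x*(%y + 1), and (%a*%b + %a*%c) | |||
     | // to %a*(%b + %c). | |||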
2745 | ||||
2746 | // If there are any add recurrences in the operands list, see if any other | |||
2747 | // added values are loop invariant. If so, we can fold them into the | |||
2748 | // recurrence. | |||
2749 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) | |||
2750 | ++Idx; | |||
2751 | ||||
2752 | // Scan over all recurrences, trying to fold loop invariants into them. | |||
2753 | for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { | |||
2754 | // Scan all of the other operands to this add and add them to the vector if | |||
2755 | // they are loop invariant w.r.t. the recurrence. | |||
2756 | SmallVector<const SCEV *, 8> LIOps; | |||
2757 | const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); | |||
2758 | const Loop *AddRecLoop = AddRec->getLoop(); | |||
2759 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) | |||
2760 | if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { | |||
2761 | LIOps.push_back(Ops[i]); | |||
2762 | Ops.erase(Ops.begin()+i); | |||
2763 | --i; --e; | |||
2764 | } | |||
2765 | ||||
2766 | // If we found some loop invariants, fold them into the recurrence. | |||
2767 | if (!LIOps.empty()) { | |||
2768 | // Compute nowrap flags for the addition of the loop-invariant ops and | |||
2769 | // the addrec. Temporarily push it as an operand for that purpose. | |||
2770 | LIOps.push_back(AddRec); | |||
2771 | SCEV::NoWrapFlags Flags = ComputeFlags(LIOps); | |||
2772 | LIOps.pop_back(); | |||
2773 | ||||
2774 | // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} | |||
2775 | LIOps.push_back(AddRec->getStart()); | |||
2776 | ||||
2777 | SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); | |||
2778 | // This follows from the fact that the no-wrap flags on the outer add | |||
2779 | // expression are applicable on the 0th iteration, when the add recurrence | |||
2780 | // will be equal to its start value. | |||
2781 | AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); | |||
2782 | ||||
2783 | // Build the new addrec. Propagate the NUW and NSW flags if both the | |||
2784 | // outer add and the inner addrec are guaranteed to have no overflow. | |||
2785 | // Always propagate NW. | |||
2786 | Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); | |||
2787 | const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); | |||
2788 | ||||
2789 | // If all of the other operands were loop invariant, we are done. | |||
2790 | if (Ops.size() == 1) return NewRec; | |||
2791 | ||||
2792 | // Otherwise, add the folded AddRec by the non-invariant parts. | |||
2793 | for (unsigned i = 0;; ++i) | |||
2794 | if (Ops[i] == AddRec) { | |||
2795 | Ops[i] = NewRec; | |||
2796 | break; | |||
2797 | } | |||
2798 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | |||
2799 | } | |||
2800 | ||||
2801 | // Okay, if there weren't any loop invariants to be folded, check to see if | |||
2802 | // there are multiple AddRec's with the same loop induction variable being | |||
2803 | // added together. If so, we can fold them. | |||
2804 | for (unsigned OtherIdx = Idx+1; | |||
2805 | OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); | |||
2806 | ++OtherIdx) { | |||
2807 | // We expect the AddRecExpr's to be sorted in reverse dominance order, | |||
2808 | // so that the 1st found AddRecExpr is dominated by all others. | |||
2809 | assert(DT.dominates( | |||
2810 | cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), | |||
2811 | AddRec->getLoop()->getHeader()) && | |||
2812 | "AddRecExprs are not sorted in reverse dominance order?"); | |||
2813 | if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { | |||
2814 | // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> | |||
2815 | SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); | |||
2816 | for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); | |||
2817 | ++OtherIdx) { | |||
2818 | const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); | |||
2819 | if (OtherAddRec->getLoop() == AddRecLoop) { | |||
2820 | for (unsigned i = 0, e = OtherAddRec->getNumOperands(); | |||
2821 | i != e; ++i) { | |||
2822 | if (i >= AddRecOps.size()) { | |||
2823 | AddRecOps.append(OtherAddRec->op_begin()+i, | |||
2824 | OtherAddRec->op_end()); | |||
2825 | break; | |||
2826 | } | |||
2827 | SmallVector<const SCEV *, 2> TwoOps = { | |||
2828 | AddRecOps[i], OtherAddRec->getOperand(i)}; | |||
2829 | AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); | |||
2830 | } | |||
2831 | Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; | |||
2832 | } | |||
2833 | } | |||
2834 | // Step size has changed, so we cannot guarantee no self-wraparound. | |||
2835 | Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); | |||
2836 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | |||
2837 | } | |||
2838 | } | |||
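     | // e.g., {1,+,2}<L> + {3,+,4}<L> folds to {4,+,6}<L>, with AnyWrap since | |||
     | // the combined step may wrap even if neither original step did. | |||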
2839 | ||||
2840 | // Otherwise couldn't fold anything into this recurrence. Move onto the | |||
2841 | // next one. | |||
2842 | } | |||
2843 | ||||
2844 | // Okay, it looks like we really DO need an add expr. Check to see if we | |||
2845 | // already have one, otherwise create a new one. | |||
2846 | return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); | |||
2847 | } | |||
2848 | ||||
2849 | const SCEV * | |||
2850 | ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops, | |||
2851 | SCEV::NoWrapFlags Flags) { | |||
2852 | FoldingSetNodeID ID; | |||
2853 | ID.AddInteger(scAddExpr); | |||
2854 | for (const SCEV *Op : Ops) | |||
2855 | ID.AddPointer(Op); | |||
2856 | void *IP = nullptr; | |||
2857 | SCEVAddExpr *S = | |||
2858 | static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); | |||
2859 | if (!S) { | |||
2860 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); | |||
2861 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); | |||
2862 | S = new (SCEVAllocator) | |||
2863 | SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size()); | |||
2864 | UniqueSCEVs.InsertNode(S, IP); | |||
2865 | addToLoopUseLists(S); | |||
2866 | } | |||
2867 | S->setNoWrapFlags(Flags); | |||
2868 | return S; | |||
2869 | } | |||
2870 | ||||
2871 | const SCEV * | |||
2872 | ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops, | |||
2873 | const Loop *L, SCEV::NoWrapFlags Flags) { | |||
2874 | FoldingSetNodeID ID; | |||
2875 | ID.AddInteger(scAddRecExpr); | |||
2876 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) | |||
2877 | ID.AddPointer(Ops[i]); | |||
2878 | ID.AddPointer(L); | |||
2879 | void *IP = nullptr; | |||
2880 | SCEVAddRecExpr *S = | |||
2881 | static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); | |||
2882 | if (!S) { | |||
2883 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); | |||
2884 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); | |||
2885 | S = new (SCEVAllocator) | |||
2886 | SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L); | |||
2887 | UniqueSCEVs.InsertNode(S, IP); | |||
2888 | addToLoopUseLists(S); | |||
2889 | } | |||
2890 | setNoWrapFlags(S, Flags); | |||
2891 | return S; | |||
2892 | } | |||
2893 | ||||
2894 | const SCEV * | |||
2895 | ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops, | |||
2896 | SCEV::NoWrapFlags Flags) { | |||
2897 | FoldingSetNodeID ID; | |||
2898 | ID.AddInteger(scMulExpr); | |||
2899 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) | |||
2900 | ID.AddPointer(Ops[i]); | |||
2901 | void *IP = nullptr; | |||
2902 | SCEVMulExpr *S = | |||
2903 | static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); | |||
2904 | if (!S) { | |||
2905 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); | |||
2906 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); | |||
2907 | S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), | |||
2908 | O, Ops.size()); | |||
2909 | UniqueSCEVs.InsertNode(S, IP); | |||
2910 | addToLoopUseLists(S); | |||
2911 | } | |||
2912 | S->setNoWrapFlags(Flags); | |||
2913 | return S; | |||
2914 | } | |||
2915 | ||||
2916 | static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) { | |||
2917 | uint64_t k = i*j; | |||
2918 | if (j > 1 && k / j != i) Overflow = true; | |||
2919 | return k; | |||
2920 | } | |||
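     | // e.g., umul_ov(UINT64_MAX, 2, Ov) sets Ov: the truncated product, | |||
     | // divided back by 2, no longer equals UINT64_MAX. | |||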
2921 | ||||
2922 | /// Compute the result of "n choose k", the binomial coefficient. If an | |||
2923 | /// intermediate computation overflows, Overflow will be set and the return will | |||
2924 | /// be garbage. Overflow is not cleared on absence of overflow. | |||
2925 | static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) { | |||
2926 | // We use the multiplicative formula: | |||
2927 | // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 . | |||
2928 | // At iteration i, we multiply by the i-th term of the numerator and divide | |||
2929 | // by i, the i-th term of the denominator. This division will always produce an | |||
2930 | // integral result, and helps reduce the chance of overflow in the | |||
2931 | // intermediate computations. However, we can still overflow even when the | |||
2932 | // final result would fit. | |||
2933 | ||||
2934 | if (n == 0 || n == k) return 1; | |||
2935 | if (k > n) return 0; | |||
2936 | ||||
2937 | if (k > n/2) | |||
2938 | k = n-k; | |||
2939 | ||||
2940 | uint64_t r = 1; | |||
2941 | for (uint64_t i = 1; i <= k; ++i) { | |||
2942 | r = umul_ov(r, n-(i-1), Overflow); | |||
2943 | r /= i; | |||
2944 | } | |||
2945 | return r; | |||
2946 | } | |||
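     | // e.g., Choose(5, 2): r = (1*5)/1 = 5, then r = (5*4)/2 = 10; each | |||
     | // intermediate division is exact, as noted above. | |||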
2947 | ||||
2948 | /// Determine if any of the operands in this SCEV are a constant or if | |||
2949 | /// any of the add or multiply expressions in this SCEV contain a constant. | |||
2950 | static bool containsConstantInAddMulChain(const SCEV *StartExpr) { | |||
2951 | struct FindConstantInAddMulChain { | |||
2952 | bool FoundConstant = false; | |||
2953 | ||||
2954 | bool follow(const SCEV *S) { | |||
2955 | FoundConstant |= isa<SCEVConstant>(S); | |||
2956 | return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S); | |||
2957 | } | |||
2958 | ||||
2959 | bool isDone() const { | |||
2960 | return FoundConstant; | |||
2961 | } | |||
2962 | }; | |||
2963 | ||||
2964 | FindConstantInAddMulChain F; | |||
2965 | SCEVTraversal<FindConstantInAddMulChain> ST(F); | |||
2966 | ST.visitAll(StartExpr); | |||
2967 | return F.FoundConstant; | |||
2968 | } | |||
2969 | ||||
2970 | /// Get a canonical multiply expression, or something simpler if possible. | |||
2971 | const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, | |||
2972 | SCEV::NoWrapFlags OrigFlags, | |||
2973 | unsigned Depth) { | |||
2974 | assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) && | |||
2975 | "only nuw or nsw allowed"); | |||
2976 | assert(!Ops.empty() && "Cannot get empty mul!"); | |||
2977 | if (Ops.size() == 1) return Ops[0]; | |||
2978 | #ifndef NDEBUG | |||
2979 | Type *ETy = Ops[0]->getType(); | |||
2980 | assert(!ETy->isPointerTy()); | |||
2981 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) | |||
2982 | assert(Ops[i]->getType() == ETy && | |||
2983 | "SCEVMulExpr operand types don't match!"); | |||
2984 | #endif | |||
2985 | ||||
2986 | // Sort by complexity, this groups all similar expression types together. | |||
2987 | GroupByComplexity(Ops, &LI, DT); | |||
2988 | ||||
2989 | // If there are any constants, fold them together. | |||
2990 | unsigned Idx = 0; | |||
2991 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { | |||
2992 | ++Idx; | |||
2993 | assert(Idx < Ops.size()); | |||
2994 | while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { | |||
2995 | // We found two constants, fold them together! | |||
2996 | Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt()); | |||
2997 | if (Ops.size() == 2) return Ops[0]; | |||
2998 | Ops.erase(Ops.begin()+1); // Erase the folded element | |||
2999 | LHSC = cast<SCEVConstant>(Ops[0]); | |||
3000 | } | |||
3001 | ||||
3002 | // If we have a multiply of zero, it will always be zero. | |||
3003 | if (LHSC->getValue()->isZero()) | |||
3004 | return LHSC; | |||
3005 | ||||
3006 | // If we are left with a constant one being multiplied, strip it off. | |||
3007 | if (LHSC->getValue()->isOne()) { | |||
3008 | Ops.erase(Ops.begin()); | |||
3009 | --Idx; | |||
3010 | } | |||
3011 | ||||
3012 | if (Ops.size() == 1) | |||
3013 | return Ops[0]; | |||
3014 | } | |||
3015 | ||||
3016 | // Delay expensive flag strengthening until necessary. | |||
3017 | auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) { | |||
3018 | return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags); | |||
3019 | }; | |||
3020 | ||||
3021 | // Limit recursion calls depth. | |||
3022 | if (Depth > MaxArithDepth || hasHugeExpression(Ops)) | |||
3023 | return getOrCreateMulExpr(Ops, ComputeFlags(Ops)); | |||
3024 | ||||
3025 | if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) { | |||
3026 | // Don't strengthen flags if we have no new information. | |||
3027 | SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S); | |||
3028 | if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags) | |||
3029 | Mul->setNoWrapFlags(ComputeFlags(Ops)); | |||
3030 | return S; | |||
3031 | } | |||
3032 | ||||
3033 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { | |||
3034 | if (Ops.size() == 2) { | |||
3035 | // C1*(C2+V) -> C1*C2 + C1*V | |||
3036 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) | |||
3037 | // If any of Add's ops are Adds or Muls with a constant, apply this | |||
3038 | // transformation as well. | |||
3039 | // | |||
3040 | // TODO: There are some cases where this transformation is not | |||
3041 | // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of | |||
3042 | // this transformation should be narrowed down. | |||
3043 | if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) | |||
3044 | return getAddExpr(getMulExpr(LHSC, Add->getOperand(0), | |||
3045 | SCEV::FlagAnyWrap, Depth + 1), | |||
3046 | getMulExpr(LHSC, Add->getOperand(1), | |||
3047 | SCEV::FlagAnyWrap, Depth + 1), | |||
3048 | SCEV::FlagAnyWrap, Depth + 1); | |||
3049 | ||||
3050 | if (Ops[0]->isAllOnesValue()) { | |||
3051 | // If we have a mul by -1 of an add, try distributing the -1 among the | |||
3052 | // add operands. | |||
3053 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { | |||
3054 | SmallVector<const SCEV *, 4> NewOps; | |||
3055 | bool AnyFolded = false; | |||
3056 | for (const SCEV *AddOp : Add->operands()) { | |||
3057 | const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, | |||
3058 | Depth + 1); | |||
3059 | if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; | |||
3060 | NewOps.push_back(Mul); | |||
3061 | } | |||
3062 | if (AnyFolded) | |||
3063 | return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); | |||
3064 | } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { | |||
3065 | // Negation preserves a recurrence's no self-wrap property. | |||
3066 | SmallVector<const SCEV *, 4> Operands; | |||
3067 | for (const SCEV *AddRecOp : AddRec->operands()) | |||
3068 | Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, | |||
3069 | Depth + 1)); | |||
3070 | ||||
3071 | return getAddRecExpr(Operands, AddRec->getLoop(), | |||
3072 | AddRec->getNoWrapFlags(SCEV::FlagNW)); | |||
3073 | } | |||
3074 | } | |||
3075 | } | |||
3076 | } | |||
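     | // e.g., 2*(3 + %x) distributes above to (6 + 2*%x), and -1*{0,+,1}<L> | |||
     | // becomes {0,+,-1}<L> with only its no-self-wrap flag kept. | |||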
3077 | ||||
3078 | // Skip over the add expression until we get to a multiply. | |||
3079 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) | |||
3080 | ++Idx; | |||
3081 | ||||
3082 | // If there are mul operands inline them all into this expression. | |||
3083 | if (Idx < Ops.size()) { | |||
3084 | bool DeletedMul = false; | |||
3085 | while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { | |||
3086 | if (Ops.size() > MulOpsInlineThreshold) | |||
3087 | break; | |||
3088 | // If we have a mul, expand the mul operands onto the end of the | |||
3089 | // operands list. | |||
3090 | Ops.erase(Ops.begin()+Idx); | |||
3091 | Ops.append(Mul->op_begin(), Mul->op_end()); | |||
3092 | DeletedMul = true; | |||
3093 | } | |||
3094 | ||||
3095 | // If we deleted at least one mul, we added operands to the end of the | |||
3096 | // list, and they are not necessarily sorted. Recurse to resort and | |||
3097 | // resimplify any operands we just acquired. | |||
3098 | if (DeletedMul) | |||
3099 | return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | |||
3100 | } | |||
3101 | ||||
3102 | // If there are any add recurrences in the operands list, see if any other | |||
3103 | // added values are loop invariant. If so, we can fold them into the | |||
3104 | // recurrence. | |||
3105 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) | |||
3106 | ++Idx; | |||
3107 | ||||
3108 | // Scan over all recurrences, trying to fold loop invariants into them. | |||
3109 | for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { | |||
3110 | // Scan all of the other operands to this mul and add them to the vector | |||
3111 | // if they are loop invariant w.r.t. the recurrence. | |||
3112 | SmallVector<const SCEV *, 8> LIOps; | |||
3113 | const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); | |||
3114 | const Loop *AddRecLoop = AddRec->getLoop(); | |||
3115 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) | |||
3116 | if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { | |||
3117 | LIOps.push_back(Ops[i]); | |||
3118 | Ops.erase(Ops.begin()+i); | |||
3119 | --i; --e; | |||
3120 | } | |||
3121 | ||||
3122 | // If we found some loop invariants, fold them into the recurrence. | |||
3123 | if (!LIOps.empty()) { | |||
3124 | // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} | |||
3125 | SmallVector<const SCEV *, 4> NewOps; | |||
3126 | NewOps.reserve(AddRec->getNumOperands()); | |||
3127 | const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); | |||
3128 | for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) | |||
3129 | NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), | |||
3130 | SCEV::FlagAnyWrap, Depth + 1)); | |||
3131 | ||||
3132 | // Build the new addrec. Propagate the NUW and NSW flags if both the | |||
3133 | // outer mul and the inner addrec are guaranteed to have no overflow. | |||
3134 | // | |||
3135 | // No self-wrap cannot be guaranteed after changing the step size, but | |||
3136 | // will be inferred if either NUW or NSW is true. | |||
3137 | SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec}); | |||
3138 | const SCEV *NewRec = getAddRecExpr( | |||
3139 | NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags)); | |||
3140 | ||||
3141 | // If all of the other operands were loop invariant, we are done. | |||
3142 | if (Ops.size() == 1) return NewRec; | |||
3143 | ||||
3144 | // Otherwise, multiply the folded AddRec by the non-invariant parts. | |||
3145 | for (unsigned i = 0;; ++i) | |||
3146 | if (Ops[i] == AddRec) { | |||
3147 | Ops[i] = NewRec; | |||
3148 | break; | |||
3149 | } | |||
3150 | return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | |||
3151 | } | |||
3152 | ||||
3153 | // Okay, if there weren't any loop invariants to be folded, check to see | |||
3154 | // if there are multiple AddRec's with the same loop induction variable | |||
3155 | // being multiplied together. If so, we can fold them. | |||
3156 | ||||
3157 | // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> | |||
3158 | // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ | |||
3159 | // choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z | |||
3160 | // ]]],+,...up to x=2n}. | |||
3161 | // Note that the arguments to choose() are always integers with values | |||
3162 | // known at compile time, never SCEV objects. | |||
3163 | // | |||
3164 | // The implementation avoids pointless extra computations when the two | |||
3165 | // addrec's are of different length (mathematically, it's equivalent to | |||
3166 | // an infinite stream of zeros on the right). | |||
3167 | bool OpsModified = false; | |||
3168 | for (unsigned OtherIdx = Idx+1; | |||
3169 | OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); | |||
3170 | ++OtherIdx) { | |||
3171 | const SCEVAddRecExpr *OtherAddRec = | |||
3172 | dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); | |||
3173 | if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) | |||
3174 | continue; | |||
3175 | ||||
3176 | // Limit max number of arguments to avoid creation of unreasonably big | |||
3177 | // SCEVAddRecs with very complex operands. | |||
3178 | if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 > | |||
3179 | MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec})) | |||
3180 | continue; | |||
3181 | ||||
3182 | bool Overflow = false; | |||
3183 | Type *Ty = AddRec->getType(); | |||
3184 | bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64; | |||
3185 | SmallVector<const SCEV*, 7> AddRecOps; | |||
3186 | for (int x = 0, xe = AddRec->getNumOperands() + | |||
3187 | OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) { | |||
3188 | SmallVector <const SCEV *, 7> SumOps; | |||
3189 | for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) { | |||
3190 | uint64_t Coeff1 = Choose(x, 2*x - y, Overflow); | |||
3191 | for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1), | |||
3192 | ze = std::min(x+1, (int)OtherAddRec->getNumOperands()); | |||
3193 | z < ze && !Overflow; ++z) { | |||
3194 | uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow); | |||
3195 | uint64_t Coeff; | |||
3196 | if (LargerThan64Bits) | |||
3197 | Coeff = umul_ov(Coeff1, Coeff2, Overflow); | |||
3198 | else | |||
3199 | Coeff = Coeff1*Coeff2; | |||
3200 | const SCEV *CoeffTerm = getConstant(Ty, Coeff); | |||
3201 | const SCEV *Term1 = AddRec->getOperand(y-z); | |||
3202 | const SCEV *Term2 = OtherAddRec->getOperand(z); | |||
3203 | SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2, | |||
3204 | SCEV::FlagAnyWrap, Depth + 1)); | |||
3205 | } | |||
3206 | } | |||
3207 | if (SumOps.empty()) | |||
3208 | SumOps.push_back(getZero(Ty)); | |||
3209 | AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1)); | |||
3210 | } | |||
3211 | if (!Overflow) { | |||
3212 | const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop, | |||
3213 | SCEV::FlagAnyWrap); | |||
3214 | if (Ops.size() == 2) return NewAddRec; | |||
3215 | Ops[Idx] = NewAddRec; | |||
3216 | Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; | |||
3217 | OpsModified = true; | |||
3218 | AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec); | |||
3219 | if (!AddRec) | |||
3220 | break; | |||
3221 | } | |||
3222 | } | |||
3223 | if (OpsModified) | |||
3224 | return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); | |||
3225 | ||||
3226 | // Otherwise couldn't fold anything into this recurrence. Move onto the | |||
3227 | // next one. | |||
3228 | } | |||
3229 | ||||
3230 | // Okay, it looks like we really DO need a mul expr. Check to see if we | |||
3231 | // already have one, otherwise create a new one. | |||
3232 | return getOrCreateMulExpr(Ops, ComputeFlags(Ops)); | |||
3233 | } | |||
3234 | ||||
3235 | /// Represents an unsigned remainder expression based on unsigned division. | |||
3236 | const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS, | |||
3237 | const SCEV *RHS) { | |||
3238 | assert(getEffectiveSCEVType(LHS->getType()) == | |||
3239 | getEffectiveSCEVType(RHS->getType()) && | |||
3240 | "SCEVURemExpr operand types don't match!"); | |||
3241 | ||||
3242 | // Short-circuit easy cases | |||
3243 | if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { | |||
3244 | // If constant is one, the result is trivial | |||
3245 | if (RHSC->getValue()->isOne()) | |||
3246 | return getZero(LHS->getType()); // X urem 1 --> 0 | |||
3247 | ||||
3248 | // If constant is a power of two, fold into a zext(trunc(LHS)). | |||
3249 | if (RHSC->getAPInt().isPowerOf2()) { | |||
3250 | Type *FullTy = LHS->getType(); | |||
3251 | Type *TruncTy = | |||
3252 | IntegerType::get(getContext(), RHSC->getAPInt().logBase2()); | |||
3253 | return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy); | |||
3254 | } | |||
3255 | } | |||
3256 | ||||
3257 | // Fallback to %a == %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y) | |||
3258 | const SCEV *UDiv = getUDivExpr(LHS, RHS); | |||
3259 | const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW); | |||
3260 | return getMinusSCEV(LHS, Mult, SCEV::FlagNUW); | |||
3261 | } | |||
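     | // e.g., (%x urem 8) with i32 %x takes the power-of-two path above and | |||
     | // folds to a zext of (trunc %x to i3), i.e. it keeps the low three bits. | |||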
3262 | ||||
3263 | /// Get a canonical unsigned division expression, or something simpler if | |||
3264 | /// possible. | |||
3265 | const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, | |||
3266 | const SCEV *RHS) { | |||
3267 | assert(!LHS->getType()->isPointerTy() && | |||
3268 | "SCEVUDivExpr operand can't be pointer!"); | |||
3269 | assert(LHS->getType() == RHS->getType() && | |||
3270 | "SCEVUDivExpr operand types don't match!"); | |||
3271 | ||||
3272 | FoldingSetNodeID ID; | |||
3273 | ID.AddInteger(scUDivExpr); | |||
3274 | ID.AddPointer(LHS); | |||
3275 | ID.AddPointer(RHS); | |||
3276 | void *IP = nullptr; | |||
3277 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) | |||
3278 | return S; | |||
3279 | ||||
3280 | // 0 udiv Y == 0 | |||
3281 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) | |||
3282 | if (LHSC->getValue()->isZero()) | |||
3283 | return LHS; | |||
3284 | ||||
3285 | if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { | |||
3286 | if (RHSC->getValue()->isOne()) | |||
3287 | return LHS; // X udiv 1 --> x | |||
3288 | // If the denominator is zero, the result of the udiv is undefined. Don't | |||
3289 | // try to analyze it, because the resolution chosen here may differ from | |||
3290 | // the resolution chosen in other parts of the compiler. | |||
3291 | if (!RHSC->getValue()->isZero()) { | |||
3292 | // Determine if the division can be folded into the operands of | |||
3293 | // its operands. | |||
3294 | // TODO: Generalize this to non-constants by using known-bits information. | |||
3295 | Type *Ty = LHS->getType(); | |||
3296 | unsigned LZ = RHSC->getAPInt().countLeadingZeros(); | |||
3297 | unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; | |||
3298 | // For non-power-of-two values, effectively round the value up to the | |||
3299 | // nearest power of two. | |||
3300 | if (!RHSC->getAPInt().isPowerOf2()) | |||
3301 | ++MaxShiftAmt; | |||
3302 | IntegerType *ExtTy = | |||
3303 | IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); | |||
3304 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) | |||
3305 | if (const SCEVConstant *Step = | |||
3306 | dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) { | |||
3307 | // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. | |||
3308 | const APInt &StepInt = Step->getAPInt(); | |||
3309 | const APInt &DivInt = RHSC->getAPInt(); | |||
3310 | if (!StepInt.urem(DivInt) && | |||
3311 | getZeroExtendExpr(AR, ExtTy) == | |||
3312 | getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), | |||
3313 | getZeroExtendExpr(Step, ExtTy), | |||
3314 | AR->getLoop(), SCEV::FlagAnyWrap)) { | |||
3315 | SmallVector<const SCEV *, 4> Operands; | |||
3316 | for (const SCEV *Op : AR->operands()) | |||
3317 | Operands.push_back(getUDivExpr(Op, RHS)); | |||
3318 | return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW); | |||
3319 | } | |||
3320 | // Get a canonical UDivExpr for a recurrence. | |||
3321 | // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0. | |||
3322 | // We can currently only fold X%N if X is constant. | |||
3323 | const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); | |||
3324 | if (StartC && !DivInt.urem(StepInt) && | |||
3325 | getZeroExtendExpr(AR, ExtTy) == | |||
3326 | getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), | |||
3327 | getZeroExtendExpr(Step, ExtTy), | |||
3328 | AR->getLoop(), SCEV::FlagAnyWrap)) { | |||
3329 | const APInt &StartInt = StartC->getAPInt(); | |||
3330 | const APInt &StartRem = StartInt.urem(StepInt); | |||
3331 | if (StartRem != 0) { | |||
3332 | const SCEV *NewLHS = | |||
3333 | getAddRecExpr(getConstant(StartInt - StartRem), Step, | |||
3334 | AR->getLoop(), SCEV::FlagNW); | |||
3335 | if (LHS != NewLHS) { | |||
3336 | LHS = NewLHS; | |||
3337 | ||||
3338 | // Reset the ID to include the new LHS, and check if it is | |||
3339 | // already cached. | |||
3340 | ID.clear(); | |||
3341 | ID.AddInteger(scUDivExpr); | |||
3342 | ID.AddPointer(LHS); | |||
3343 | ID.AddPointer(RHS); | |||
3344 | IP = nullptr; | |||
3345 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) | |||
3346 | return S; | |||
3347 | } | |||
3348 | } | |||
3349 | } | |||
3350 | } | |||
3351 | // (A*B)/C --> A*(B/C) if safe and B/C can be folded. | |||
3352 | if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { | |||
3353 | SmallVector<const SCEV *, 4> Operands; | |||
3354 | for (const SCEV *Op : M->operands()) | |||
3355 | Operands.push_back(getZeroExtendExpr(Op, ExtTy)); | |||
3356 | if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) | |||
3357 | // Find an operand that's safely divisible. | |||
3358 | for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { | |||
3359 | const SCEV *Op = M->getOperand(i); | |||
3360 | const SCEV *Div = getUDivExpr(Op, RHSC); | |||
3361 | if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { | |||
3362 | Operands = SmallVector<const SCEV *, 4>(M->operands()); | |||
3363 | Operands[i] = Div; | |||
3364 | return getMulExpr(Operands); | |||
3365 | } | |||
3366 | } | |||
3367 | } | |||
3368 | ||||
3369 | // (A/B)/C --> A/(B*C) if safe and B*C can be folded. | |||
3370 | if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { | |||
3371 | if (auto *DivisorConstant = | |||
3372 | dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { | |||
3373 | bool Overflow = false; | |||
3374 | APInt NewRHS = | |||
3375 | DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); | |||
3376 | if (Overflow) { | |||
3377 | return getConstant(RHSC->getType(), 0, false); | |||
3378 | } | |||
3379 | return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); | |||
3380 | } | |||
3381 | } | |||
3382 | ||||
3383 | // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. | |||
3384 | if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { | |||
3385 | SmallVector<const SCEV *, 4> Operands; | |||
3386 | for (const SCEV *Op : A->operands()) | |||
3387 | Operands.push_back(getZeroExtendExpr(Op, ExtTy)); | |||
3388 | if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { | |||
3389 | Operands.clear(); | |||
3390 | for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { | |||
3391 | const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); | |||
3392 | if (isa<SCEVUDivExpr>(Op) || | |||
3393 | getMulExpr(Op, RHS) != A->getOperand(i)) | |||
3394 | break; | |||
3395 | Operands.push_back(Op); | |||
3396 | } | |||
3397 | if (Operands.size() == A->getNumOperands()) | |||
3398 | return getAddExpr(Operands); | |||
3399 | } | |||
3400 | } | |||
3401 | ||||
3402 | // Fold if both operands are constant. | |||
3403 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { | |||
3404 | Constant *LHSCV = LHSC->getValue(); | |||
3405 | Constant *RHSCV = RHSC->getValue(); | |||
3406 | return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, | |||
3407 | RHSCV))); | |||
3408 | } | |||
3409 | } | |||
3410 | } | |||
3411 | ||||
3412 | // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs | |||
3413 | // changes). Make sure we get a new one. | |||
3414 | IP = nullptr; | |||
3415 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; | |||
3416 | SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), | |||
3417 | LHS, RHS); | |||
3418 | UniqueSCEVs.InsertNode(S, IP); | |||
3419 | addToLoopUseLists(S); | |||
3420 | return S; | |||
3421 | } | |||
3422 | ||||
3423 | static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { | |||
3424 | APInt A = C1->getAPInt().abs(); | |||
3425 | APInt B = C2->getAPInt().abs(); | |||
3426 | uint32_t ABW = A.getBitWidth(); | |||
3427 | uint32_t BBW = B.getBitWidth(); | |||
3428 | ||||
3429 | if (ABW > BBW) | |||
3430 | B = B.zext(ABW); | |||
3431 | else if (ABW < BBW) | |||
3432 | A = A.zext(BBW); | |||
3433 | ||||
3434 | return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); | |||
3435 | } | |||
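     | // e.g., for constants -12 and 8 this yields 4: absolute values are taken | |||
     | // first, and the narrower APInt is zero-extended to match bit widths. | |||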
3436 | ||||
3437 | /// Get a canonical unsigned division expression, or something simpler if | |||
3438 | /// possible. There is no representation for an exact udiv in SCEV IR, but we | |||
3439 | /// can attempt to remove factors from the LHS and RHS. We can't do this when | |||
3440 | /// it's not exact because the udiv may be clearing bits. | |||
3441 | const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, | |||
3442 | const SCEV *RHS) { | |||
3443 | // TODO: we could try to find factors in all sorts of things, but for now we | |||
3444 | // just deal with u/exact (multiply, constant). See SCEVDivision towards the | |||
3445 | // end of this file for inspiration. | |||
3446 | ||||
3447 | const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); | |||
3448 | if (!Mul || !Mul->hasNoUnsignedWrap()) | |||
3449 | return getUDivExpr(LHS, RHS); | |||
3450 | ||||
3451 | if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { | |||
3452 | // If the mulexpr multiplies by a constant, then that constant must be the | |||
3453 | // first element of the mulexpr. | |||
3454 | if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { | |||
3455 | if (LHSCst == RHSCst) { | |||
3456 | SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands())); | |||
3457 | return getMulExpr(Operands); | |||
3458 | } | |||
3459 | ||||
3460 | // We can't just assume that LHSCst divides RHSCst cleanly, it could be | |||
3461 | // that there's a factor provided by one of the other terms. We need to | |||
3462 | // check. | |||
3463 | APInt Factor = gcd(LHSCst, RHSCst); | |||
3464 | if (!Factor.isIntN(1)) { | |||
3465 | LHSCst = | |||
3466 | cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); | |||
3467 | RHSCst = | |||
3468 | cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); | |||
3469 | SmallVector<const SCEV *, 2> Operands; | |||
3470 | Operands.push_back(LHSCst); | |||
3471 | Operands.append(Mul->op_begin() + 1, Mul->op_end()); | |||
3472 | LHS = getMulExpr(Operands); | |||
3473 | RHS = RHSCst; | |||
3474 | Mul = dyn_cast<SCEVMulExpr>(LHS); | |||
3475 | if (!Mul) | |||
3476 | return getUDivExactExpr(LHS, RHS); | |||
3477 | } | |||
3478 | } | |||
3479 | } | |||
3480 | ||||
3481 | for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { | |||
3482 | if (Mul->getOperand(i) == RHS) { | |||
3483 | SmallVector<const SCEV *, 2> Operands; | |||
3484 | Operands.append(Mul->op_begin(), Mul->op_begin() + i); | |||
3485 | Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); | |||
3486 | return getMulExpr(Operands); | |||
3487 | } | |||
3488 | } | |||
3489 | ||||
3490 | return getUDivExpr(LHS, RHS); | |||
3491 | } | |||
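     | // e.g., (6 * %x)<nuw> /u 4: gcd(6, 4) = 2, so the expression is reduced | |||
     | // to (3 * %x) /u 2 before falling back to a plain udiv. | |||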
3492 | ||||
3493 | /// Get an add recurrence expression for the specified loop. Simplify the | |||
3494 | /// expression as much as possible. | |||
3495 | const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, | |||
3496 | const Loop *L, | |||
3497 | SCEV::NoWrapFlags Flags) { | |||
3498 | SmallVector<const SCEV *, 4> Operands; | |||
3499 | Operands.push_back(Start); | |||
3500 | if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) | |||
3501 | if (StepChrec->getLoop() == L) { | |||
3502 | Operands.append(StepChrec->op_begin(), StepChrec->op_end()); | |||
3503 | return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); | |||
3504 | } | |||
3505 | ||||
3506 | Operands.push_back(Step); | |||
3507 | return getAddRecExpr(Operands, L, Flags); | |||
3508 | } | |||
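     | // e.g., getAddRecExpr(%s, {1,+,2}<L>, L, ...) flattens the addrec step | |||
     | // into a single recurrence {%s,+,1,+,2}<L>, keeping at most the NW flag. | |||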
3509 | ||||
3510 | /// Get an add recurrence expression for the specified loop. Simplify the | |||
3511 | /// expression as much as possible. | |||
3512 | const SCEV * | |||
3513 | ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, | |||
3514 | const Loop *L, SCEV::NoWrapFlags Flags) { | |||
3515 | if (Operands.size() == 1) return Operands[0]; | |||
3516 | #ifndef NDEBUG | |||
3517 | Type *ETy = getEffectiveSCEVType(Operands[0]->getType()); | |||
3518 | for (unsigned i = 1, e = Operands.size(); i != e; ++i) { | |||
3519 | assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy && | |||
3520 | "SCEVAddRecExpr operand types don't match!"); | |||
3521 | assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer"); | |||
3522 | } | |||
3523 | for (unsigned i = 0, e = Operands.size(); i != e; ++i) | |||
3524 | assert(isLoopInvariant(Operands[i], L) && | |||
3525 | "SCEVAddRecExpr operand is not loop-invariant!"); | |||
3526 | #endif | |||
3527 | ||||
3528 | if (Operands.back()->isZero()) { | |||
3529 | Operands.pop_back(); | |||
3530 | return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X | |||
3531 | } | |||
3532 | ||||
3533 | // It's tempting to call getConstantMaxBackedgeTakenCount here and | |||
3534 | // use that information to infer NUW and NSW flags. However, computing a | |||
3535 | // BE count requires calling getAddRecExpr, so we may not yet have a | |||
3536 | // meaningful BE count at this point (and if we don't, we'd be stuck | |||
3537 | // with a SCEVCouldNotCompute as the cached BE count). | |||
3538 | ||||
3539 | Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); | |||
3540 | ||||
3541 | // Canonicalize nested AddRecs by nesting them in order of loop depth. | |||
3542 | if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { | |||
3543 | const Loop *NestedLoop = NestedAR->getLoop(); | |||
3544 | if (L->contains(NestedLoop) | |||
3545 | ? (L->getLoopDepth() < NestedLoop->getLoopDepth()) | |||
3546 | : (!NestedLoop->contains(L) && | |||
3547 | DT.dominates(L->getHeader(), NestedLoop->getHeader()))) { | |||
3548 | SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands()); | |||
3549 | Operands[0] = NestedAR->getStart(); | |||
3550 | // AddRecs require their operands be loop-invariant with respect to their | |||
3551 | // loops. Don't perform this transformation if it would break this | |||
3552 | // requirement. | |||
3553 | bool AllInvariant = all_of( | |||
3554 | Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); }); | |||
3555 | ||||
3556 | if (AllInvariant) { | |||
3557 | // Create a recurrence for the outer loop with the same step size. | |||
3558 | // | |||
3559 | // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the | |||
3560 | // inner recurrence has the same property. | |||
3561 | SCEV::NoWrapFlags OuterFlags = | |||
3562 | maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags()); | |||
3563 | ||||
3564 | NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags); | |||
3565 | AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) { | |||
3566 | return isLoopInvariant(Op, NestedLoop); | |||
3567 | }); | |||
3568 | ||||
3569 | if (AllInvariant) { | |||
3570 | // Ok, both add recurrences are valid after the transformation. | |||
3571 | // | |||
3572 | // The inner recurrence keeps its NW flag but only keeps NUW/NSW if | |||
3573 | // the outer recurrence has the same property. | |||
3574 | SCEV::NoWrapFlags InnerFlags = | |||
3575 | maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); | |||
3576 | return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); | |||
3577 | } | |||
3578 | } | |||
3579 | // Reset Operands to its original state. | |||
3580 | Operands[0] = NestedAR; | |||
3581 | } | |||
3582 | } | |||
3583 | ||||
3584 | // Okay, it looks like we really DO need an addrec expr. Check to see if we | |||
3585 | // already have one, otherwise create a new one. | |||
3586 | return getOrCreateAddRecExpr(Operands, L, Flags); | |||
3587 | } | |||
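// Worked example of the canonicalization above (illustrative): if L2 is
// nested inside L1 and C is invariant in L2 while A and B are invariant
// in L1, then
//
//   {{A,+,B}<L2>,+,C}<L1>  ==>  {{A,+,C}<L1>,+,B}<L2>
//
// so both resulting recurrences satisfy the loop-invariance requirement
// that the AllInvariant checks verify before committing the rewrite.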
3588 | ||||
3589 | const SCEV * | |||
3590 | ScalarEvolution::getGEPExpr(GEPOperator *GEP, | |||
3591 | const SmallVectorImpl<const SCEV *> &IndexExprs) { | |||
3592 | const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); | |||
3593 | // getSCEV(Base)->getType() has the same address space as Base->getType() | |||
3594 | // because SCEV::getType() preserves the address space. | |||
3595 | Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType()); | |||
3596 | // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP | |||
3597 | // instruction to its SCEV, because the Instruction may be guarded by control | |||
3598 | // flow and the no-overflow bits may not be valid for the expression in any | |||
3599 | // context. This can be fixed similarly to how these flags are handled for | |||
3600 | // adds. | |||
3601 | SCEV::NoWrapFlags OffsetWrap = | |||
3602 | GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap; | |||
3603 | ||||
3604 | Type *CurTy = GEP->getType(); | |||
3605 | bool FirstIter = true; | |||
3606 | SmallVector<const SCEV *, 4> Offsets; | |||
3607 | for (const SCEV *IndexExpr : IndexExprs) { | |||
3608 | // Compute the (potentially symbolic) offset in bytes for this index. | |||
3609 | if (StructType *STy = dyn_cast<StructType>(CurTy)) { | |||
3610 | // For a struct, add the member offset. | |||
3611 | ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); | |||
3612 | unsigned FieldNo = Index->getZExtValue(); | |||
3613 | const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); | |||
3614 | Offsets.push_back(FieldOffset); | |||
3615 | ||||
3616 | // Update CurTy to the type of the field at Index. | |||
3617 | CurTy = STy->getTypeAtIndex(Index); | |||
3618 | } else { | |||
3619 | // Update CurTy to its element type. | |||
3620 | if (FirstIter) { | |||
3621 | assert(isa<PointerType>(CurTy) &&((void)0) | |||
3622 | "The first index of a GEP indexes a pointer")((void)0); | |||
3623 | CurTy = GEP->getSourceElementType(); | |||
3624 | FirstIter = false; | |||
3625 | } else { | |||
3626 | CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); | |||
3627 | } | |||
3628 | // For an array, add the element offset, explicitly scaled. | |||
3629 | const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); | |||
3630 | // Getelementptr indices are signed. | |||
3631 | IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); | |||
3632 | ||||
3633 | // Multiply the index by the element size to compute the element offset. | |||
3634 | const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap); | |||
3635 | Offsets.push_back(LocalOffset); | |||
3636 | } | |||
3637 | } | |||
3638 | ||||
3639 | // Handle degenerate case of GEP without offsets. | |||
3640 | if (Offsets.empty()) | |||
3641 | return BaseExpr; | |||
3642 | ||||
3643 | // Add the offsets together, assuming nsw if inbounds. | |||
3644 | const SCEV *Offset = getAddExpr(Offsets, OffsetWrap); | |||
3645 | // Add the base address and the offset. We cannot use the nsw flag, as the | |||
3646 | // base address is unsigned. However, if we know that the offset is | |||
3647 | // non-negative, we can use nuw. | |||
3648 | SCEV::NoWrapFlags BaseWrap = GEP->isInBounds() && isKnownNonNegative(Offset) | |||
3649 | ? SCEV::FlagNUW : SCEV::FlagAnyWrap; | |||
3650 | return getAddExpr(BaseExpr, Offset, BaseWrap); | |||
3651 | } | |||
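// Worked example (hypothetical IR, not taken from this file): with
//   %T = type { i32, [10 x i32] }
//   %p = getelementptr inbounds %T, %T* %b, i64 %i, i32 1, i64 %j
// the loop above produces, using i64 as the index type,
//   ((44 * %i) + 4 + (4 * %j) + %b)
// where 44 = sizeof(%T), 4 = offsetof(%T, 1) and 4 = sizeof(i32). The
// offset arithmetic carries <nsw> because of "inbounds"; the final add of
// %b gets <nuw> only if the summed offset is known non-negative.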
3652 | ||||
3653 | std::tuple<SCEV *, FoldingSetNodeID, void *> | |||
3654 | ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType, | |||
3655 | ArrayRef<const SCEV *> Ops) { | |||
3656 | FoldingSetNodeID ID; | |||
3657 | void *IP = nullptr; | |||
3658 | ID.AddInteger(SCEVType); | |||
3659 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) | |||
3660 | ID.AddPointer(Ops[i]); | |||
3661 | return std::tuple<SCEV *, FoldingSetNodeID, void *>( | |||
3662 | UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP); | |||
3663 | } | |||
3664 | ||||
3665 | const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) { | |||
3666 | SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap; | |||
3667 | return getSMaxExpr(Op, getNegativeSCEV(Op, Flags)); | |||
3668 | } | |||
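// For instance (illustrative): getAbsExpr(X, /*IsNSW=*/true) builds
// smax(X, (-1 * X)<nsw>), so for a constant X = -5 the result folds to
// smax(-5, 5) = 5.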
3669 | ||||
3670 | const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind, | |||
3671 | SmallVectorImpl<const SCEV *> &Ops) { | |||
3672 | assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); | |||
3673 | if (Ops.size() == 1) return Ops[0]; | |||
3674 | #ifndef NDEBUG | |||
3675 | Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); | |||
3676 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) { | |||
3677 | assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && | |||
3678 | "Operand types don't match!"); | |||
3679 | assert(Ops[0]->getType()->isPointerTy() == | |||
3680 | Ops[i]->getType()->isPointerTy() && | |||
3681 | "min/max should be consistently pointerish"); | |||
3682 | } | |||
3683 | #endif | |||
3684 | ||||
3685 | bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; | |||
3686 | bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; | |||
3687 | ||||
3688 | // Sort by complexity, this groups all similar expression types together. | |||
3689 | GroupByComplexity(Ops, &LI, DT); | |||
3690 | ||||
3691 | // Check if we have created the same expression before. | |||
3692 | if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) { | |||
3693 | return S; | |||
3694 | } | |||
3695 | ||||
3696 | // If there are any constants, fold them together. | |||
3697 | unsigned Idx = 0; | |||
3698 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { | |||
3699 | ++Idx; | |||
3700 | assert(Idx < Ops.size()); | |||
3701 | auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { | |||
3702 | if (Kind == scSMaxExpr) | |||
3703 | return APIntOps::smax(LHS, RHS); | |||
3704 | else if (Kind == scSMinExpr) | |||
3705 | return APIntOps::smin(LHS, RHS); | |||
3706 | else if (Kind == scUMaxExpr) | |||
3707 | return APIntOps::umax(LHS, RHS); | |||
3708 | else if (Kind == scUMinExpr) | |||
3709 | return APIntOps::umin(LHS, RHS); | |||
3710 | llvm_unreachable("Unknown SCEV min/max opcode"); | |||
3711 | }; | |||
3712 | ||||
3713 | while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { | |||
3714 | // We found two constants, fold them together! | |||
3715 | ConstantInt *Fold = ConstantInt::get( | |||
3716 | getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); | |||
3717 | Ops[0] = getConstant(Fold); | |||
3718 | Ops.erase(Ops.begin()+1); // Erase the folded element | |||
3719 | if (Ops.size() == 1) return Ops[0]; | |||
3720 | LHSC = cast<SCEVConstant>(Ops[0]); | |||
3721 | } | |||
3722 | ||||
3723 | bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); | |||
3724 | bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); | |||
3725 | ||||
3726 | if (IsMax ? IsMinV : IsMaxV) { | |||
3727 | // If we are left with a constant minimum(/maximum)-int, strip it off. | |||
3728 | Ops.erase(Ops.begin()); | |||
3729 | --Idx; | |||
3730 | } else if (IsMax ? IsMaxV : IsMinV) { | |||
3731 | // If we have a max(/min) with a constant maximum(/minimum)-int, | |||
3732 | // it will always be the extremum. | |||
3733 | return LHSC; | |||
3734 | } | |||
3735 | ||||
3736 | if (Ops.size() == 1) return Ops[0]; | |||
3737 | } | |||
3738 | ||||
3739 | // Find the first operation of the same kind | |||
3740 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) | |||
3741 | ++Idx; | |||
3742 | ||||
3743 | // Check to see if one of the operands is of the same kind. If so, expand its | |||
3744 | // operands onto our operand list, and recurse to simplify. | |||
3745 | if (Idx < Ops.size()) { | |||
3746 | bool DeletedAny = false; | |||
3747 | while (Ops[Idx]->getSCEVType() == Kind) { | |||
3748 | const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); | |||
3749 | Ops.erase(Ops.begin()+Idx); | |||
3750 | Ops.append(SMME->op_begin(), SMME->op_end()); | |||
3751 | DeletedAny = true; | |||
3752 | } | |||
3753 | ||||
3754 | if (DeletedAny) | |||
3755 | return getMinMaxExpr(Kind, Ops); | |||
3756 | } | |||
3757 | ||||
3758 | // Okay, check to see if the same value occurs in the operand list twice. If | |||
3759 | // so, delete one. Since we sorted the list, these values are required to | |||
3760 | // be adjacent. | |||
3761 | llvm::CmpInst::Predicate GEPred = | |||
3762 | IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; | |||
3763 | llvm::CmpInst::Predicate LEPred = | |||
3764 | IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; | |||
3765 | llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; | |||
3766 | llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; | |||
3767 | for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { | |||
3768 | if (Ops[i] == Ops[i + 1] || | |||
3769 | isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { | |||
3770 | // X op Y op Y --> X op Y | |||
3771 | // X op Y --> X, if we know X, Y are ordered appropriately | |||
3772 | Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); | |||
3773 | --i; | |||
3774 | --e; | |||
3775 | } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], | |||
3776 | Ops[i + 1])) { | |||
3777 | // X op Y --> Y, if we know X, Y are ordered appropriately | |||
3778 | Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); | |||
3779 | --i; | |||
3780 | --e; | |||
3781 | } | |||
3782 | } | |||
3783 | ||||
3784 | if (Ops.size() == 1) return Ops[0]; | |||
3785 | ||||
3786 | assert(!Ops.empty() && "Reduced smax down to nothing!"); | |||
3787 | ||||
3788 | // Okay, it looks like we really DO need an expr. Check to see if we | |||
3789 | // already have one, otherwise create a new one. | |||
3790 | const SCEV *ExistingSCEV; | |||
3791 | FoldingSetNodeID ID; | |||
3792 | void *IP; | |||
3793 | std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops); | |||
3794 | if (ExistingSCEV) | |||
3795 | return ExistingSCEV; | |||
3796 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); | |||
3797 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); | |||
3798 | SCEV *S = new (SCEVAllocator) | |||
3799 | SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size()); | |||
3800 | ||||
3801 | UniqueSCEVs.InsertNode(S, IP); | |||
3802 | addToLoopUseLists(S); | |||
3803 | return S; | |||
3804 | } | |||
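// Worked example (illustrative): for umax(2, umax(%x, 4)) the nested umax
// is spliced onto the operand list and the recursive call folds the
// constants to umax(2, 4) = 4, giving the canonical (4 umax %x). Had the
// constant been the unsigned-max value, the extremum check above would
// have returned the constant itself.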
3805 | ||||
3806 | const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) { | |||
3807 | SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; | |||
3808 | return getSMaxExpr(Ops); | |||
3809 | } | |||
3810 | ||||
3811 | const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { | |||
3812 | return getMinMaxExpr(scSMaxExpr, Ops); | |||
3813 | } | |||
3814 | ||||
3815 | const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) { | |||
3816 | SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; | |||
3817 | return getUMaxExpr(Ops); | |||
3818 | } | |||
3819 | ||||
3820 | const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { | |||
3821 | return getMinMaxExpr(scUMaxExpr, Ops); | |||
3822 | } | |||
3823 | ||||
3824 | const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, | |||
3825 | const SCEV *RHS) { | |||
3826 | SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; | |||
3827 | return getSMinExpr(Ops); | |||
3828 | } | |||
3829 | ||||
3830 | const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) { | |||
3831 | return getMinMaxExpr(scSMinExpr, Ops); | |||
3832 | } | |||
3833 | ||||
3834 | const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, | |||
3835 | const SCEV *RHS) { | |||
3836 | SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; | |||
3837 | return getUMinExpr(Ops); | |||
3838 | } | |||
3839 | ||||
3840 | const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) { | |||
3841 | return getMinMaxExpr(scUMinExpr, Ops); | |||
3842 | } | |||
3843 | ||||
3844 | const SCEV * | |||
3845 | ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy, | |||
3846 | ScalableVectorType *ScalableTy) { | |||
3847 | Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo()); | |||
3848 | Constant *One = ConstantInt::get(IntTy, 1); | |||
3849 | Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One); | |||
3850 | // Note that the expression we created is the final expression; we don't | |||
3851 | // want to simplify it any further. Also, if we call a normal getSCEV(), | |||
3852 | // we'll end up in an endless recursion. So just create an SCEVUnknown. | |||
3853 | return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy)); | |||
3854 | } | |||
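// The constant built above is, in IR terms, roughly (sketch):
//   ptrtoint (<vscale x N x ElTy>* getelementptr (<vscale x N x ElTy>,
//             <vscale x N x ElTy>* null, IntTy 1) to IntTy)
// i.e. the classic "address of element 1 from a null base" sizeof idiom,
// which must stay symbolic because the size of a scalable vector is a
// runtime multiple of vscale.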
3855 | ||||
3856 | const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { | |||
3857 | if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy)) | |||
3858 | return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy); | |||
3859 | // We can bypass creating a target-independent constant expression and then | |||
3860 | // folding it back into a ConstantInt. This is just a compile-time | |||
3861 | // optimization. | |||
3862 | return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); | |||
3863 | } | |||
3864 | ||||
3865 | const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) { | |||
3866 | if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy)) | |||
3867 | return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy); | |||
3868 | // We can bypass creating a target-independent constant expression and then | |||
3869 | // folding it back into a ConstantInt. This is just a compile-time | |||
3870 | // optimization. | |||
3871 | return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy)); | |||
3872 | } | |||
3873 | ||||
3874 | const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, | |||
3875 | StructType *STy, | |||
3876 | unsigned FieldNo) { | |||
3877 | // We can bypass creating a target-independent constant expression and then | |||
3878 | // folding it back into a ConstantInt. This is just a compile-time | |||
3879 | // optimization. | |||
3880 | return getConstant( | |||
3881 | IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); | |||
3882 | } | |||
3883 | ||||
3884 | const SCEV *ScalarEvolution::getUnknown(Value *V) { | |||
3885 | // Don't attempt to do anything other than create a SCEVUnknown object | |||
3886 | // here. createSCEV only calls getUnknown after checking for all other | |||
3887 | // interesting possibilities, and any other code that calls getUnknown | |||
3888 | // is doing so in order to hide a value from SCEV canonicalization. | |||
3889 | ||||
3890 | FoldingSetNodeID ID; | |||
3891 | ID.AddInteger(scUnknown); | |||
3892 | ID.AddPointer(V); | |||
3893 | void *IP = nullptr; | |||
3894 | if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { | |||
3895 | assert(cast<SCEVUnknown>(S)->getValue() == V && | |||
3896 | "Stale SCEVUnknown in uniquing map!"); | |||
3897 | return S; | |||
3898 | } | |||
3899 | SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, | |||
3900 | FirstUnknown); | |||
3901 | FirstUnknown = cast<SCEVUnknown>(S); | |||
3902 | UniqueSCEVs.InsertNode(S, IP); | |||
3903 | return S; | |||
3904 | } | |||
3905 | ||||
3906 | //===----------------------------------------------------------------------===// | |||
3907 | // Basic SCEV Analysis and PHI Idiom Recognition Code | |||
3908 | // | |||
3909 | ||||
3910 | /// Test if values of the given type are analyzable within the SCEV | |||
3911 | /// framework. This primarily includes integer types, and it can optionally | |||
3912 | /// include pointer types if the ScalarEvolution class has access to | |||
3913 | /// target-specific information. | |||
3914 | bool ScalarEvolution::isSCEVable(Type *Ty) const { | |||
3915 | // Integers and pointers are always SCEVable. | |||
3916 | return Ty->isIntOrPtrTy(); | |||
3917 | } | |||
3918 | ||||
3919 | /// Return the size in bits of the specified type, for which isSCEVable must | |||
3920 | /// return true. | |||
3921 | uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { | |||
3922 | assert(isSCEVable(Ty) && "Type is not SCEVable!"); | |||
3923 | if (Ty->isPointerTy()) | |||
| ||||
3924 | return getDataLayout().getIndexTypeSizeInBits(Ty); | |||
3925 | return getDataLayout().getTypeSizeInBits(Ty); | |||
3926 | } | |||
3927 | ||||
3928 | /// Return a type with the same bitwidth as the given type and which represents | |||
3929 | /// how SCEV will treat the given type, for which isSCEVable must return | |||
3930 | /// true. For pointer types, this is the pointer index sized integer type. | |||
3931 | Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const { | |||
3932 | assert(isSCEVable(Ty) && "Type is not SCEVable!"); | |||
3933 | ||||
3934 | if (Ty->isIntegerTy()) | |||
3935 | return Ty; | |||
3936 | ||||
3937 | // The only other supported type is pointer. | |||
3938 | assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); | |||
3939 | return getDataLayout().getIndexType(Ty); | |||
3940 | } | |||
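// Example (assuming a typical 64-bit DataLayout such as "e-p:64:64:64",
// illustrative only): an i32 is returned unchanged, while a pointer in
// address space 0 maps to the index type i64 -- which is also why
// getTypeSizeInBits above reports the *index* width for pointers.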
3941 | ||||
3942 | Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const { | |||
3943 | return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2; | |||
3944 | } | |||
3945 | ||||
3946 | const SCEV *ScalarEvolution::getCouldNotCompute() { | |||
3947 | return CouldNotCompute.get(); | |||
3948 | } | |||
3949 | ||||
3950 | bool ScalarEvolution::checkValidity(const SCEV *S) const { | |||
3951 | bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { | |||
3952 | auto *SU = dyn_cast<SCEVUnknown>(S); | |||
3953 | return SU && SU->getValue() == nullptr; | |||
3954 | }); | |||
3955 | ||||
3956 | return !ContainsNulls; | |||
3957 | } | |||
3958 | ||||
3959 | bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { | |||
3960 | HasRecMapType::iterator I = HasRecMap.find(S); | |||
3961 | if (I != HasRecMap.end()) | |||
3962 | return I->second; | |||
3963 | ||||
3964 | bool FoundAddRec = | |||
3965 | SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); }); | |||
3966 | HasRecMap.insert({S, FoundAddRec}); | |||
3967 | return FoundAddRec; | |||
3968 | } | |||
3969 | ||||
3970 | /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}. | |||
3971 | /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an | |||
3972 | /// offset I, then return {S', I}, else return {\p S, nullptr}. | |||
3973 | static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) { | |||
3974 | const auto *Add = dyn_cast<SCEVAddExpr>(S); | |||
3975 | if (!Add) | |||
3976 | return {S, nullptr}; | |||
3977 | ||||
3978 | if (Add->getNumOperands() != 2) | |||
3979 | return {S, nullptr}; | |||
3980 | ||||
3981 | auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0)); | |||
3982 | if (!ConstOp) | |||
3983 | return {S, nullptr}; | |||
3984 | ||||
3985 | return {Add->getOperand(1), ConstOp->getValue()}; | |||
3986 | } | |||
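// Example (illustrative): splitAddExpr((4 + %x)) returns {%x, 4}, while
// splitAddExpr((%x + %y)) and splitAddExpr(%x) both return {S, nullptr},
// since only a two-operand add led by a SCEVConstant is split.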
3987 | ||||
3988 | /// Return the ValueOffsetPair set for \p S. \p S can be represented | |||
3989 | /// by the value and offset from any ValueOffsetPair in the set. | |||
3990 | ScalarEvolution::ValueOffsetPairSetVector * | |||
3991 | ScalarEvolution::getSCEVValues(const SCEV *S) { | |||
3992 | ExprValueMapType::iterator SI = ExprValueMap.find_as(S); | |||
3993 | if (SI == ExprValueMap.end()) | |||
3994 | return nullptr; | |||
3995 | #ifndef NDEBUG | |||
3996 | if (VerifySCEVMap) { | |||
3997 | // Check there is no dangling Value in the set returned. | |||
3998 | for (const auto &VE : SI->second) | |||
3999 | assert(ValueExprMap.count(VE.first)); | |||
4000 | } | |||
4001 | #endif | |||
4002 | return &SI->second; | |||
4003 | } | |||
4004 | ||||
4005 | /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) | |||
4006 | /// cannot be used separately. eraseValueFromMap should be used to remove | |||
4007 | /// V from ValueExprMap and ExprValueMap at the same time. | |||
4008 | void ScalarEvolution::eraseValueFromMap(Value *V) { | |||
4009 | ValueExprMapType::iterator I = ValueExprMap.find_as(V); | |||
4010 | if (I != ValueExprMap.end()) { | |||
4011 | const SCEV *S = I->second; | |||
4012 | // Remove {V, 0} from the set of ExprValueMap[S] | |||
4013 | if (auto *SV = getSCEVValues(S)) | |||
4014 | SV->remove({V, nullptr}); | |||
4015 | ||||
4016 | // Remove {V, Offset} from the set of ExprValueMap[Stripped] | |||
4017 | const SCEV *Stripped; | |||
4018 | ConstantInt *Offset; | |||
4019 | std::tie(Stripped, Offset) = splitAddExpr(S); | |||
4020 | if (Offset != nullptr) { | |||
4021 | if (auto *SV = getSCEVValues(Stripped)) | |||
4022 | SV->remove({V, Offset}); | |||
4023 | } | |||
4024 | ValueExprMap.erase(V); | |||
4025 | } | |||
4026 | } | |||
4027 | ||||
4028 | /// Check whether value has nuw/nsw/exact set but SCEV does not. | |||
4029 | /// TODO: In reality it is better to check the poison recursively | |||
4030 | /// but this is better than nothing. | |||
4031 | static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) { | |||
4032 | if (auto *I = dyn_cast<Instruction>(V)) { | |||
4033 | if (isa<OverflowingBinaryOperator>(I)) { | |||
4034 | if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) { | |||
4035 | if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap()) | |||
4036 | return true; | |||
4037 | if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap()) | |||
4038 | return true; | |||
4039 | } | |||
4040 | } else if (isa<PossiblyExactOperator>(I) && I->isExact()) | |||
4041 | return true; | |||
4042 | } | |||
4043 | return false; | |||
4044 | } | |||
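// Example (illustrative): for "%a = add nsw i32 %x, %y", if the SCEV built
// for %a is the plain (%x + %y) without <nsw>, this returns true and
// getSCEV below will skip recording %a in ExprValueMap, so expansion cannot
// reuse %a where its poison-generating flags would be unsound.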
4045 | ||||
4046 | /// Return an existing SCEV if it exists, otherwise analyze the expression and | |||
4047 | /// create a new one. | |||
4048 | const SCEV *ScalarEvolution::getSCEV(Value *V) { | |||
4049 | assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); | |||
4050 | ||||
4051 | const SCEV *S = getExistingSCEV(V); | |||
4052 | if (S == nullptr) { | |||
4053 | S = createSCEV(V); | |||
4054 | // During PHI resolution, it is possible to create two SCEVs for the same | |||
4055 | // V, so it is needed to double check whether V->S is inserted into | |||
4056 | // ValueExprMap before insert S->{V, 0} into ExprValueMap. | |||
4057 | std::pair<ValueExprMapType::iterator, bool> Pair = | |||
4058 | ValueExprMap.insert({SCEVCallbackVH(V, this), S}); | |||
4059 | if (Pair.second && !SCEVLostPoisonFlags(S, V)) { | |||
4060 | ExprValueMap[S].insert({V, nullptr}); | |||
4061 | ||||
4062 | // If S == Stripped + Offset, add Stripped -> {V, Offset} into | |||
4063 | // ExprValueMap. | |||
4064 | const SCEV *Stripped = S; | |||
4065 | ConstantInt *Offset = nullptr; | |||
4066 | std::tie(Stripped, Offset) = splitAddExpr(S); | |||
4067 | // If stripped is SCEVUnknown, don't bother to save | |||
4068 | // Stripped -> {V, offset}. It doesn't simplify and sometimes even | |||
4069 | // increase the complexity of the expansion code. | |||
4070 | // If V is GetElementPtrInst, don't save Stripped -> {V, offset} | |||
4071 | // because it may generate add/sub instead of GEP in SCEV expansion. | |||
4072 | if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && | |||
4073 | !isa<GetElementPtrInst>(V)) | |||
4074 | ExprValueMap[Stripped].insert({V, Offset}); | |||
4075 | } | |||
4076 | } | |||
4077 | return S; | |||
4078 | } | |||
4079 | ||||
4080 | const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { | |||
4081 | assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); | |||
4082 | ||||
4083 | ValueExprMapType::iterator I = ValueExprMap.find_as(V); | |||
4084 | if (I != ValueExprMap.end()) { | |||
4085 | const SCEV *S = I->second; | |||
4086 | if (checkValidity(S)) | |||
4087 | return S; | |||
4088 | eraseValueFromMap(V); | |||
4089 | forgetMemoizedResults(S); | |||
4090 | } | |||
4091 | return nullptr; | |||
4092 | } | |||
4093 | ||||
4094 | /// Return a SCEV corresponding to -V = -1*V | |||
4095 | const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, | |||
4096 | SCEV::NoWrapFlags Flags) { | |||
4097 | if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) | |||
4098 | return getConstant( | |||
4099 | cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); | |||
4100 | ||||
4101 | Type *Ty = V->getType(); | |||
4102 | Ty = getEffectiveSCEVType(Ty); | |||
4103 | return getMulExpr(V, getMinusOne(Ty), Flags); | |||
4104 | } | |||
4105 | ||||
4106 | /// If Expr computes ~A, return A else return nullptr | |||
4107 | static const SCEV *MatchNotExpr(const SCEV *Expr) { | |||
4108 | const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); | |||
4109 | if (!Add || Add->getNumOperands() != 2 || | |||
4110 | !Add->getOperand(0)->isAllOnesValue()) | |||
4111 | return nullptr; | |||
4112 | ||||
4113 | const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); | |||
4114 | if (!AddRHS || AddRHS->getNumOperands() != 2 || | |||
4115 | !AddRHS->getOperand(0)->isAllOnesValue()) | |||
4116 | return nullptr; | |||
4117 | ||||
4118 | return AddRHS->getOperand(1); | |||
4119 | } | |||
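// MatchNotExpr keys off the canonical SCEV spelling of ~A, which is
// (-1 + (-1 * A)); e.g. MatchNotExpr((-1 + (-1 * %x))) yields %x, and any
// expression not of exactly that two-operand shape yields nullptr.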
4120 | ||||
4121 | /// Return a SCEV corresponding to ~V = -1-V | |||
4122 | const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { | |||
4123 | if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) | |||
4124 | return getConstant( | |||
4125 | cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); | |||
4126 | ||||
4127 | // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) | |||
4128 | if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { | |||
4129 | auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { | |||
4130 | SmallVector<const SCEV *, 2> MatchedOperands; | |||
4131 | for (const SCEV *Operand : MME->operands()) { | |||
4132 | const SCEV *Matched = MatchNotExpr(Operand); | |||
4133 | if (!Matched) | |||
4134 | return (const SCEV *)nullptr; | |||
4135 | MatchedOperands.push_back(Matched); | |||
4136 | } | |||
4137 | return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()), | |||
4138 | MatchedOperands); | |||
4139 | }; | |||
4140 | if (const SCEV *Replaced = MatchMinMaxNegation(MME)) | |||
4141 | return Replaced; | |||
4142 | } | |||
4143 | ||||
4144 | Type *Ty = V->getType(); | |||
4145 | Ty = getEffectiveSCEVType(Ty); | |||
4146 | return getMinusSCEV(getMinusOne(Ty), V); | |||
4147 | } | |||
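// Worked example (illustrative): with X' = (-1 + (-1 * %x)) and Y' built
// likewise, getNotSCEV(smin(X', Y')) matches every operand via MatchNotExpr
// and returns smax(%x, %y), using the identity ~min(~x, ~y) == max(x, y).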
4148 | ||||
4149 | /// Compute an expression equivalent to S - getPointerBase(S). | |||
4150 | static const SCEV *removePointerBase(ScalarEvolution *SE, const SCEV *P) { | |||
4151 | assert(P->getType()->isPointerTy()); | |||
4152 | ||||
4153 | if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) { | |||
4154 | // The base of an AddRec is the first operand. | |||
4155 | SmallVector<const SCEV *> Ops{AddRec->operands()}; | |||
4156 | Ops[0] = removePointerBase(SE, Ops[0]); | |||
4157 | // Don't try to transfer nowrap flags for now. We could in some cases | |||
4158 | // (for example, if pointer operand of the AddRec is a SCEVUnknown). | |||
4159 | return SE->getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap); | |||
4160 | } | |||
4161 | if (auto *Add = dyn_cast<SCEVAddExpr>(P)) { | |||
4162 | // The base of an Add is the pointer operand. | |||
4163 | SmallVector<const SCEV *> Ops{Add->operands()}; | |||
4164 | const SCEV **PtrOp = nullptr; | |||
4165 | for (const SCEV *&AddOp : Ops) { | |||
4166 | if (AddOp->getType()->isPointerTy()) { | |||
4167 | // If we find an Add with multiple pointer operands, treat it as a | |||
4168 | // pointer base to be consistent with getPointerBase. Eventually | |||
4169 | // we should be able to assert this is impossible. | |||
4170 | if (PtrOp) | |||
4171 | return SE->getZero(P->getType()); | |||
4172 | PtrOp = &AddOp; | |||
4173 | } | |||
4174 | } | |||
4175 | *PtrOp = removePointerBase(SE, *PtrOp); | |||
4176 | // Don't try to transfer nowrap flags for now. We could in some cases | |||
4177 | // (for example, if the pointer operand of the Add is a SCEVUnknown). | |||
4178 | return SE->getAddExpr(Ops); | |||
4179 | } | |||
4180 | // Any other expression must be a pointer base. | |||
4181 | return SE->getZero(P->getType()); | |||
4182 | } | |||
4183 | ||||
4184 | const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, | |||
4185 | SCEV::NoWrapFlags Flags, | |||
4186 | unsigned Depth) { | |||
4187 | // Fast path: X - X --> 0. | |||
4188 | if (LHS == RHS) | |||
4189 | return getZero(LHS->getType()); | |||
4190 | ||||
4191 | // If we subtract two pointers with different pointer bases, bail. | |||
4192 | // Eventually, we're going to add an assertion to getMulExpr that we | |||
4193 | // can't multiply by a pointer. | |||
4194 | if (RHS->getType()->isPointerTy()) { | |||
4195 | if (!LHS->getType()->isPointerTy() || | |||
4196 | getPointerBase(LHS) != getPointerBase(RHS)) | |||
4197 | return getCouldNotCompute(); | |||
4198 | LHS = removePointerBase(this, LHS); | |||
4199 | RHS = removePointerBase(this, RHS); | |||
4200 | } | |||
4201 | ||||
4202 | // We represent LHS - RHS as LHS + (-1)*RHS. This transformation | |||
4203 | // makes it so that we cannot make much use of NUW. | |||
4204 | auto AddFlags = SCEV::FlagAnyWrap; | |||
4205 | const bool RHSIsNotMinSigned = | |||
4206 | !getSignedRangeMin(RHS).isMinSignedValue(); | |||
4207 | if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { | |||
4208 | // Let M be the minimum representable signed value. Then (-1)*RHS | |||
4209 | // signed-wraps if and only if RHS is M. That can happen even for | |||
4210 | // a NSW subtraction because e.g. (-1)*M signed-wraps even though | |||
4211 | // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + | |||
4212 | // (-1)*RHS, we need to prove that RHS != M. | |||
4213 | // | |||
4214 | // If LHS is non-negative and we know that LHS - RHS does not | |||
4215 | // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap | |||
4216 | // either by proving that RHS > M or that LHS >= 0. | |||
4217 | if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { | |||
4218 | AddFlags = SCEV::FlagNSW; | |||
4219 | } | |||
4220 | } | |||
4221 | ||||
4222 | // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - | |||
4223 | // RHS is NSW and LHS >= 0. | |||
4224 | // | |||
4225 | // The difficulty here is that the NSW flag may have been proven | |||
4226 | // relative to a loop that is to be found in a recurrence in LHS and | |||
4227 | // not in RHS. Applying NSW to (-1)*M may then let the NSW have a | |||
4228 | // larger scope than intended. | |||
4229 | auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap; | |||
4230 | ||||
4231 | return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth); | |||
4232 | } | |||
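// Concrete instance of the NSW reasoning above (i8, illustrative): with
// M = -128, "-1 - M" = 127 does not signed-wrap, yet "(-1) * M" does; so
// NSW transfers from LHS - RHS to LHS + (-1)*RHS only once RHS != M is
// proven (RHSIsNotMinSigned) or LHS is known non-negative.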
4233 | ||||
4234 | const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty, | |||
4235 | unsigned Depth) { | |||
4236 | Type *SrcTy = V->getType(); | |||
4237 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | |||
4238 | "Cannot truncate or zero extend with non-integer arguments!"); | |||
4239 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) | |||
4240 | return V; // No conversion | |||
4241 | if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) | |||
4242 | return getTruncateExpr(V, Ty, Depth); | |||
4243 | return getZeroExtendExpr(V, Ty, Depth); | |||
4244 | } | |||
4245 | ||||
4246 | const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty, | |||
4247 | unsigned Depth) { | |||
4248 | Type *SrcTy = V->getType(); | |||
4249 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | |||
4250 | "Cannot truncate or zero extend with non-integer arguments!"); | |||
4251 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) | |||
4252 | return V; // No conversion | |||
4253 | if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) | |||
4254 | return getTruncateExpr(V, Ty, Depth); | |||
4255 | return getSignExtendExpr(V, Ty, Depth); | |||
4256 | } | |||
4257 | ||||
4258 | const SCEV * | |||
4259 | ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) { | |||
4260 | Type *SrcTy = V->getType(); | |||
4261 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | |||
4262 | "Cannot noop or zero extend with non-integer arguments!"); | |||
4263 | assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && | |||
4264 | "getNoopOrZeroExtend cannot truncate!"); | |||
4265 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) | |||
4266 | return V; // No conversion | |||
4267 | return getZeroExtendExpr(V, Ty); | |||
4268 | } | |||
4269 | ||||
4270 | const SCEV * | |||
4271 | ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) { | |||
4272 | Type *SrcTy = V->getType(); | |||
4273 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | |||
4274 | "Cannot noop or sign extend with non-integer arguments!"); | |||
4275 | assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && | |||
4276 | "getNoopOrSignExtend cannot truncate!"); | |||
4277 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) | |||
4278 | return V; // No conversion | |||
4279 | return getSignExtendExpr(V, Ty); | |||
4280 | } | |||
4281 | ||||
4282 | const SCEV * | |||
4283 | ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) { | |||
4284 | Type *SrcTy = V->getType(); | |||
4285 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | |||
4286 | "Cannot noop or any extend with non-integer arguments!"); | |||
4287 | assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && | |||
4288 | "getNoopOrAnyExtend cannot truncate!"); | |||
4289 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) | |||
4290 | return V; // No conversion | |||
4291 | return getAnyExtendExpr(V, Ty); | |||
4292 | } | |||
4293 | ||||
4294 | const SCEV * | |||
4295 | ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) { | |||
4296 | Type *SrcTy = V->getType(); | |||
4297 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && | |||
4298 | "Cannot truncate or noop with non-integer arguments!"); | |||
4299 | assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && | |||
4300 | "getTruncateOrNoop cannot extend!"); | |||
4301 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) | |||
4302 | return V; // No conversion | |||
4303 | return getTruncateExpr(V, Ty); | |||
4304 | } | |||
4305 | ||||
4306 | const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, | |||
4307 | const SCEV *RHS) { | |||
4308 | const SCEV *PromotedLHS = LHS; | |||
4309 | const SCEV *PromotedRHS = RHS; | |||
4310 | ||||
4311 | if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) | |||
4312 | PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); | |||
4313 | else | |||
4314 | PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); | |||
4315 | ||||
4316 | return getUMaxExpr(PromotedLHS, PromotedRHS); | |||
4317 | } | |||
4318 | ||||
4319 | const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, | |||
4320 | const SCEV *RHS) { | |||
4321 | SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; | |||
4322 | return getUMinFromMismatchedTypes(Ops); | |||
4323 | } | |||
4324 | ||||
4325 | const SCEV *ScalarEvolution::getUMinFromMismatchedTypes( | |||
4326 | SmallVectorImpl<const SCEV *> &Ops) { | |||
4327 | assert(!Ops.empty() && "At least one operand must be!"); | |||
4328 | // Trivial case. | |||
4329 | if (Ops.size() == 1) | |||
4330 | return Ops[0]; | |||
4331 | ||||
4332 | // Find the max type first. | |||
4333 | Type *MaxType = nullptr; | |||
4334 | for (auto *S : Ops) | |||
4335 | if (MaxType) | |||
4336 | MaxType = getWiderType(MaxType, S->getType()); | |||
4337 | else | |||
4338 | MaxType = S->getType(); | |||
4339 | assert(MaxType && "Failed to find maximum type!"); | |||
4340 | ||||
4341 | // Extend all ops to max type. | |||
4342 | SmallVector<const SCEV *, 2> PromotedOps; | |||
4343 | for (auto *S : Ops) | |||
4344 | PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); | |||
4345 | ||||
4346 | // Generate umin. | |||
4347 | return getUMinExpr(PromotedOps); | |||
4348 | } | |||
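// Usage sketch (illustrative): for operands of types {i32, i64, i32}, the
// loop above settles on i64 as the widest type, each narrower operand is
// zero-extended to i64, and a single umin over the promoted operands is
// returned.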
4349 | ||||
4350 | const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { | |||
4351 | // A pointer operand may evaluate to a nonpointer expression, such as null. | |||
4352 | if (!V->getType()->isPointerTy()) | |||
4353 | return V; | |||
4354 | ||||
4355 | while (true) { | |||
4356 | if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { | |||
4357 | V = AddRec->getStart(); | |||
4358 | } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) { | |||
4359 | const SCEV *PtrOp = nullptr; | |||
4360 | for (const SCEV *AddOp : Add->operands()) { | |||
4361 | if (AddOp->getType()->isPointerTy()) { | |||
4362 | // Cannot find the base of an expression with multiple pointer ops. | |||
4363 | if (PtrOp) | |||
4364 | return V; | |||
4365 | PtrOp = AddOp; | |||
4366 | } | |||
4367 | } | |||
4368 | if (!PtrOp) // All operands were non-pointer. | |||
4369 | return V; | |||
4370 | V = PtrOp; | |||
4371 | } else // Not something we can look further into. | |||
4372 | return V; | |||
4373 | } | |||
4374 | } | |||
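// Example (illustrative): getPointerBase({(%base + 4),+,8}<L>) follows the
// addrec start to (%base + 4), then the add's unique pointer operand,
// returning the SCEVUnknown %base; an add with two pointer operands or a
// bare SCEVUnknown ends the walk.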
4375 | ||||
4376 | /// Push users of the given Instruction onto the given Worklist. | |||
4377 | static void | |||
4378 | PushDefUseChildren(Instruction *I, | |||
4379 | SmallVectorImpl<Instruction *> &Worklist) { | |||
4380 | // Push the def-use children onto the Worklist stack. | |||
4381 | for (User *U : I->users()) | |||
4382 | Worklist.push_back(cast<Instruction>(U)); | |||
4383 | } | |||
4384 | ||||
4385 | void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { | |||
4386 | SmallVector<Instruction *, 16> Worklist; | |||
4387 | PushDefUseChildren(PN, Worklist); | |||
4388 | ||||
4389 | SmallPtrSet<Instruction *, 8> Visited; | |||
4390 | Visited.insert(PN); | |||
4391 | while (!Worklist.empty()) { | |||
4392 | Instruction *I = Worklist.pop_back_val(); | |||
4393 | if (!Visited.insert(I).second) | |||
4394 | continue; | |||
4395 | ||||
4396 | auto It = ValueExprMap.find_as(static_cast<Value *>(I)); | |||
4397 | if (It != ValueExprMap.end()) { | |||
4398 | const SCEV *Old = It->second; | |||
4399 | ||||
4400 | // Short-circuit the def-use traversal if the symbolic name | |||
4401 | // ceases to appear in expressions. | |||
4402 | if (Old != SymName && !hasOperand(Old, SymName)) | |||
4403 | continue; | |||
4404 | ||||
4405 | // SCEVUnknown for a PHI either means that it has an unrecognized | |||
4406 | // structure, it's a PHI that's in the process of being computed | |||
4407 | // by createNodeForPHI, or it's a single-value PHI. In the first case, | |||
4408 | // additional loop trip count information isn't going to change anything. | |||
4409 | // In the second case, createNodeForPHI will perform the necessary | |||
4410 | // updates on its own when it gets to that point. In the third, we do | |||
4411 | // want to forget the SCEVUnknown. | |||
4412 | if (!isa<PHINode>(I) || | |||
4413 | !isa<SCEVUnknown>(Old) || | |||
4414 | (I != PN && Old == SymName)) { | |||
4415 | eraseValueFromMap(It->first); | |||
4416 | forgetMemoizedResults(Old); | |||
4417 | } | |||
4418 | } | |||
4419 | ||||
4420 | PushDefUseChildren(I, Worklist); | |||
4421 | } | |||
4422 | } | |||
4423 | ||||
4424 | namespace { | |||
4425 | ||||
4426 | /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start | |||
4427 | /// expression if its Loop is L. If the Loop is not L, then: if | |||
4428 | /// IgnoreOtherLoops is true, use the AddRec itself; otherwise the rewrite | |||
4429 | /// cannot be done. If the SCEV contains a non-invariant SCEVUnknown, the | |||
4430 | /// rewrite cannot be done either. | |||
4431 | class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> { | |||
4432 | public: | |||
4433 | static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, | |||
4434 | bool IgnoreOtherLoops = true) { | |||
4435 | SCEVInitRewriter Rewriter(L, SE); | |||
4436 | const SCEV *Result = Rewriter.visit(S); | |||
4437 | if (Rewriter.hasSeenLoopVariantSCEVUnknown()) | |||
4438 | return SE.getCouldNotCompute(); | |||
4439 | return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops | |||
4440 | ? SE.getCouldNotCompute() | |||
4441 | : Result; | |||
4442 | } | |||
4443 | ||||
4444 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { | |||
4445 | if (!SE.isLoopInvariant(Expr, L)) | |||
4446 | SeenLoopVariantSCEVUnknown = true; | |||
4447 | return Expr; | |||
4448 | } | |||
4449 | ||||
4450 | const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { | |||
4451 | // Only re-write AddRecExprs for this loop. | |||
4452 | if (Expr->getLoop() == L) | |||
4453 | return Expr->getStart(); | |||
4454 | SeenOtherLoops = true; | |||
4455 | return Expr; | |||
4456 | } | |||
4457 | ||||
4458 | bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } | |||
4459 | ||||
4460 | bool hasSeenOtherLoops() { return SeenOtherLoops; } | |||
4461 | ||||
4462 | private: | |||
4463 | explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) | |||
4464 | : SCEVRewriteVisitor(SE), L(L) {} | |||
4465 | ||||
4466 | const Loop *L; | |||
4467 | bool SeenLoopVariantSCEVUnknown = false; | |||
4468 | bool SeenOtherLoops = false; | |||
4469 | }; | |||
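// Example (illustrative): SCEVInitRewriter::rewrite((%n + {0,+,1}<L>), L, SE)
// rewrites the addrec to its start, yielding (%n + 0) == %n, the value on
// loop entry. An addrec of a sibling loop would instead produce
// getCouldNotCompute() unless IgnoreOtherLoops is true, in which case the
// foreign addrec is kept as-is.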
4470 | ||||
4471 | /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its | |||
4472 | /// post-increment expression if its Loop is L; if it is not L, use the | |||
4473 | /// AddRec itself. | |||
4474 | /// If the SCEV contains a non-invariant SCEVUnknown, the rewrite cannot be done. | |||
4475 | class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> { | |||
4476 | public: | |||
4477 | static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { | |||
4478 | SCEVPostIncRewriter Rewriter(L, SE); | |||
4479 | const SCEV *Result = Rewriter.visit(S); | |||
4480 | return Rewriter.hasSeenLoopVariantSCEVUnknown() | |||
4481 | ? SE.getCouldNotCompute() | |||
4482 | : Result; | |||
4483 | } | |||
4484 | ||||
4485 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { | |||
4486 | if (!SE.isLoopInvariant(Expr, L)) | |||
4487 | SeenLoopVariantSCEVUnknown = true; | |||
4488 | return Expr; | |||
4489 | } | |||
4490 | ||||
4491 | const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { | |||
4492 | // Only re-write AddRecExprs for this loop. | |||
4493 | if (Expr->getLoop() == L) | |||
4494 | return Expr->getPostIncExpr(SE); | |||
4495 | SeenOtherLoops = true; | |||
4496 | return Expr; | |||
4497 | } | |||
4498 | ||||
4499 | bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } | |||
4500 | ||||
4501 | bool hasSeenOtherLoops() { return SeenOtherLoops; } | |||
4502 | ||||
4503 | private: | |||
4504 | explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) | |||
4505 | : SCEVRewriteVisitor(SE), L(L) {} | |||
4506 | ||||
4507 | const Loop *L; | |||
4508 | bool SeenLoopVariantSCEVUnknown = false; | |||
4509 | bool SeenOtherLoops = false; | |||
4510 | }; | |||
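// Example (illustrative): SCEVPostIncRewriter::rewrite({0,+,1}<L>, L, SE)
// yields the post-increment form {1,+,1}<L>, the value after one more
// backedge; addrecs of other loops pass through unchanged.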
4511 | ||||
4512 | /// This class evaluates the compare condition by matching it against the | |||
4513 | /// condition of the loop latch. If there is a match we assume a true value | |||
4514 | /// for the condition while building SCEV nodes. | |||
4515 | class SCEVBackedgeConditionFolder | |||
4516 | : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { | |||
4517 | public: | |||
4518 | static const SCEV *rewrite(const SCEV *S, const Loop *L, | |||
4519 | ScalarEvolution &SE) { | |||
4520 | bool IsPosBECond = false; | |||
4521 | Value *BECond = nullptr; | |||
4522 | if (BasicBlock *Latch = L->getLoopLatch()) { | |||
4523 | BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); | |||
4524 | if (BI && BI->isConditional()) { | |||
4525 | assert(BI->getSuccessor(0) != BI->getSuccessor(1) && | |||
4526 | "Both outgoing branches should not target same header!"); | |||
4527 | BECond = BI->getCondition(); | |||
4528 | IsPosBECond = BI->getSuccessor(0) == L->getHeader(); | |||
4529 | } else { | |||
4530 | return S; | |||
4531 | } | |||
4532 | } | |||
4533 | SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); | |||
4534 | return Rewriter.visit(S); | |||
4535 | } | |||
4536 | ||||
4537 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { | |||
4538 | const SCEV *Result = Expr; | |||
4539 | bool InvariantF = SE.isLoopInvariant(Expr, L); | |||
4540 | ||||
4541 | if (!InvariantF) { | |||
4542 | Instruction *I = cast<Instruction>(Expr->getValue()); | |||
4543 | switch (I->getOpcode()) { | |||
4544 | case Instruction::Select: { | |||
4545 | SelectInst *SI = cast<SelectInst>(I); | |||
4546 | Optional<const SCEV *> Res = | |||
4547 | compareWithBackedgeCondition(SI->getCondition()); | |||
4548 | if (Res.hasValue()) { | |||
4549 | bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); | |||
4550 | Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); | |||
4551 | } | |||
4552 | break; | |||
4553 | } | |||
4554 | default: { | |||
4555 | Optional<const SCEV *> Res = compareWithBackedgeCondition(I); | |||
4556 | if (Res.hasValue()) | |||
4557 | Result = Res.getValue(); | |||
4558 | break; | |||
4559 | } | |||
4560 | } | |||
4561 | } | |||
4562 | return Result; | |||
4563 | } | |||
4564 | ||||
4565 | private: | |||
4566 | explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, | |||
4567 | bool IsPosBECond, ScalarEvolution &SE) | |||
4568 | : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), | |||
4569 | IsPositiveBECond(IsPosBECond) {} | |||
4570 | ||||
4571 | Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); | |||
4572 | ||||
4573 | const Loop *L; | |||
4574 | /// Loop back condition. | |||
4575 | Value *BackedgeCond = nullptr; | |||
4576 | /// Set to true if loop back is on positive branch condition. | |||
4577 | bool IsPositiveBECond; | |||
4578 | }; | |||
4579 | ||||
4580 | Optional<const SCEV *> | |||
4581 | SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { | |||
4582 | ||||
4583 | // If value matches the backedge condition for loop latch, | |||
4584 | // then return a constant evolution node based on loopback | |||
4585 | // branch taken. | |||
4586 | if (BackedgeCond == IC) | |||
4587 | return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext())) | |||
4588 | : SE.getZero(Type::getInt1Ty(SE.getContext())); | |||
4589 | return None; | |||
4590 | } | |||
4591 | ||||
4592 | class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { | |||
4593 | public: | |||
4594 | static const SCEV *rewrite(const SCEV *S, const Loop *L, | |||
4595 | ScalarEvolution &SE) { | |||
4596 | SCEVShiftRewriter Rewriter(L, SE); | |||
4597 | const SCEV *Result = Rewriter.visit(S); | |||
4598 | return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); | |||
4599 | } | |||
4600 | ||||
4601 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { | |||
4602 | // Only allow AddRecExprs for this loop. | |||
4603 | if (!SE.isLoopInvariant(Expr, L)) | |||
4604 | Valid = false; | |||
4605 | return Expr; | |||
4606 | } | |||
4607 | ||||
4608 | const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { | |||
4609 | if (Expr->getLoop() == L && Expr->isAffine()) | |||
4610 | return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); | |||
4611 | Valid = false; | |||
4612 | return Expr; | |||
4613 | } | |||
4614 | ||||
4615 | bool isValid() { return Valid; } | |||
4616 | ||||
4617 | private: | |||
4618 | explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) | |||
4619 | : SCEVRewriteVisitor(SE), L(L) {} | |||
4620 | ||||
4621 | const Loop *L; | |||
4622 | bool Valid = true; | |||
4623 | }; | |||
4624 | ||||
4625 | } // end anonymous namespace | |||
4626 | ||||
4627 | SCEV::NoWrapFlags | |||
4628 | ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { | |||
4629 | if (!AR->isAffine()) | |||
4630 | return SCEV::FlagAnyWrap; | |||
4631 | ||||
4632 | using OBO = OverflowingBinaryOperator; | |||
4633 | ||||
4634 | SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; | |||
4635 | ||||
4636 | if (!AR->hasNoSignedWrap()) { | |||
4637 | ConstantRange AddRecRange = getSignedRange(AR); | |||
4638 | ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); | |||
4639 | ||||
4640 | auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( | |||
4641 | Instruction::Add, IncRange, OBO::NoSignedWrap); | |||
4642 | if (NSWRegion.contains(AddRecRange)) | |||
4643 | Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); | |||
4644 | } | |||
4645 | ||||
4646 | if (!AR->hasNoUnsignedWrap()) { | |||
4647 | ConstantRange AddRecRange = getUnsignedRange(AR); | |||
4648 | ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); | |||
4649 | ||||
4650 | auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( | |||
4651 | Instruction::Add, IncRange, OBO::NoUnsignedWrap); | |||
4652 | if (NUWRegion.contains(AddRecRange)) | |||
4653 | Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); | |||
4654 | } | |||
4655 | ||||
4656 | return Result; | |||
4657 | } | |||
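// Worked instance (i8, illustrative): for {0,+,1}<L> whose signed range is
// known to be [0, 100), the step range is [1, 2) and
// makeGuaranteedNoWrapRegion(Add, [1, 2), NoSignedWrap) = [-128, 127),
// which contains [0, 100), so FlagNSW is added to the result.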
4658 | ||||
4659 | SCEV::NoWrapFlags | |||
4660 | ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) { | |||
4661 | SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); | |||
4662 | ||||
4663 | if (AR->hasNoSignedWrap()) | |||
4664 | return Result; | |||
4665 | ||||
4666 | if (!AR->isAffine()) | |||
4667 | return Result; | |||
4668 | ||||
4669 | const SCEV *Step = AR->getStepRecurrence(*this); | |||
4670 | const Loop *L = AR->getLoop(); | |||
4671 | ||||
4672 | // Check whether the backedge-taken count is SCEVCouldNotCompute. | |||
4673 | // Note that this serves two purposes: It filters out loops that are | |||
4674 | // simply not analyzable, and it covers the case where this code is | |||
4675 | // being called from within backedge-taken count analysis, such that | |||
4676 | // attempting to ask for the backedge-taken count would likely result | |||
4677 | // in infinite recursion. In the latter case, the analysis code will | |||
4678 | // cope with a conservative value, and it will take care to purge | |||
4679 | // that value once it has finished. | |||
4680 | const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); | |||
4681 | ||||
4682 | // Normally, in the cases we can prove no-overflow via a | |||
4683 | // backedge guarding condition, we can also compute a backedge | |||
4684 | // taken count for the loop. The exceptions are assumptions and | |||
4685 | // guards present in the loop -- SCEV is not great at exploiting | |||
4686 | // these to compute max backedge taken counts, but can still use | |||
4687 | // these to prove lack of overflow. Use this fact to avoid | |||
4688 | // doing extra work that may not pay off. | |||
4689 | ||||
4690 | if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards && | |||
4691 | AC.assumptions().empty()) | |||
4692 | return Result; | |||
4693 | ||||
4694 | // If the backedge is guarded by a comparison with the pre-inc value the | |||
4695 | // addrec is safe. Also, if the entry is guarded by a comparison with the | |||
4696 | // start value and the backedge is guarded by a comparison with the post-inc | |||
4697 | // value, the addrec is safe. | |||
4698 | ICmpInst::Predicate Pred; | |||
4699 | const SCEV *OverflowLimit = | |||
4700 | getSignedOverflowLimitForStep(Step, &Pred, this); | |||
4701 | if (OverflowLimit && | |||
4702 | (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || | |||
4703 | isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { | |||
4704 | Result = setFlags(Result, SCEV::FlagNSW); | |||
4705 | } | |||
4706 | return Result; | |||
4707 | } | |||
4708 | SCEV::NoWrapFlags | |||
4709 | ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) { | |||
4710 | SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); | |||
4711 | ||||
4712 | if (AR->hasNoUnsignedWrap()) | |||
4713 | return Result; | |||
4714 | ||||
4715 | if (!AR->isAffine()) | |||
4716 | return Result; | |||
4717 | ||||
4718 | const SCEV *Step = AR->getStepRecurrence(*this); | |||
4719 | unsigned BitWidth = getTypeSizeInBits(AR->getType()); | |||
4720 | const Loop *L = AR->getLoop(); | |||
4721 | ||||
4722 | // Check whether the backedge-taken count is SCEVCouldNotCompute. | |||
4723 | // Note that this serves two purposes: It filters out loops that are | |||
4724 | // simply not analyzable, and it covers the case where this code is | |||
4725 | // being called from within backedge-taken count analysis, such that | |||
4726 | // attempting to ask for the backedge-taken count would likely result | |||
4727 | // in infinite recursion. In the latter case, the analysis code will | |||
4728 | // cope with a conservative value, and it will take care to purge | |||
4729 | // that value once it has finished. | |||
4730 | const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); | |||
4731 | ||||
4732 | // Normally, in the cases we can prove no-overflow via a | |||
4733 | // backedge guarding condition, we can also compute a backedge | |||
4734 | // taken count for the loop. The exceptions are assumptions and | |||
4735 | // guards present in the loop -- SCEV is not great at exploiting | |||
4736 | // these to compute max backedge taken counts, but can still use | |||
4737 | // these to prove lack of overflow. Use this fact to avoid | |||
4738 | // doing extra work that may not pay off. | |||
4739 | ||||
4740 | if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards && | |||
4741 | AC.assumptions().empty()) | |||
4742 | return Result; | |||
4743 | ||||
4744 | // If the backedge is guarded by a comparison with the pre-inc value the | |||
4745 | // addrec is safe. Also, if the entry is guarded by a comparison with the | |||
4746 | // start value and the backedge is guarded by a comparison with the post-inc | |||
4747 | // value, the addrec is safe. | |||
4748 | if (isKnownPositive(Step)) { | |||
4749 | const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - | |||
4750 | getUnsignedRangeMax(Step)); | |||
4751 | if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || | |||
4752 | isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { | |||
4753 | Result = setFlags(Result, SCEV::FlagNUW); | |||
4754 | } | |||
4755 | } | |||
4756 | ||||
4757 | return Result; | |||
4758 | } | |||
4759 | ||||
4760 | namespace { | |||
4761 | ||||
4762 | /// Represents an abstract binary operation. This may exist as a | |||
4763 | /// normal instruction or constant expression, or may have been | |||
4764 | /// derived from an expression tree. | |||
4765 | struct BinaryOp { | |||
4766 | unsigned Opcode; | |||
4767 | Value *LHS; | |||
4768 | Value *RHS; | |||
4769 | bool IsNSW = false; | |||
4770 | bool IsNUW = false; | |||
4771 | ||||
4772 | /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or | |||
4773 | /// constant expression. | |||
4774 | Operator *Op = nullptr; | |||
4775 | ||||
4776 | explicit BinaryOp(Operator *Op) | |||
4777 | : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)), | |||
4778 | Op(Op) { | |||
4779 | if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) { | |||
4780 | IsNSW = OBO->hasNoSignedWrap(); | |||
4781 | IsNUW = OBO->hasNoUnsignedWrap(); | |||
4782 | } | |||
4783 | } | |||
4784 | ||||
4785 | explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false, | |||
4786 | bool IsNUW = false) | |||
4787 | : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {} | |||
4788 | }; | |||
4789 | ||||
4790 | } // end anonymous namespace | |||
4791 | ||||
4792 | /// Try to map \p V into a BinaryOp, and return \c None on failure. | |||
4793 | static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) { | |||
4794 | auto *Op = dyn_cast<Operator>(V); | |||
4795 | if (!Op) | |||
4796 | return None; | |||
4797 | ||||
4798 | // Implementation detail: all the cleverness here should happen without | |||
4799 | // creating new SCEV expressions -- our caller knows tricks to avoid creating | |||
4800 | // SCEV expressions when possible, and we should not break that. | |||
4801 | ||||
4802 | switch (Op->getOpcode()) { | |||
4803 | case Instruction::Add: | |||
4804 | case Instruction::Sub: | |||
4805 | case Instruction::Mul: | |||
4806 | case Instruction::UDiv: | |||
4807 | case Instruction::URem: | |||
4808 | case Instruction::And: | |||
4809 | case Instruction::Or: | |||
4810 | case Instruction::AShr: | |||
4811 | case Instruction::Shl: | |||
4812 | return BinaryOp(Op); | |||
4813 | ||||
4814 | case Instruction::Xor: | |||
4815 | if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1))) | |||
4816 | // If the RHS of the xor is a signmask, then this is just an add. | |||
4817 | // Instcombine turns add of signmask into xor as a strength reduction step. | |||
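| // Editor's illustration: "xor i32 %a, -2147483648" (bit pattern | |||
| // 0x80000000) flips only the sign bit, exactly like | |||
| // "add i32 %a, -2147483648", so modeling it as an add is | |||
| // value-preserving. | |||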
4818 | if (RHSC->getValue().isSignMask()) | |||
4819 | return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); | |||
4820 | return BinaryOp(Op); | |||
4821 | ||||
4822 | case Instruction::LShr: | |||
4823 | // Turn logical shift right of a constant into an unsigned divide. | |||
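| // Editor's illustration: "lshr i32 %x, 3" is mapped to | |||
| // "udiv i32 %x, 8", an unsigned division by 2^3. | |||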
4824 | if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) { | |||
4825 | uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth(); | |||
4826 | ||||
4827 | // If the shift count is not less than the bitwidth, the result of | |||
4828 | // the shift is undefined. Don't try to analyze it, because the | |||
4829 | // resolution chosen here may differ from the resolution chosen in | |||
4830 | // other parts of the compiler. | |||
4831 | if (SA->getValue().ult(BitWidth)) { | |||
4832 | Constant *X = | |||
4833 | ConstantInt::get(SA->getContext(), | |||
4834 | APInt::getOneBitSet(BitWidth, SA->getZExtValue())); | |||
4835 | return BinaryOp(Instruction::UDiv, Op->getOperand(0), X); | |||
4836 | } | |||
4837 | } | |||
4838 | return BinaryOp(Op); | |||
4839 | ||||
4840 | case Instruction::ExtractValue: { | |||
4841 | auto *EVI = cast<ExtractValueInst>(Op); | |||
4842 | if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0) | |||
4843 | break; | |||
4844 | ||||
4845 | auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()); | |||
4846 | if (!WO) | |||
4847 | break; | |||
4848 | ||||
4849 | Instruction::BinaryOps BinOp = WO->getBinaryOp(); | |||
4850 | bool Signed = WO->isSigned(); | |||
4851 | // TODO: Should add nuw/nsw flags for mul as well. | |||
4852 | if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT)) | |||
4853 | return BinaryOp(BinOp, WO->getLHS(), WO->getRHS()); | |||
4854 | ||||
4855 | // Now that we know that all uses of the arithmetic-result component of | |||
4856 | // CI are guarded by the overflow check, we can go ahead and pretend | |||
4857 | // that the arithmetic is non-overflowing. | |||
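| // Editor's illustration: a fully guarded @llvm.sadd.with.overflow.i32 | |||
| // is therefore treated like "add nsw i32 %lhs, %rhs" (and the | |||
| // unsigned variants like "add nuw"). | |||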
4858 | return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(), | |||
4859 | /* IsNSW = */ Signed, /* IsNUW = */ !Signed); | |||
4860 | } | |||
4861 | ||||
4862 | default: | |||
4863 | break; | |||
4864 | } | |||
4865 | ||||
4866 | // Recognize the intrinsic loop.decrement.reg; since it has exactly the same | |||
4867 | // semantics as a Sub, return a binary sub expression. | |||
4868 | if (auto *II = dyn_cast<IntrinsicInst>(V)) | |||
4869 | if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg) | |||
4870 | return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1)); | |||
4871 | ||||
4872 | return None; | |||
4873 | } | |||
4874 | ||||
4875 | /// Helper function to createAddRecFromPHIWithCasts. We have a phi | |||
4876 | /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via | |||
4877 | /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the | |||
4878 | /// way. This function checks if \p Op, an operand of this SCEVAddExpr, | |||
4879 | /// follows one of the following patterns: | |||
4880 | /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) | |||
4881 | /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) | |||
4882 | /// If the SCEV expression of \p Op conforms with one of the expected patterns | |||
4883 | /// we return the type of the truncation operation, and indicate whether the | |||
4884 | /// truncated type should be treated as signed/unsigned by setting | |||
4885 | /// \p Signed to true/false, respectively. | |||
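| /// Editor's illustration: with a 64-bit \p SymbolicPHI %X, | |||
| ///   Op == (sext i32 (trunc i64 %X to i32) to i64) | |||
| /// matches the first pattern; the function returns the i32 type and | |||
| /// sets \p Signed to true. | |||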
4886 | static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, | |||
4887 | bool &Signed, ScalarEvolution &SE) { | |||
4888 | // The case where Op == SymbolicPHI (that is, with no type conversions on | |||
4889 | // the way) is handled by the regular add recurrence creating logic and | |||
4890 | // would have already been triggered in createAddRecForPHI. Reaching it here | |||
4891 | // means that createAddRecFromPHI had failed for this PHI before (e.g., | |||
4892 | // because one of the other operands of the SCEVAddExpr updating this PHI is | |||
4893 | // not invariant). | |||
4894 | // | |||
4895 | // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in | |||
4896 | // this case predicates that allow us to prove that Op == SymbolicPHI will | |||
4897 | // be added. | |||
4898 | if (Op == SymbolicPHI) | |||
4899 | return nullptr; | |||
4900 | ||||
4901 | unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); | |||
4902 | unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); | |||
4903 | if (SourceBits != NewBits) | |||
4904 | return nullptr; | |||
4905 | ||||
4906 | const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); | |||
4907 | const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); | |||
4908 | if (!SExt && !ZExt) | |||
4909 | return nullptr; | |||
4910 | const SCEVTruncateExpr *Trunc = | |||
4911 | SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) | |||
4912 | : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); | |||
4913 | if (!Trunc) | |||
4914 | return nullptr; | |||
4915 | const SCEV *X = Trunc->getOperand(); | |||
4916 | if (X != SymbolicPHI) | |||
4917 | return nullptr; | |||
4918 | Signed = SExt != nullptr; | |||
4919 | return Trunc->getType(); | |||
4920 | } | |||
4921 | ||||
4922 | static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { | |||
4923 | if (!PN->getType()->isIntegerTy()) | |||
4924 | return nullptr; | |||
4925 | const Loop *L = LI.getLoopFor(PN->getParent()); | |||
4926 | if (!L || L->getHeader() != PN->getParent()) | |||
4927 | return nullptr; | |||
4928 | return L; | |||
4929 | } | |||
4930 | ||||
4931 | // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the | |||
4932 | // computation that updates the phi follows the following pattern: | |||
4933 | // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum | |||
4934 | // which corresponds to a phi->trunc->sext/zext->add->phi update chain. | |||
4935 | // If so, try to see if it can be rewritten as an AddRecExpr under some | |||
4936 | // Predicates. If successful, return them as a pair. Also cache the results | |||
4937 | // of the analysis. | |||
4938 | // | |||
4939 | // Example usage scenario: | |||
4940 | // Say the Rewriter is called for the following SCEV: | |||
4941 | // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) | |||
4942 | // where: | |||
4943 | // %X = phi i64 (%Start, %BEValue) | |||
4944 | // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), | |||
4945 | // and call this function with %SymbolicPHI = %X. | |||
4946 | // | |||
4947 | // The analysis will find that the value coming around the backedge has | |||
4948 | // the following SCEV: | |||
4949 | // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step) | |||
4950 | // Upon concluding that this matches the desired pattern, the function | |||
4951 | // will return the pair {NewAddRec, SmallPredsVec} where: | |||
4952 | // NewAddRec = {%Start,+,%Step} | |||
4953 | // SmallPredsVec = {P1, P2, P3} as follows: | |||
4954 | // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw> | |||
4955 | // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64) | |||
4956 | // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64) | |||
4957 | // The returned pair means that SymbolicPHI can be rewritten into NewAddRec | |||
4958 | // under the predicates {P1,P2,P3}. | |||
4959 | // This predicated rewrite will be cached in PredicatedSCEVRewrites: | |||
4960 | // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}} | |||
4961 | // | |||
4962 | // TODO's: | |||
4963 | // | |||
4964 | // 1) Extend the Induction descriptor to also support inductions that involve | |||
4965 | // casts: When needed (namely, when we are called in the context of the | |||
4966 | // vectorizer induction analysis), a Set of cast instructions will be | |||
4967 | // populated by this method, and provided back to isInductionPHI. This is | |||
4968 | // needed to allow the vectorizer to properly record them to be ignored by | |||
4969 | // the cost model and to avoid vectorizing them (otherwise these casts, | |||
4970 | // which are redundant under the runtime overflow checks, will be | |||
4971 | // vectorized, which can be costly). | |||
4972 | // | |||
4973 | // 2) Support additional induction/PHISCEV patterns: We also want to support | |||
4974 | // inductions where the sext-trunc / zext-trunc operations (partly) occur | |||
4975 | // after the induction update operation (the induction increment): | |||
4976 | // | |||
4977 | // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) | |||
4978 | // which corresponds to a phi->add->trunc->sext/zext->phi update chain. | |||
4979 | // | |||
4980 | // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix) | |||
4981 | // which corresponds to a phi->trunc->add->sext/zext->phi update chain. | |||
4982 | // | |||
4983 | // 3) Outline common code with createAddRecFromPHI to avoid duplication. | |||
4984 | Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> | |||
4985 | ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { | |||
4986 | SmallVector<const SCEVPredicate *, 3> Predicates; | |||
4987 | ||||
4988 | // *** Part1: Analyze if we have a phi-with-cast pattern for which we can | |||
4989 | // return an AddRec expression under some predicate. | |||
4990 | ||||
4991 | auto *PN = cast<PHINode>(SymbolicPHI->getValue()); | |||
4992 | const Loop *L = isIntegerLoopHeaderPHI(PN, LI); | |||
4993 | assert(L && "Expecting an integer loop header phi"); | |||
4994 | ||||
4995 | // The loop may have multiple entrances or multiple exits; we can analyze | |||
4996 | // this phi as an addrec if it has a unique entry value and a unique | |||
4997 | // backedge value. | |||
4998 | Value *BEValueV = nullptr, *StartValueV = nullptr; | |||
4999 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { | |||
5000 | Value *V = PN->getIncomingValue(i); | |||
5001 | if (L->contains(PN->getIncomingBlock(i))) { | |||
5002 | if (!BEValueV) { | |||
5003 | BEValueV = V; | |||
5004 | } else if (BEValueV != V) { | |||
5005 | BEValueV = nullptr; | |||
5006 | break; | |||
5007 | } | |||
5008 | } else if (!StartValueV) { | |||
5009 | StartValueV = V; | |||
5010 | } else if (StartValueV != V) { | |||
5011 | StartValueV = nullptr; | |||
5012 | break; | |||
5013 | } | |||
5014 | } | |||
5015 | if (!BEValueV || !StartValueV) | |||
5016 | return None; | |||
5017 | ||||
5018 | const SCEV *BEValue = getSCEV(BEValueV); | |||
5019 | ||||
5020 | // If the value coming around the backedge is an add with the symbolic | |||
5021 | // value we just inserted, possibly with casts that we can ignore under | |||
5022 | // an appropriate runtime guard, then we found a simple induction variable! | |||
5023 | const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); | |||
5024 | if (!Add) | |||
5025 | return None; | |||
5026 | ||||
5027 | // If there is a single occurrence of the symbolic value, possibly | |||
5028 | // casted, replace it with a recurrence. | |||
5029 | unsigned FoundIndex = Add->getNumOperands(); | |||
5030 | Type *TruncTy = nullptr; | |||
5031 | bool Signed; | |||
5032 | for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) | |||
5033 | if ((TruncTy = | |||
5034 | isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) | |||
5035 | if (FoundIndex == e) { | |||
5036 | FoundIndex = i; | |||
5037 | break; | |||
5038 | } | |||
5039 | ||||
5040 | if (FoundIndex == Add->getNumOperands()) | |||
5041 | return None; | |||
5042 | ||||
5043 | // Create an add with everything but the specified operand. | |||
5044 | SmallVector<const SCEV *, 8> Ops; | |||
5045 | for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) | |||
5046 | if (i != FoundIndex) | |||
5047 | Ops.push_back(Add->getOperand(i)); | |||
5048 | const SCEV *Accum = getAddExpr(Ops); | |||
5049 | ||||
5050 | // The runtime checks will not be valid if the step amount is | |||
5051 | // varying inside the loop. | |||
5052 | if (!isLoopInvariant(Accum, L)) | |||
5053 | return None; | |||
5054 | ||||
5055 | // *** Part2: Create the predicates | |||
5056 | ||||
5057 | // Analysis was successful: we have a phi-with-cast pattern for which we | |||
5058 | // can return an AddRec expression under the following predicates: | |||
5059 | // | |||
5060 | // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) | |||
5061 | // fits within the truncated type (does not overflow) for i = 0 to n-1. | |||
5062 | // P2: An Equal predicate that guarantees that | |||
5063 | // Start = (Ext ix (Trunc iy (Start) to ix) to iy) | |||
5064 | // P3: An Equal predicate that guarantees that | |||
5065 | // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) | |||
5066 | // | |||
5067 | // As we next prove, the above predicates guarantee that: | |||
5068 | // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) | |||
5069 | // | |||
5070 | // | |||
5071 | // More formally, we want to prove that: | |||
5072 | // Expr(i+1) = Start + (i+1) * Accum | |||
5073 | // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum | |||
5074 | // | |||
5075 | // Given that: | |||
5076 | // 1) Expr(0) = Start | |||
5077 | // 2) Expr(1) = Start + Accum | |||
5078 | // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 | |||
5079 | // 3) Induction hypothesis (step i): | |||
5080 | // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum | |||
5081 | // | |||
5082 | // Proof: | |||
5083 | // Expr(i+1) = | |||
5084 | // = Start + (i+1)*Accum | |||
5085 | // = (Start + i*Accum) + Accum | |||
5086 | // = Expr(i) + Accum | |||
5087 | // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum | |||
5088 | // :: from step i | |||
5089 | // | |||
5090 | // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum | |||
5091 | // | |||
5092 | // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) | |||
5093 | // + (Ext ix (Trunc iy (Accum) to ix) to iy) | |||
5094 | // + Accum :: from P3 | |||
5095 | // | |||
5096 | // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) | |||
5097 | // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) | |||
5098 | // | |||
5099 | // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum | |||
5100 | // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum | |||
5101 | // | |||
5102 | // By induction, the same applies to all iterations 1<=i<n: | |||
5103 | // | |||
5104 | ||||
5105 | // Create a truncated addrec for which we will add a no overflow check (P1). | |||
5106 | const SCEV *StartVal = getSCEV(StartValueV); | |||
5107 | const SCEV *PHISCEV = | |||
5108 | getAddRecExpr(getTruncateExpr(StartVal, TruncTy), | |||
5109 | getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); | |||
5110 | ||||
5111 | // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. | |||
5112 | // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV | |||
5113 | // will be constant. | |||
5114 | // | |||
5115 | // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't | |||
5116 | // add P1. | |||
5117 | if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { | |||
5118 | SCEVWrapPredicate::IncrementWrapFlags AddedFlags = | |||
5119 | Signed ? SCEVWrapPredicate::IncrementNSSW | |||
5120 | : SCEVWrapPredicate::IncrementNUSW; | |||
5121 | const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); | |||
5122 | Predicates.push_back(AddRecPred); | |||
5123 | } | |||
5124 | ||||
5125 | // Create the Equal Predicates P2,P3: | |||
5126 | ||||
5127 | // It is possible that the predicates P2 and/or P3 are computable at | |||
5128 | // compile time due to StartVal and/or Accum being constants. | |||
5129 | // If either one is, then we can check that now and escape if either P2 | |||
5130 | // or P3 is false. | |||
5131 | ||||
5132 | // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) | |||
5133 | // for each of StartVal and Accum | |||
5134 | auto getExtendedExpr = [&](const SCEV *Expr, | |||
5135 | bool CreateSignExtend) -> const SCEV * { | |||
5136 | assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); | |||
5137 | const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); | |||
5138 | const SCEV *ExtendedExpr = | |||
5139 | CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType()) | |||
5140 | : getZeroExtendExpr(TruncatedExpr, Expr->getType()); | |||
5141 | return ExtendedExpr; | |||
5142 | }; | |||
5143 | ||||
5144 | // Given: | |||
5145 | // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy) | |||
5146 | // = getExtendedExpr(Expr) | |||
5147 | // Determine whether the predicate P: Expr == ExtendedExpr | |||
5148 | // is known to be false at compile time | |||
5149 | auto PredIsKnownFalse = [&](const SCEV *Expr, | |||
5150 | const SCEV *ExtendedExpr) -> bool { | |||
5151 | return Expr != ExtendedExpr && | |||
5152 | isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr); | |||
5153 | }; | |||
5154 | ||||
5155 | const SCEV *StartExtended = getExtendedExpr(StartVal, Signed); | |||
5156 | if (PredIsKnownFalse(StartVal, StartExtended)) { | |||
5157 | LLVM_DEBUG(dbgs() << "P2 is compile-time false\n"); | |||
5158 | return None; | |||
5159 | } | |||
5160 | ||||
5161 | // The Step is always Signed (because the overflow checks are either | |||
5162 | // NSSW or NUSW) | |||
5163 | const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true); | |||
5164 | if (PredIsKnownFalse(Accum, AccumExtended)) { | |||
5165 | LLVM_DEBUG(dbgs() << "P3 is compile-time false\n"); | |||
5166 | return None; | |||
5167 | } | |||
5168 | ||||
5169 | auto AppendPredicate = [&](const SCEV *Expr, | |||
5170 | const SCEV *ExtendedExpr) -> void { | |||
5171 | if (Expr != ExtendedExpr && | |||
5172 | !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) { | |||
5173 | const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr); | |||
5174 | LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred); | |||
5175 | Predicates.push_back(Pred); | |||
5176 | } | |||
5177 | }; | |||
5178 | ||||
5179 | AppendPredicate(StartVal, StartExtended); | |||
5180 | AppendPredicate(Accum, AccumExtended); | |||
5181 | ||||
5182 | // *** Part3: Predicates are ready. Now go ahead and create the new addrec in | |||
5183 | // which the casts had been folded away. The caller can rewrite SymbolicPHI | |||
5184 | // into NewAR if it will also add the runtime overflow checks specified in | |||
5185 | // Predicates. | |||
5186 | auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap); | |||
5187 | ||||
5188 | std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite = | |||
5189 | std::make_pair(NewAR, Predicates); | |||
5190 | // Remember the result of the analysis for this SCEV at this location. | |||
5191 | PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite; | |||
5192 | return PredRewrite; | |||
5193 | } | |||
5194 | ||||
5195 | Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> | |||
5196 | ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) { | |||
5197 | auto *PN = cast<PHINode>(SymbolicPHI->getValue()); | |||
5198 | const Loop *L = isIntegerLoopHeaderPHI(PN, LI); | |||
5199 | if (!L) | |||
5200 | return None; | |||
5201 | ||||
5202 | // Check to see if we already analyzed this PHI. | |||
5203 | auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L}); | |||
5204 | if (I != PredicatedSCEVRewrites.end()) { | |||
5205 | std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite = | |||
5206 | I->second; | |||
5207 | // Analysis was done before and failed to create an AddRec: | |||
5208 | if (Rewrite.first == SymbolicPHI) | |||
5209 | return None; | |||
5210 | // Analysis was done before and succeeded in creating an AddRec under | |||
5211 | // a predicate: | |||
5212 | assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec"); | |||
5213 | assert(!(Rewrite.second).empty() && "Expected to find Predicates"); | |||
5214 | return Rewrite; | |||
5215 | } | |||
5216 | ||||
5217 | Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> | |||
5218 | Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI); | |||
5219 | ||||
5220 | // Record in the cache that the analysis failed | |||
5221 | if (!Rewrite) { | |||
5222 | SmallVector<const SCEVPredicate *, 3> Predicates; | |||
5223 | PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates}; | |||
5224 | return None; | |||
5225 | } | |||
5226 | ||||
5227 | return Rewrite; | |||
5228 | } | |||
5229 | ||||
5230 | // FIXME: This utility is currently required because the Rewriter currently | |||
5231 | // does not rewrite this expression: | |||
5232 | // {0, +, (sext ix (trunc iy to ix) to iy)} | |||
5233 | // into {0, +, %step}, | |||
5234 | // even when the following Equal predicate exists: | |||
5235 | // "%step == (sext ix (trunc iy to ix) to iy)". | |||
5236 | bool PredicatedScalarEvolution::areAddRecsEqualWithPreds( | |||
5237 | const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const { | |||
5238 | if (AR1 == AR2) | |||
5239 | return true; | |||
5240 | ||||
5241 | auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool { | |||
5242 | if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) && | |||
5243 | !Preds.implies(SE.getEqualPredicate(Expr2, Expr1))) | |||
5244 | return false; | |||
5245 | return true; | |||
5246 | }; | |||
5247 | ||||
5248 | if (!areExprsEqual(AR1->getStart(), AR2->getStart()) || | |||
5249 | !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE))) | |||
5250 | return false; | |||
5251 | return true; | |||
5252 | } | |||
5253 | ||||
5254 | /// A helper function for createAddRecFromPHI to handle simple cases. | |||
5255 | /// | |||
5256 | /// This function tries to find an AddRec expression for the simplest (yet most | |||
5257 | /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). | |||
5258 | /// If it fails, createAddRecFromPHI will use a more general, but slow, | |||
5259 | /// technique for finding the AddRec expression. | |||
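| /// Editor's illustration of the simple case handled here: | |||
| ///   %iv      = phi i64 [ 0, %entry ], [ %iv.next, %loop ] | |||
| ///   %iv.next = add nuw nsw i64 %iv, 1 | |||
| /// produces the AddRec {0,+,1}<nuw><nsw> for the enclosing loop. | |||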
5260 | const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, | |||
5261 | Value *BEValueV, | |||
5262 | Value *StartValueV) { | |||
5263 | const Loop *L = LI.getLoopFor(PN->getParent()); | |||
5264 | assert(L && L->getHeader() == PN->getParent()); | |||
5265 | assert(BEValueV && StartValueV); | |||
5266 | ||||
5267 | auto BO = MatchBinaryOp(BEValueV, DT); | |||
5268 | if (!BO) | |||
5269 | return nullptr; | |||
5270 | ||||
5271 | if (BO->Opcode != Instruction::Add) | |||
5272 | return nullptr; | |||
5273 | ||||
5274 | const SCEV *Accum = nullptr; | |||
5275 | if (BO->LHS == PN && L->isLoopInvariant(BO->RHS)) | |||
5276 | Accum = getSCEV(BO->RHS); | |||
5277 | else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS)) | |||
5278 | Accum = getSCEV(BO->LHS); | |||
5279 | ||||
5280 | if (!Accum) | |||
5281 | return nullptr; | |||
5282 | ||||
5283 | SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; | |||
5284 | if (BO->IsNUW) | |||
5285 | Flags = setFlags(Flags, SCEV::FlagNUW); | |||
5286 | if (BO->IsNSW) | |||
5287 | Flags = setFlags(Flags, SCEV::FlagNSW); | |||
5288 | ||||
5289 | const SCEV *StartVal = getSCEV(StartValueV); | |||
5290 | const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); | |||
5291 | ||||
5292 | ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; | |||
5293 | ||||
5294 | // We can add Flags to the post-inc expression only if we | |||
5295 | // know that it is *undefined behavior* for BEValueV to | |||
5296 | // overflow. | |||
5297 | if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) | |||
5298 | if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) | |||
5299 | (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); | |||
5300 | ||||
5301 | return PHISCEV; | |||
5302 | } | |||
5303 | ||||
5304 | const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) { | |||
5305 | const Loop *L = LI.getLoopFor(PN->getParent()); | |||
5306 | if (!L || L->getHeader() != PN->getParent()) | |||
5307 | return nullptr; | |||
5308 | ||||
5309 | // The loop may have multiple entrances or multiple exits; we can analyze | |||
5310 | // this phi as an addrec if it has a unique entry value and a unique | |||
5311 | // backedge value. | |||
5312 | Value *BEValueV = nullptr, *StartValueV = nullptr; | |||
5313 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { | |||
5314 | Value *V = PN->getIncomingValue(i); | |||
5315 | if (L->contains(PN->getIncomingBlock(i))) { | |||
5316 | if (!BEValueV) { | |||
5317 | BEValueV = V; | |||
5318 | } else if (BEValueV != V) { | |||
5319 | BEValueV = nullptr; | |||
5320 | break; | |||
5321 | } | |||
5322 | } else if (!StartValueV) { | |||
5323 | StartValueV = V; | |||
5324 | } else if (StartValueV != V) { | |||
5325 | StartValueV = nullptr; | |||
5326 | break; | |||
5327 | } | |||
5328 | } | |||
5329 | if (!BEValueV || !StartValueV) | |||
5330 | return nullptr; | |||
5331 | ||||
5332 | assert(ValueExprMap.find_as(PN) == ValueExprMap.end() && | |||
5333 | "PHI node already processed?"); | |||
5334 | ||||
5335 | // First, try to find an AddRec expression without creating a fictitious symbolic | |||
5336 | // value for PN. | |||
5337 | if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV)) | |||
5338 | return S; | |||
5339 | ||||
5340 | // Handle PHI node value symbolically. | |||
5341 | const SCEV *SymbolicName = getUnknown(PN); | |||
5342 | ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName}); | |||
5343 | ||||
5344 | // Using this symbolic name for the PHI, analyze the value coming around | |||
5345 | // the back-edge. | |||
5346 | const SCEV *BEValue = getSCEV(BEValueV); | |||
5347 | ||||
5348 | // NOTE: If BEValue is loop invariant, we know that the PHI node just | |||
5349 | // has a special value for the first iteration of the loop. | |||
5350 | ||||
5351 | // If the value coming around the backedge is an add with the symbolic | |||
5352 | // value we just inserted, then we found a simple induction variable! | |||
5353 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { | |||
5354 | // If there is a single occurrence of the symbolic value, replace it | |||
5355 | // with a recurrence. | |||
5356 | unsigned FoundIndex = Add->getNumOperands(); | |||
5357 | for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) | |||
5358 | if (Add->getOperand(i) == SymbolicName) | |||
5359 | if (FoundIndex == e) { | |||
5360 | FoundIndex = i; | |||
5361 | break; | |||
5362 | } | |||
5363 | ||||
5364 | if (FoundIndex != Add->getNumOperands()) { | |||
5365 | // Create an add with everything but the specified operand. | |||
5366 | SmallVector<const SCEV *, 8> Ops; | |||
5367 | for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) | |||
5368 | if (i != FoundIndex) | |||
5369 | Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i), | |||
5370 | L, *this)); | |||
5371 | const SCEV *Accum = getAddExpr(Ops); | |||
5372 | ||||
5373 | // This is not a valid addrec if the step amount is varying each | |||
5374 | // loop iteration, but is not itself an addrec in this loop. | |||
5375 | if (isLoopInvariant(Accum, L) || | |||
5376 | (isa<SCEVAddRecExpr>(Accum) && | |||
5377 | cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { | |||
5378 | SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; | |||
5379 | ||||
5380 | if (auto BO = MatchBinaryOp(BEValueV, DT)) { | |||
5381 | if (BO->Opcode == Instruction::Add && BO->LHS == PN) { | |||
5382 | if (BO->IsNUW) | |||
5383 | Flags = setFlags(Flags, SCEV::FlagNUW); | |||
5384 | if (BO->IsNSW) | |||
5385 | Flags = setFlags(Flags, SCEV::FlagNSW); | |||
5386 | } | |||
5387 | } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) { | |||
5388 | // If the increment is an inbounds GEP, then we know the address | |||
5389 | // space cannot be wrapped around. We cannot make any guarantee | |||
5390 | // about signed or unsigned overflow because pointers are | |||
5391 | // unsigned but we may have a negative index from the base | |||
5392 | // pointer. We can guarantee that no unsigned wrap occurs if the | |||
5393 | // indices form a positive value. | |||
5394 | if (GEP->isInBounds() && GEP->getOperand(0) == PN) { | |||
5395 | Flags = setFlags(Flags, SCEV::FlagNW); | |||
5396 | ||||
5397 | const SCEV *Ptr = getSCEV(GEP->getPointerOperand()); | |||
5398 | if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr))) | |||
5399 | Flags = setFlags(Flags, SCEV::FlagNUW); | |||
5400 | } | |||
5401 | ||||
5402 | // We cannot transfer nuw and nsw flags from subtraction | |||
5403 | // operations -- sub nuw X, Y is not the same as add nuw X, -Y | |||
5404 | // for instance. | |||
5405 | } | |||
5406 | ||||
5407 | const SCEV *StartVal = getSCEV(StartValueV); | |||
5408 | const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); | |||
5409 | ||||
5410 | // Okay, for the entire analysis of this edge we assumed the PHI | |||
5411 | // to be symbolic. We now need to go back and purge all of the | |||
5412 | // entries for the scalars that use the symbolic expression. | |||
5413 | forgetSymbolicName(PN, SymbolicName); | |||
5414 | ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; | |||
5415 | ||||
5416 | // We can add Flags to the post-inc expression only if we | |||
5417 | // know that it is *undefined behavior* for BEValueV to | |||
5418 | // overflow. | |||
5419 | if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) | |||
5420 | if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) | |||
5421 | (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); | |||
5422 | ||||
5423 | return PHISCEV; | |||
5424 | } | |||
5425 | } | |||
5426 | } else { | |||
5427 | // Otherwise, this could be a loop like this: | |||
5428 | // i = 0; for (j = 1; ..; ++j) { .... i = j; } | |||
5429 | // In this case, j = {1,+,1} and BEValue is j. | |||
5430 | // Because the other in-value of i (0) fits the evolution of BEValue, | |||
5431 | // i really is an addrec evolution. | |||
5432 | // | |||
5433 | // We can generalize this by saying that i is the value of BEValue | |||
5434 | // shifted by one iteration: | |||
5435 | // PHI(f(0), f({1,+,1})) --> f({0,+,1}) | |||
5436 | const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this); | |||
5437 | const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false); | |||
5438 | if (Shifted != getCouldNotCompute() && | |||
5439 | Start != getCouldNotCompute()) { | |||
5440 | const SCEV *StartVal = getSCEV(StartValueV); | |||
5441 | if (Start == StartVal) { | |||
5442 | // Okay, for the entire analysis of this edge we assumed the PHI | |||
5443 | // to be symbolic. We now need to go back and purge all of the | |||
5444 | // entries for the scalars that use the symbolic expression. | |||
5445 | forgetSymbolicName(PN, SymbolicName); | |||
5446 | ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted; | |||
5447 | return Shifted; | |||
5448 | } | |||
5449 | } | |||
5450 | } | |||
5451 | ||||
5452 | // Remove the temporary PHI node SCEV that was inserted while intending | |||
5453 | // to create an AddRecExpr for this PHI node. We cannot keep this temporary, | |||
5454 | // as it would prevent later (possibly simpler) SCEV expressions from being | |||
5455 | // added to the ValueExprMap. | |||
5456 | eraseValueFromMap(PN); | |||
5457 | ||||
5458 | return nullptr; | |||
5459 | } | |||
5460 | ||||
5461 | // Checks if the SCEV S is available at BB. S is considered available at BB | |||
5462 | // if S can be materialized at BB without introducing a fault. | |||
5463 | static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S, | |||
5464 | BasicBlock *BB) { | |||
5465 | struct CheckAvailable { | |||
5466 | bool TraversalDone = false; | |||
5467 | bool Available = true; | |||
5468 | ||||
5469 | const Loop *L = nullptr; // The loop BB is in (can be nullptr) | |||
5470 | BasicBlock *BB = nullptr; | |||
5471 | DominatorTree &DT; | |||
5472 | ||||
5473 | CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT) | |||
5474 | : L(L), BB(BB), DT(DT) {} | |||
5475 | ||||
5476 | bool setUnavailable() { | |||
5477 | TraversalDone = true; | |||
5478 | Available = false; | |||
5479 | return false; | |||
5480 | } | |||
5481 | ||||
5482 | bool follow(const SCEV *S) { | |||
5483 | switch (S->getSCEVType()) { | |||
5484 | case scConstant: | |||
5485 | case scPtrToInt: | |||
5486 | case scTruncate: | |||
5487 | case scZeroExtend: | |||
5488 | case scSignExtend: | |||
5489 | case scAddExpr: | |||
5490 | case scMulExpr: | |||
5491 | case scUMaxExpr: | |||
5492 | case scSMaxExpr: | |||
5493 | case scUMinExpr: | |||
5494 | case scSMinExpr: | |||
5495 | // These expressions are available if all of their operands are. | |||
5496 | return true; | |||
5497 | ||||
5498 | case scAddRecExpr: { | |||
5499 | // We allow add recurrences for the loop containing BB, or for some | |||
5500 | // outer loop. This guarantees availability because the value of the | |||
5501 | // add recurrence at BB is simply the "current" value of the induction | |||
5502 | // variable. We can relax this in the future; for instance an add | |||
5503 | // recurrence on a sibling dominating loop is also available at BB. | |||
5504 | const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop(); | |||
5505 | if (L && (ARLoop == L || ARLoop->contains(L))) | |||
5506 | return true; | |||
5507 | ||||
5508 | return setUnavailable(); | |||
5509 | } | |||
5510 | ||||
5511 | case scUnknown: { | |||
5512 | // For SCEVUnknown, we check for simple dominance. | |||
5513 | const auto *SU = cast<SCEVUnknown>(S); | |||
5514 | Value *V = SU->getValue(); | |||
5515 | ||||
5516 | if (isa<Argument>(V)) | |||
5517 | return false; | |||
5518 | ||||
5519 | if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB)) | |||
5520 | return false; | |||
5521 | ||||
5522 | return setUnavailable(); | |||
5523 | } | |||
5524 | ||||
5525 | case scUDivExpr: | |||
5526 | case scCouldNotCompute: | |||
5527 | // We do not try to be smart about these at all. | |||
5528 | return setUnavailable(); | |||
5529 | } | |||
5530 | llvm_unreachable("Unknown SCEV kind!"); | |||
5531 | } | |||
5532 | ||||
5533 | bool isDone() { return TraversalDone; } | |||
5534 | }; | |||
5535 | ||||
5536 | CheckAvailable CA(L, BB, DT); | |||
5537 | SCEVTraversal<CheckAvailable> ST(CA); | |||
5538 | ||||
5539 | ST.visitAll(S); | |||
5540 | return CA.Available; | |||
5541 | } | |||
5542 | ||||
5543 | // Try to match a control flow sequence that branches out at BI and merges back | |||
5544 | // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful | |||
5545 | // match. | |||
5546 | static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, | |||
5547 | Value *&C, Value *&LHS, Value *&RHS) { | |||
5548 | C = BI->getCondition(); | |||
5549 | ||||
5550 | BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); | |||
5551 | BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); | |||
5552 | ||||
5553 | if (!LeftEdge.isSingleEdge()) | |||
5554 | return false; | |||
5555 | ||||
5556 | assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); | |||
5557 | ||||
5558 | Use &LeftUse = Merge->getOperandUse(0); | |||
5559 | Use &RightUse = Merge->getOperandUse(1); | |||
5560 | ||||
5561 | if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { | |||
5562 | LHS = LeftUse; | |||
5563 | RHS = RightUse; | |||
5564 | return true; | |||
5565 | } | |||
5566 | ||||
5567 | if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { | |||
5568 | LHS = RightUse; | |||
5569 | RHS = LeftUse; | |||
5570 | return true; | |||
5571 | } | |||
5572 | ||||
5573 | return false; | |||
5574 | } | |||
5575 | ||||
5576 | const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { | |||
5577 | auto IsReachable = | |||
5578 | [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; | |||
5579 | if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { | |||
5580 | const Loop *L = LI.getLoopFor(PN->getParent()); | |||
5581 | ||||
5582 | // We don't want to break LCSSA, even in a SCEV expression tree. | |||
5583 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) | |||
5584 | if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) | |||
5585 | return nullptr; | |||
5586 | ||||
5587 | // Try to match | |||
5588 | // | |||
5589 | // br %cond, label %left, label %right | |||
5590 | // left: | |||
5591 | // br label %merge | |||
5592 | // right: | |||
5593 | // br label %merge | |||
5594 | // merge: | |||
5595 | // V = phi [ %x, %left ], [ %y, %right ] | |||
5596 | // | |||
5597 | // as "select %cond, %x, %y" | |||
5598 | ||||
5599 | BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); | |||
5600 | assert(IDom && "At least the entry block should dominate PN"); | |||
5601 | ||||
5602 | auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); | |||
5603 | Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; | |||
5604 | ||||
5605 | if (BI && BI->isConditional() && | |||
5606 | BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && | |||
5607 | IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && | |||
5608 | IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) | |||
5609 | return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); | |||
5610 | } | |||
5611 | ||||
5612 | return nullptr; | |||
5613 | } | |||
5614 | ||||
5615 | const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { | |||
5616 | if (const SCEV *S = createAddRecFromPHI(PN)) | |||
5617 | return S; | |||
5618 | ||||
5619 | if (const SCEV *S = createNodeFromSelectLikePHI(PN)) | |||
5620 | return S; | |||
5621 | ||||
5622 | // If the PHI has a single incoming value, follow that value, unless the | |||
5623 | // PHI's incoming blocks are in a different loop, in which case doing so | |||
5624 | // risks breaking LCSSA form. Instcombine would normally zap these, but | |||
5625 | // it doesn't have DominatorTree information, so it may miss cases. | |||
5626 | if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) | |||
5627 | if (LI.replacementPreservesLCSSAForm(PN, V)) | |||
5628 | return getSCEV(V); | |||
5629 | ||||
5630 | // If it's not a loop phi, we can't handle it yet. | |||
5631 | return getUnknown(PN); | |||
5632 | } | |||
5633 | ||||
5634 | const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, | |||
5635 | Value *Cond, | |||
5636 | Value *TrueVal, | |||
5637 | Value *FalseVal) { | |||
5638 | // Handle "constant" branch or select. This can occur for instance when a | |||
5639 | // loop pass transforms an inner loop and moves on to process the outer loop. | |||
5640 | if (auto *CI = dyn_cast<ConstantInt>(Cond)) | |||
5641 | return getSCEV(CI->isOne() ? TrueVal : FalseVal); | |||
5642 | ||||
5643 | // Try to match some simple smax or umax patterns. | |||
5644 | auto *ICI = dyn_cast<ICmpInst>(Cond); | |||
5645 | if (!ICI) | |||
5646 | return getUnknown(I); | |||
5647 | ||||
5648 | Value *LHS = ICI->getOperand(0); | |||
5649 | Value *RHS = ICI->getOperand(1); | |||
5650 | ||||
5651 | switch (ICI->getPredicate()) { | |||
5652 | case ICmpInst::ICMP_SLT: | |||
5653 | case ICmpInst::ICMP_SLE: | |||
5654 | case ICmpInst::ICMP_ULT: | |||
5655 | case ICmpInst::ICMP_ULE: | |||
5656 | std::swap(LHS, RHS); | |||
5657 | LLVM_FALLTHROUGH; | |||
5658 | case ICmpInst::ICMP_SGT: | |||
5659 | case ICmpInst::ICMP_SGE: | |||
5660 | case ICmpInst::ICMP_UGT: | |||
5661 | case ICmpInst::ICMP_UGE: | |||
5662 | // a > b ? a+x : b+x -> max(a, b)+x | |||
5663 | // a > b ? b+x : a+x -> min(a, b)+x | |||
5664 | if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { | |||
5665 | bool Signed = ICI->isSigned(); | |||
5666 | const SCEV *LA = getSCEV(TrueVal); | |||
5667 | const SCEV *RA = getSCEV(FalseVal); | |||
5668 | const SCEV *LS = getSCEV(LHS); | |||
5669 | const SCEV *RS = getSCEV(RHS); | |||
5670 | if (LA->getType()->isPointerTy()) { | |||
5671 | // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA. | |||
5672 | // Need to make sure we can't produce weird expressions involving | |||
5673 | // negated pointers. | |||
5674 | if (LA == LS && RA == RS) | |||
5675 | return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS); | |||
5676 | if (LA == RS && RA == LS) | |||
5677 | return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS); | |||
5678 | } | |||
5679 | auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * { | |||
5680 | if (Op->getType()->isPointerTy()) { | |||
5681 | Op = getLosslessPtrToIntExpr(Op); | |||
5682 | if (isa<SCEVCouldNotCompute>(Op)) | |||
5683 | return Op; | |||
5684 | } | |||
5685 | if (Signed) | |||
5686 | Op = getNoopOrSignExtend(Op, I->getType()); | |||
5687 | else | |||
5688 | Op = getNoopOrZeroExtend(Op, I->getType()); | |||
5689 | return Op; | |||
5690 | }; | |||
5691 | LS = CoerceOperand(LS); | |||
5692 | RS = CoerceOperand(RS); | |||
5693 | if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS)) | |||
5694 | break; | |||
5695 | const SCEV *LDiff = getMinusSCEV(LA, LS); | |||
5696 | const SCEV *RDiff = getMinusSCEV(RA, RS); | |||
5697 | if (LDiff == RDiff) | |||
5698 | return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS), | |||
5699 | LDiff); | |||
5700 | LDiff = getMinusSCEV(LA, RS); | |||
5701 | RDiff = getMinusSCEV(RA, LS); | |||
5702 | if (LDiff == RDiff) | |||
5703 | return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS), | |||
5704 | LDiff); | |||
5705 | } | |||
5706 | break; | |||
5707 | case ICmpInst::ICMP_NE: | |||
5708 | // n != 0 ? n+x : 1+x -> umax(n, 1)+x | |||
5709 | if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && | |||
5710 | isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { | |||
5711 | const SCEV *One = getOne(I->getType()); | |||
5712 | const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); | |||
5713 | const SCEV *LA = getSCEV(TrueVal); | |||
5714 | const SCEV *RA = getSCEV(FalseVal); | |||
5715 | const SCEV *LDiff = getMinusSCEV(LA, LS); | |||
5716 | const SCEV *RDiff = getMinusSCEV(RA, One); | |||
5717 | if (LDiff == RDiff) | |||
5718 | return getAddExpr(getUMaxExpr(One, LS), LDiff); | |||
5719 | } | |||
5720 | break; | |||
5721 | case ICmpInst::ICMP_EQ: | |||
5722 | // n == 0 ? 1+x : n+x -> umax(n, 1)+x | |||
5723 | if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && | |||
5724 | isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { | |||
5725 | const SCEV *One = getOne(I->getType()); | |||
5726 | const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); | |||
5727 | const SCEV *LA = getSCEV(TrueVal); | |||
5728 | const SCEV *RA = getSCEV(FalseVal); | |||
5729 | const SCEV *LDiff = getMinusSCEV(LA, One); | |||
5730 | const SCEV *RDiff = getMinusSCEV(RA, LS); | |||
5731 | if (LDiff == RDiff) | |||
5732 | return getAddExpr(getUMaxExpr(One, LS), LDiff); | |||
5733 | } | |||
5734 | break; | |||
5735 | default: | |||
5736 | break; | |||
5737 | } | |||
5738 | ||||
5739 | return getUnknown(I); | |||
5740 | } | |||
5741 | ||||
5742 | /// Expand GEP instructions into add and multiply operations. This allows them | |||
5743 | /// to be analyzed by regular SCEV code. | |||
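| /// Editor's illustration: with a 4-byte i32 and a 64-bit index %i, | |||
| ///   getelementptr i32, i32* %p, i64 %i | |||
| /// is modeled (roughly) as the SCEV ((4 * %i) + %p). | |||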
5744 | const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { | |||
5745 | // Don't attempt to analyze GEPs over unsized objects. | |||
5746 | if (!GEP->getSourceElementType()->isSized()) | |||
5747 | return getUnknown(GEP); | |||
5748 | ||||
5749 | SmallVector<const SCEV *, 4> IndexExprs; | |||
5750 | for (Value *Index : GEP->indices()) | |||
5751 | IndexExprs.push_back(getSCEV(Index)); | |||
5752 | return getGEPExpr(GEP, IndexExprs); | |||
5753 | } | |||
5754 | ||||
5755 | uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { | |||
5756 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) | |||
5757 | return C->getAPInt().countTrailingZeros(); | |||
5758 | ||||
5759 | if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S)) | |||
5760 | return GetMinTrailingZeros(I->getOperand()); | |||
5761 | ||||
5762 | if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) | |||
5763 | return std::min(GetMinTrailingZeros(T->getOperand()), | |||
5764 | (uint32_t)getTypeSizeInBits(T->getType())); | |||
5765 | ||||
5766 | if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { | |||
5767 | uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); | |||
5768 | return OpRes == getTypeSizeInBits(E->getOperand()->getType()) | |||
5769 | ? getTypeSizeInBits(E->getType()) | |||
5770 | : OpRes; | |||
5771 | } | |||
5772 | ||||
5773 | if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { | |||
5774 | uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); | |||
5775 | return OpRes == getTypeSizeInBits(E->getOperand()->getType()) | |||
5776 | ? getTypeSizeInBits(E->getType()) | |||
5777 | : OpRes; | |||
5778 | } | |||
5779 | ||||
5780 | if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { | |||
5781 | // The result is the min of all operands' results. | |||
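| // Editor's illustration: (a multiple of 8) + (a multiple of 4) is | |||
| // only known to be a multiple of 4, i.e. min(3, 2) == 2 trailing | |||
| // zero bits. | |||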
5782 | uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); | |||
5783 | for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) | |||
5784 | MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); | |||
5785 | return MinOpRes; | |||
5786 | } | |||
5787 | ||||
5788 | if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { | |||
5789 | // The result is the sum of all operands' results. | |||
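| // Editor's illustration: (a multiple of 4) * (a multiple of 8) is a | |||
| // multiple of 32, i.e. 2 + 3 == 5 trailing zero bits, clamped to the | |||
| // bit width of the type. | |||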
5790 | uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); | |||
5791 | uint32_t BitWidth = getTypeSizeInBits(M->getType()); | |||
5792 | for (unsigned i = 1, e = M->getNumOperands(); | |||
5793 | SumOpRes != BitWidth && i != e; ++i) | |||
5794 | SumOpRes = | |||
5795 | std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); | |||
5796 | return SumOpRes; | |||
5797 | } | |||
5798 | ||||
5799 | if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { | |||
5800 | // The result is the min of all operands' results. | |||
5801 | uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); | |||
5802 | for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) | |||
5803 | MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); | |||
5804 | return MinOpRes; | |||
5805 | } | |||
5806 | ||||
5807 | if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { | |||
5808 | // The result is the min of all operands' results. | |||
5809 | uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); | |||
5810 | for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) | |||
5811 | MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); | |||
5812 | return MinOpRes; | |||
5813 | } | |||
5814 | ||||
5815 | if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { | |||
5816 | // The result is the min of all operands' results. | |||
5817 | uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); | |||
5818 | for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) | |||
5819 | MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); | |||
5820 | return MinOpRes; | |||
5821 | } | |||
5822 | ||||
5823 | if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { | |||
5824 | // For a SCEVUnknown, ask ValueTracking. | |||
5825 | KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); | |||
5826 | return Known.countMinTrailingZeros(); | |||
5827 | } | |||
5828 | ||||
5829 | // SCEVUDivExpr | |||
5830 | return 0; | |||
5831 | } | |||
5832 | ||||
5833 | uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { | |||
5834 | auto I = MinTrailingZerosCache.find(S); | |||
5835 | if (I != MinTrailingZerosCache.end()) | |||
5836 | return I->second; | |||
5837 | ||||
5838 | uint32_t Result = GetMinTrailingZerosImpl(S); | |||
5839 | auto InsertPair = MinTrailingZerosCache.insert({S, Result}); | |||
5840 | assert(InsertPair.second && "Should insert a new key"); | |||
5841 | return InsertPair.first->second; | |||
5842 | } | |||
5843 | ||||
5844 | /// Helper method to assign a range to V from metadata present in the IR. | |||
5845 | static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { | |||
5846 | if (Instruction *I = dyn_cast<Instruction>(V)) | |||
5847 | if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) | |||
5848 | return getConstantRangeFromMetadata(*MD); | |||
5849 | ||||
5850 | return None; | |||
5851 | } | |||
5852 | ||||
5853 | void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec, | |||
5854 | SCEV::NoWrapFlags Flags) { | |||
5855 | if (AddRec->getNoWrapFlags(Flags) != Flags) { | |||
5856 | AddRec->setNoWrapFlags(Flags); | |||
5857 | UnsignedRanges.erase(AddRec); | |||
5858 | SignedRanges.erase(AddRec); | |||
5859 | } | |||
5860 | } | |||
5861 | ||||
5862 | ConstantRange ScalarEvolution:: | |||
5863 | getRangeForUnknownRecurrence(const SCEVUnknown *U) { | |||
5864 | const DataLayout &DL = getDataLayout(); | |||
5865 | ||||
5866 | unsigned BitWidth = getTypeSizeInBits(U->getType()); | |||
5867 | const ConstantRange FullSet(BitWidth, /*isFullSet=*/true); | |||
5868 | ||||
5869 | // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then | |||
5870 | // use information about the trip count to improve our available range. Note | |||
5871 | // that the trip count independent cases are already handled by known bits. | |||
5872 | // WARNING: The definition of recurrence used here is subtly different from | |||
5873 | // the one used by AddRec (and thus most of this file). Step is allowed to | |||
5874 | // be arbitrarily loop varying here, where AddRec allows only loop invariant | |||
5875 | // and other addrecs in the same loop (for non-affine addrecs). The code | |||
5876 | // below intentionally handles the case where step is not loop invariant. | |||
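| // Editor's illustration: a shift recurrence such as | |||
| //   %p   = phi i32 [ %start, %preheader ], [ %shr, %loop ] | |||
| //   %shr = lshr i32 %p, %amt | |||
| // is handled here even when %amt varies from iteration to iteration. | |||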
5877 | auto *P = dyn_cast<PHINode>(U->getValue()); | |||
5878 | if (!P) | |||
5879 | return FullSet; | |||
5880 | ||||
5881 | // Make sure that no Phi input comes from an unreachable block. Otherwise, | |||
5882 | // even the values that are not available in these blocks may come from them, | |||
5883 | // and this leads to a false-positive recurrence test. | |||
5884 | for (auto *Pred : predecessors(P->getParent())) | |||
5885 | if (!DT.isReachableFromEntry(Pred)) | |||
5886 | return FullSet; | |||
5887 | ||||
5888 | BinaryOperator *BO; | |||
5889 | Value *Start, *Step; | |||
5890 | if (!matchSimpleRecurrence(P, BO, Start, Step)) | |||
5891 | return FullSet; | |||
5892 | ||||
5893 | // If we found a recurrence in reachable code, we must be in a loop. Note | |||
5894 | // that BO might be in some subloop of L, and that's completely okay. | |||
5895 | auto *L = LI.getLoopFor(P->getParent()); | |||
5896 | assert(L && L->getHeader() == P->getParent()); | |||
5897 | if (!L->contains(BO->getParent())) | |||
5898 | // NOTE: This bailout should be an assert instead. However, asserting | |||
5899 | // the condition here exposes a case where LoopFusion is querying SCEV | |||
5900 | // with malformed loop information during the midst of the transform. | |||
5901 | // There doesn't appear to be an obvious fix, so for the moment we bail | |||
5902 | // out until the caller issue can be fixed. PR49566 tracks the bug. | |||
5903 | return FullSet; | |||
5904 | ||||
5905 | // TODO: Extend to other opcodes such as mul and div | |||
5906 | switch (BO->getOpcode()) { | |||
5907 | default: | |||
5908 | return FullSet; | |||
5909 | case Instruction::AShr: | |||
5910 | case Instruction::LShr: | |||
5911 | case Instruction::Shl: | |||
5912 | break; | |||
5913 | } | |||
5914 | ||||
5915 | if (BO->getOperand(0) != P) | |||