File: | src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis/ValueTracking.h |
Warning: | line 282, column 49 Called C++ object pointer is null |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- AssumeBundleBuilder.cpp - tools to preserve informations -*- C++ -*-===// | ||||
2 | // | ||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||
6 | // | ||||
7 | //===----------------------------------------------------------------------===// | ||||
8 | |||||
9 | #include "llvm/Transforms/Utils/AssumeBundleBuilder.h" | ||||
10 | #include "llvm/ADT/DepthFirstIterator.h" | ||||
11 | #include "llvm/ADT/MapVector.h" | ||||
12 | #include "llvm/ADT/Statistic.h" | ||||
13 | #include "llvm/Analysis/AssumeBundleQueries.h" | ||||
14 | #include "llvm/Analysis/AssumptionCache.h" | ||||
15 | #include "llvm/Analysis/ValueTracking.h" | ||||
16 | #include "llvm/IR/Dominators.h" | ||||
17 | #include "llvm/IR/Function.h" | ||||
18 | #include "llvm/IR/InstIterator.h" | ||||
19 | #include "llvm/IR/IntrinsicInst.h" | ||||
20 | #include "llvm/IR/Module.h" | ||||
21 | #include "llvm/InitializePasses.h" | ||||
22 | #include "llvm/Support/CommandLine.h" | ||||
23 | #include "llvm/Support/DebugCounter.h" | ||||
24 | #include "llvm/Transforms/Utils/Local.h" | ||||
25 | |||||
26 | using namespace llvm; | ||||
27 | |||||
namespace llvm {
// When set, preserve every attribute as assume-bundle knowledge, not only
// the kinds that isUsefullToPreserve() deems worthwhile.
// NOTE(review): the description string contains historical typos
// ("attrbitues", "usefull"); it is user-visible output and left untouched.
cl::opt<bool> ShouldPreserveAllAttributes(
    "assume-preserve-all", cl::init(false), cl::Hidden,
    cl::desc("enable preservation of all attrbitues. even those that are "
             "unlikely to be usefull"));

// Master switch for the knowledge-retention machinery in this file;
// off by default, so all passes below are no-ops unless it is enabled.
cl::opt<bool> EnableKnowledgeRetention(
    "enable-knowledge-retention", cl::init(false), cl::Hidden,
    cl::desc(
        "enable preservation of attributes throughout code transformation"));
} // namespace llvm
39 | |||||
40 | #define DEBUG_TYPE"assume-builder" "assume-builder" | ||||
41 | |||||
42 | STATISTIC(NumAssumeBuilt, "Number of assume built by the assume builder")static llvm::Statistic NumAssumeBuilt = {"assume-builder", "NumAssumeBuilt" , "Number of assume built by the assume builder"}; | ||||
43 | STATISTIC(NumBundlesInAssumes, "Total number of Bundles in the assume built")static llvm::Statistic NumBundlesInAssumes = {"assume-builder" , "NumBundlesInAssumes", "Total number of Bundles in the assume built" }; | ||||
44 | STATISTIC(NumAssumesMerged,static llvm::Statistic NumAssumesMerged = {"assume-builder", "NumAssumesMerged" , "Number of assume merged by the assume simplify pass"} | ||||
45 | "Number of assume merged by the assume simplify pass")static llvm::Statistic NumAssumesMerged = {"assume-builder", "NumAssumesMerged" , "Number of assume merged by the assume simplify pass"}; | ||||
46 | STATISTIC(NumAssumesRemoved,static llvm::Statistic NumAssumesRemoved = {"assume-builder", "NumAssumesRemoved", "Number of assume removed by the assume simplify pass" } | ||||
47 | "Number of assume removed by the assume simplify pass")static llvm::Statistic NumAssumesRemoved = {"assume-builder", "NumAssumesRemoved", "Number of assume removed by the assume simplify pass" }; | ||||
48 | |||||
49 | DEBUG_COUNTER(BuildAssumeCounter, "assume-builder-counter",static const unsigned BuildAssumeCounter = DebugCounter::registerCounter ("assume-builder-counter", "Controls which assumes gets created" ) | ||||
50 | "Controls which assumes gets created")static const unsigned BuildAssumeCounter = DebugCounter::registerCounter ("assume-builder-counter", "Controls which assumes gets created" ); | ||||
51 | |||||
52 | namespace { | ||||
53 | |||||
54 | bool isUsefullToPreserve(Attribute::AttrKind Kind) { | ||||
55 | switch (Kind) { | ||||
56 | case Attribute::NonNull: | ||||
57 | case Attribute::NoUndef: | ||||
58 | case Attribute::Alignment: | ||||
59 | case Attribute::Dereferenceable: | ||||
60 | case Attribute::DereferenceableOrNull: | ||||
61 | case Attribute::Cold: | ||||
62 | return true; | ||||
63 | default: | ||||
64 | return false; | ||||
65 | } | ||||
66 | } | ||||
67 | |||||
68 | /// This function will try to transform the given knowledge into a more | ||||
69 | /// canonical one. the canonical knowledge maybe the given one. | ||||
70 | RetainedKnowledge canonicalizedKnowledge(RetainedKnowledge RK, DataLayout DL) { | ||||
71 | switch (RK.AttrKind) { | ||||
72 | default: | ||||
73 | return RK; | ||||
74 | case Attribute::NonNull: | ||||
75 | RK.WasOn = getUnderlyingObject(RK.WasOn); | ||||
76 | return RK; | ||||
77 | case Attribute::Alignment: { | ||||
78 | Value *V = RK.WasOn->stripInBoundsOffsets([&](const Value *Strip) { | ||||
79 | if (auto *GEP = dyn_cast<GEPOperator>(Strip)) | ||||
80 | RK.ArgValue = | ||||
81 | MinAlign(RK.ArgValue, GEP->getMaxPreservedAlignment(DL).value()); | ||||
82 | }); | ||||
83 | RK.WasOn = V; | ||||
84 | return RK; | ||||
85 | } | ||||
86 | case Attribute::Dereferenceable: | ||||
87 | case Attribute::DereferenceableOrNull: { | ||||
88 | int64_t Offset = 0; | ||||
89 | Value *V = GetPointerBaseWithConstantOffset(RK.WasOn, Offset, DL, | ||||
90 | /*AllowNonInBounds*/ false); | ||||
91 | if (Offset < 0) | ||||
92 | return RK; | ||||
93 | RK.ArgValue = RK.ArgValue + Offset; | ||||
94 | RK.WasOn = V; | ||||
95 | } | ||||
96 | } | ||||
97 | return RK; | ||||
98 | } | ||||
99 | |||||
100 | /// This class contain all knowledge that have been gather while building an | ||||
101 | /// llvm.assume and the function to manipulate it. | ||||
102 | struct AssumeBuilderState { | ||||
103 | Module *M; | ||||
104 | |||||
105 | using MapKey = std::pair<Value *, Attribute::AttrKind>; | ||||
106 | SmallMapVector<MapKey, unsigned, 8> AssumedKnowledgeMap; | ||||
107 | Instruction *InstBeingModified = nullptr; | ||||
108 | AssumptionCache* AC = nullptr; | ||||
109 | DominatorTree* DT = nullptr; | ||||
110 | |||||
111 | AssumeBuilderState(Module *M, Instruction *I = nullptr, | ||||
112 | AssumptionCache *AC = nullptr, DominatorTree *DT = nullptr) | ||||
113 | : M(M), InstBeingModified(I), AC(AC), DT(DT) {} | ||||
114 | |||||
115 | bool tryToPreserveWithoutAddingAssume(RetainedKnowledge RK) { | ||||
116 | if (!InstBeingModified || !RK.WasOn) | ||||
117 | return false; | ||||
118 | bool HasBeenPreserved = false; | ||||
119 | Use* ToUpdate = nullptr; | ||||
120 | getKnowledgeForValue( | ||||
121 | RK.WasOn, {RK.AttrKind}, AC, | ||||
122 | [&](RetainedKnowledge RKOther, Instruction *Assume, | ||||
123 | const CallInst::BundleOpInfo *Bundle) { | ||||
124 | if (!isValidAssumeForContext(Assume, InstBeingModified, DT)) | ||||
125 | return false; | ||||
126 | if (RKOther.ArgValue >= RK.ArgValue) { | ||||
127 | HasBeenPreserved = true; | ||||
128 | return true; | ||||
129 | } else if (isValidAssumeForContext(InstBeingModified, Assume, DT)) { | ||||
130 | HasBeenPreserved = true; | ||||
131 | IntrinsicInst *Intr = cast<IntrinsicInst>(Assume); | ||||
132 | ToUpdate = &Intr->op_begin()[Bundle->Begin + ABA_Argument]; | ||||
133 | return true; | ||||
134 | } | ||||
135 | return false; | ||||
136 | }); | ||||
137 | if (ToUpdate) | ||||
138 | ToUpdate->set( | ||||
139 | ConstantInt::get(Type::getInt64Ty(M->getContext()), RK.ArgValue)); | ||||
140 | return HasBeenPreserved; | ||||
141 | } | ||||
142 | |||||
143 | bool isKnowledgeWorthPreserving(RetainedKnowledge RK) { | ||||
144 | if (!RK) | ||||
145 | return false; | ||||
146 | if (!RK.WasOn) | ||||
147 | return true; | ||||
148 | if (RK.WasOn->getType()->isPointerTy()) { | ||||
149 | Value *UnderlyingPtr = getUnderlyingObject(RK.WasOn); | ||||
150 | if (isa<AllocaInst>(UnderlyingPtr) || isa<GlobalValue>(UnderlyingPtr)) | ||||
151 | return false; | ||||
152 | } | ||||
153 | if (auto *Arg = dyn_cast<Argument>(RK.WasOn)) { | ||||
154 | if (Arg->hasAttribute(RK.AttrKind) && | ||||
155 | (!Attribute::isIntAttrKind(RK.AttrKind) || | ||||
156 | Arg->getAttribute(RK.AttrKind).getValueAsInt() >= RK.ArgValue)) | ||||
157 | return false; | ||||
158 | return true; | ||||
159 | } | ||||
160 | if (auto *Inst = dyn_cast<Instruction>(RK.WasOn)) | ||||
161 | if (wouldInstructionBeTriviallyDead(Inst)) { | ||||
162 | if (RK.WasOn->use_empty()) | ||||
163 | return false; | ||||
164 | Use *SingleUse = RK.WasOn->getSingleUndroppableUse(); | ||||
165 | if (SingleUse && SingleUse->getUser() == InstBeingModified) | ||||
166 | return false; | ||||
167 | } | ||||
168 | return true; | ||||
169 | } | ||||
170 | |||||
171 | void addKnowledge(RetainedKnowledge RK) { | ||||
172 | RK = canonicalizedKnowledge(RK, M->getDataLayout()); | ||||
173 | |||||
174 | if (!isKnowledgeWorthPreserving(RK)) | ||||
175 | return; | ||||
176 | |||||
177 | if (tryToPreserveWithoutAddingAssume(RK)) | ||||
178 | return; | ||||
179 | MapKey Key{RK.WasOn, RK.AttrKind}; | ||||
180 | auto Lookup = AssumedKnowledgeMap.find(Key); | ||||
181 | if (Lookup == AssumedKnowledgeMap.end()) { | ||||
182 | AssumedKnowledgeMap[Key] = RK.ArgValue; | ||||
183 | return; | ||||
184 | } | ||||
185 | assert(((Lookup->second == 0 && RK.ArgValue == 0) ||((void)0) | ||||
186 | (Lookup->second != 0 && RK.ArgValue != 0)) &&((void)0) | ||||
187 | "inconsistent argument value")((void)0); | ||||
188 | |||||
189 | /// This is only desirable because for all attributes taking an argument | ||||
190 | /// higher is better. | ||||
191 | Lookup->second = std::max(Lookup->second, RK.ArgValue); | ||||
192 | } | ||||
193 | |||||
194 | void addAttribute(Attribute Attr, Value *WasOn) { | ||||
195 | if (Attr.isTypeAttribute() || Attr.isStringAttribute() || | ||||
196 | (!ShouldPreserveAllAttributes && | ||||
197 | !isUsefullToPreserve(Attr.getKindAsEnum()))) | ||||
198 | return; | ||||
199 | unsigned AttrArg = 0; | ||||
200 | if (Attr.isIntAttribute()) | ||||
201 | AttrArg = Attr.getValueAsInt(); | ||||
202 | addKnowledge({Attr.getKindAsEnum(), AttrArg, WasOn}); | ||||
203 | } | ||||
204 | |||||
205 | void addCall(const CallBase *Call) { | ||||
206 | auto addAttrList = [&](AttributeList AttrList) { | ||||
207 | for (unsigned Idx = AttributeList::FirstArgIndex; | ||||
208 | Idx < AttrList.getNumAttrSets(); Idx++) | ||||
209 | for (Attribute Attr : AttrList.getAttributes(Idx)) { | ||||
210 | bool IsPoisonAttr = Attr.hasAttribute(Attribute::NonNull) || | ||||
211 | Attr.hasAttribute(Attribute::Alignment); | ||||
212 | if (!IsPoisonAttr || Call->isPassingUndefUB(Idx - 1)) | ||||
213 | addAttribute(Attr, Call->getArgOperand(Idx - 1)); | ||||
214 | } | ||||
215 | for (Attribute Attr : AttrList.getFnAttributes()) | ||||
216 | addAttribute(Attr, nullptr); | ||||
217 | }; | ||||
218 | addAttrList(Call->getAttributes()); | ||||
219 | if (Function *Fn = Call->getCalledFunction()) | ||||
220 | addAttrList(Fn->getAttributes()); | ||||
221 | } | ||||
222 | |||||
223 | AssumeInst *build() { | ||||
224 | if (AssumedKnowledgeMap.empty()) | ||||
225 | return nullptr; | ||||
226 | if (!DebugCounter::shouldExecute(BuildAssumeCounter)) | ||||
227 | return nullptr; | ||||
228 | Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume); | ||||
229 | LLVMContext &C = M->getContext(); | ||||
230 | SmallVector<OperandBundleDef, 8> OpBundle; | ||||
231 | for (auto &MapElem : AssumedKnowledgeMap) { | ||||
232 | SmallVector<Value *, 2> Args; | ||||
233 | if (MapElem.first.first) | ||||
234 | Args.push_back(MapElem.first.first); | ||||
235 | |||||
236 | /// This is only valid because for all attribute that currently exist a | ||||
237 | /// value of 0 is useless. and should not be preserved. | ||||
238 | if (MapElem.second) | ||||
239 | Args.push_back(ConstantInt::get(Type::getInt64Ty(M->getContext()), | ||||
240 | MapElem.second)); | ||||
241 | OpBundle.push_back(OperandBundleDefT<Value *>( | ||||
242 | std::string(Attribute::getNameFromAttrKind(MapElem.first.second)), | ||||
243 | Args)); | ||||
244 | NumBundlesInAssumes++; | ||||
245 | } | ||||
246 | NumAssumeBuilt++; | ||||
247 | return cast<AssumeInst>(CallInst::Create( | ||||
248 | FnAssume, ArrayRef<Value *>({ConstantInt::getTrue(C)}), OpBundle)); | ||||
249 | } | ||||
250 | |||||
251 | void addAccessedPtr(Instruction *MemInst, Value *Pointer, Type *AccType, | ||||
252 | MaybeAlign MA) { | ||||
253 | unsigned DerefSize = MemInst->getModule() | ||||
254 | ->getDataLayout() | ||||
255 | .getTypeStoreSize(AccType) | ||||
256 | .getKnownMinSize(); | ||||
257 | if (DerefSize != 0) { | ||||
258 | addKnowledge({Attribute::Dereferenceable, DerefSize, Pointer}); | ||||
259 | if (!NullPointerIsDefined(MemInst->getFunction(), | ||||
260 | Pointer->getType()->getPointerAddressSpace())) | ||||
261 | addKnowledge({Attribute::NonNull, 0u, Pointer}); | ||||
262 | } | ||||
263 | if (MA.valueOrOne() > 1) | ||||
264 | addKnowledge( | ||||
265 | {Attribute::Alignment, unsigned(MA.valueOrOne().value()), Pointer}); | ||||
266 | } | ||||
267 | |||||
268 | void addInstruction(Instruction *I) { | ||||
269 | if (auto *Call
| ||||
270 | return addCall(Call); | ||||
271 | if (auto *Load
| ||||
272 | return addAccessedPtr(I, Load->getPointerOperand(), Load->getType(), | ||||
273 | Load->getAlign()); | ||||
274 | if (auto *Store = dyn_cast<StoreInst>(I)) | ||||
275 | return addAccessedPtr(I, Store->getPointerOperand(), | ||||
276 | Store->getValueOperand()->getType(), | ||||
277 | Store->getAlign()); | ||||
278 | // TODO: Add support for the other Instructions. | ||||
279 | // TODO: Maybe we should look around and merge with other llvm.assume. | ||||
280 | } | ||||
281 | }; | ||||
282 | |||||
283 | } // namespace | ||||
284 | |||||
285 | AssumeInst *llvm::buildAssumeFromInst(Instruction *I) { | ||||
286 | if (!EnableKnowledgeRetention) | ||||
| |||||
287 | return nullptr; | ||||
288 | AssumeBuilderState Builder(I->getModule()); | ||||
289 | Builder.addInstruction(I); | ||||
290 | return Builder.build(); | ||||
291 | } | ||||
292 | |||||
/// Preserve the knowledge carried by \p I into an llvm.assume inserted just
/// before \p I, and register it with the assumption cache when one is given.
/// No-op when -enable-knowledge-retention is off or \p I is a terminator
/// (an assume cannot be inserted before a terminator's successors resolve).
void llvm::salvageKnowledge(Instruction *I, AssumptionCache *AC,
                            DominatorTree *DT) {
  if (!EnableKnowledgeRetention || I->isTerminator())
    return;
  AssumeBuilderState Builder(I->getModule(), I, AC, DT);
  Builder.addInstruction(I);
  if (auto *Intr = Builder.build()) {
    Intr->insertBefore(I);
    if (AC)
      AC->registerAssumption(Intr);
  }
}
305 | |||||
/// Build a (not yet inserted) llvm.assume from an explicit list of retained
/// knowledge, using \p CtxI as the context for redundancy elimination.
/// Returns nullptr when every piece of knowledge was filtered out or already
/// available.
AssumeInst *
llvm::buildAssumeFromKnowledge(ArrayRef<RetainedKnowledge> Knowledge,
                               Instruction *CtxI, AssumptionCache *AC,
                               DominatorTree *DT) {
  AssumeBuilderState Builder(CtxI->getModule(), CtxI, AC, DT);
  for (const RetainedKnowledge &RK : Knowledge)
    Builder.addKnowledge(RK);
  return Builder.build();
}
315 | |||||
/// Canonicalize \p RK and drop it when it is not worth keeping or is already
/// covered by another assume valid at \p Assume. Returns the (possibly
/// canonicalized) knowledge to keep, or RetainedKnowledge::none() when the
/// caller should discard it.
RetainedKnowledge llvm::simplifyRetainedKnowledge(AssumeInst *Assume,
                                                  RetainedKnowledge RK,
                                                  AssumptionCache *AC,
                                                  DominatorTree *DT) {
  AssumeBuilderState Builder(Assume->getModule(), Assume, AC, DT);
  RK = canonicalizedKnowledge(RK, Assume->getModule()->getDataLayout());

  if (!Builder.isKnowledgeWorthPreserving(RK))
    return RetainedKnowledge::none();

  // Covered by an existing assume (possibly after strengthening it in place).
  if (Builder.tryToPreserveWithoutAddingAssume(RK))
    return RetainedKnowledge::none();
  return RK;
}
330 | |||||
331 | namespace { | ||||
332 | |||||
/// Driver for the assume-simplify transformations: drops redundant knowledge,
/// promotes knowledge to argument attributes, and merges neighboring assumes.
struct AssumeSimplify {
  Function &F;
  AssumptionCache &AC;
  DominatorTree *DT;
  LLVMContext &C;
  /// Assumes whose content changed and that may now be removable.
  SmallDenseSet<IntrinsicInst *> CleanupToDo;
  /// Bundle tag used to mark bundles as dead without erasing them mid-walk.
  StringMapEntry<uint32_t> *IgnoreTag;
  /// Assumes of F grouped per basic block, sorted in block order.
  SmallDenseMap<BasicBlock *, SmallVector<IntrinsicInst *, 4>, 8> BBToAssume;
  bool MadeChange = false;

  AssumeSimplify(Function &F, AssumptionCache &AC, DominatorTree *DT,
                 LLVMContext &C)
      : F(F), AC(AC), DT(DT), C(C),
        IgnoreTag(C.getOrInsertBundleTag(IgnoreBundleTag)) {}

  /// (Re)build BBToAssume from the assumption cache. When
  /// \p FilterBooleanArgument is set, assumes whose boolean condition is not a
  /// known-true constant are skipped (they may be deleted later and must not
  /// be merged into a new assume).
  void buildMapping(bool FilterBooleanArgument) {
    BBToAssume.clear();
    for (Value *V : AC.assumptions()) {
      if (!V)
        continue; // The cache holds weak handles; dead assumes show up null.
      IntrinsicInst *Assume = cast<IntrinsicInst>(V);
      if (FilterBooleanArgument) {
        auto *Arg = dyn_cast<ConstantInt>(Assume->getOperand(0));
        if (!Arg || Arg->isZero())
          continue;
      }
      BBToAssume[Assume->getParent()].push_back(Assume);
    }

    for (auto &Elem : BBToAssume) {
      llvm::sort(Elem.second,
                 [](const IntrinsicInst *LHS, const IntrinsicInst *RHS) {
                   return LHS->comesBefore(RHS);
                 });
    }
  }

  /// Remove all assumes in CleanupToDo if their boolean argument is true and
  /// ForceCleanup is set or the assume doesn't hold valuable knowledge.
  void RunCleanup(bool ForceCleanup) {
    for (IntrinsicInst *Assume : CleanupToDo) {
      auto *Arg = dyn_cast<ConstantInt>(Assume->getOperand(0));
      if (!Arg || Arg->isZero() ||
          (!ForceCleanup &&
           !isAssumeWithEmptyBundle(cast<AssumeInst>(*Assume))))
        continue;
      MadeChange = true;
      if (ForceCleanup)
        NumAssumesMerged++;
      else
        NumAssumesRemoved++;
      Assume->eraseFromParent();
    }
    CleanupToDo.clear();
  }

  /// Remove knowledge stored in an assume when it is already known via an
  /// attribute or another assume. When valid, this can instead update an
  /// existing knowledge in an attribute or another assume.
  void dropRedundantKnowledge() {
    struct MapValue {
      IntrinsicInst *Assume;
      unsigned ArgValue;
      CallInst::BundleOpInfo *BOI;
    };
    buildMapping(false);
    // Best knowledge seen so far for each (value, attr-kind) pair, along the
    // current depth-first path through the CFG.
    SmallDenseMap<std::pair<Value *, Attribute::AttrKind>,
                  SmallVector<MapValue, 2>, 16>
        Knowledge;
    for (BasicBlock *BB : depth_first(&F))
      for (Value *V : BBToAssume[BB]) {
        if (!V)
          continue;
        IntrinsicInst *Assume = cast<IntrinsicInst>(V);
        for (CallInst::BundleOpInfo &BOI : Assume->bundle_op_infos()) {
          // Mark the bundle dead: retag it as "ignore" and drop its WasOn use
          // so it no longer keeps the value alive.
          auto RemoveFromAssume = [&]() {
            CleanupToDo.insert(Assume);
            if (BOI.Begin != BOI.End) {
              Use *U = &Assume->op_begin()[BOI.Begin + ABA_WasOn];
              U->set(UndefValue::get(U->get()->getType()));
            }
            BOI.Tag = IgnoreTag;
          };
          if (BOI.Tag == IgnoreTag) {
            CleanupToDo.insert(Assume);
            continue;
          }
          RetainedKnowledge RK =
              getKnowledgeFromBundle(cast<AssumeInst>(*Assume), BOI);
          if (auto *Arg = dyn_cast_or_null<Argument>(RK.WasOn)) {
            bool HasSameKindAttr = Arg->hasAttribute(RK.AttrKind);
            // Already implied by an equal-or-stronger argument attribute.
            if (HasSameKindAttr)
              if (!Attribute::isIntAttrKind(RK.AttrKind) ||
                  Arg->getAttribute(RK.AttrKind).getValueAsInt() >=
                      RK.ArgValue) {
                RemoveFromAssume();
                continue;
              }
            // If the assume (in)dominates the function entry, the knowledge
            // holds on every path and can be promoted to an argument
            // attribute instead.
            if (isValidAssumeForContext(
                    Assume, &*F.getEntryBlock().getFirstInsertionPt()) ||
                Assume == &*F.getEntryBlock().getFirstInsertionPt()) {
              if (HasSameKindAttr)
                Arg->removeAttr(RK.AttrKind);
              Arg->addAttr(Attribute::get(C, RK.AttrKind, RK.ArgValue));
              MadeChange = true;
              RemoveFromAssume();
              continue;
            }
          }
          auto &Lookup = Knowledge[{RK.WasOn, RK.AttrKind}];
          for (MapValue &Elem : Lookup) {
            if (!isValidAssumeForContext(Elem.Assume, Assume, DT))
              continue;
            if (Elem.ArgValue >= RK.ArgValue) {
              // An earlier assume already carries stronger knowledge here.
              RemoveFromAssume();
              continue;
            } else if (isValidAssumeForContext(Assume, Elem.Assume, DT)) {
              // Strengthen the earlier assume in place instead of keeping two.
              Elem.Assume->op_begin()[Elem.BOI->Begin + ABA_Argument].set(
                  ConstantInt::get(Type::getInt64Ty(C), RK.ArgValue));
              MadeChange = true;
              RemoveFromAssume();
              continue;
            }
          }
          Lookup.push_back({Assume, RK.ArgValue, &BOI});
        }
      }
  }

  using MergeIterator = SmallVectorImpl<IntrinsicInst *>::iterator;

  /// Merge all Assumes from Begin to End and insert the resulting assume as
  /// high as possible in the basic block.
  void mergeRange(BasicBlock *BB, MergeIterator Begin, MergeIterator End) {
    if (Begin == End || std::next(Begin) == End)
      return; // Nothing to merge for 0 or 1 assumes.
    /// Provide no additional information so that AssumeBuilderState doesn't
    /// try to do any punning since it already has been done better.
    AssumeBuilderState Builder(F.getParent());

    /// For now it is initialized to the best value it could have
    Instruction *InsertPt = BB->getFirstNonPHI();
    if (isa<LandingPadInst>(InsertPt))
      InsertPt = InsertPt->getNextNode();
    for (IntrinsicInst *I : make_range(Begin, End)) {
      CleanupToDo.insert(I);
      for (CallInst::BundleOpInfo &BOI : I->bundle_op_infos()) {
        RetainedKnowledge RK =
            getKnowledgeFromBundle(cast<AssumeInst>(*I), BOI);
        if (!RK)
          continue;
        Builder.addKnowledge(RK);
        // The merged assume must come after any value it refers to.
        if (auto *I = dyn_cast_or_null<Instruction>(RK.WasOn))
          if (I->getParent() == InsertPt->getParent() &&
              (InsertPt->comesBefore(I) || InsertPt == I))
            InsertPt = I->getNextNode();
      }
    }

    /// Adjust InsertPt if it is before Begin, since mergeAssumes only
    /// guarantees we can place the resulting assume between Begin and End.
    if (InsertPt->comesBefore(*Begin))
      for (auto It = (*Begin)->getIterator(), E = InsertPt->getIterator();
           It != E; --It)
        if (!isGuaranteedToTransferExecutionToSuccessor(&*It)) {
          InsertPt = It->getNextNode();
          break;
        }
    auto *MergedAssume = Builder.build();
    if (!MergedAssume)
      return;
    MadeChange = true;
    MergedAssume->insertBefore(InsertPt);
    AC.registerAssumption(MergedAssume);
  }

  /// Merge assumes when they are in the same BasicBlock and for all
  /// instructions between them isGuaranteedToTransferExecutionToSuccessor
  /// returns true.
  void mergeAssumes() {
    buildMapping(true);

    SmallVector<MergeIterator, 4> SplitPoints;
    for (auto &Elem : BBToAssume) {
      SmallVectorImpl<IntrinsicInst *> &AssumesInBB = Elem.second;
      if (AssumesInBB.size() < 2)
        continue;
      /// AssumesInBB is already sorted by order in the block.

      // Walk the instructions between the first and last assume; every
      // instruction that may not transfer execution splits the mergeable
      // ranges.
      BasicBlock::iterator It = AssumesInBB.front()->getIterator();
      BasicBlock::iterator E = AssumesInBB.back()->getIterator();
      SplitPoints.push_back(AssumesInBB.begin());
      MergeIterator LastSplit = AssumesInBB.begin();
      for (; It != E; ++It)
        if (!isGuaranteedToTransferExecutionToSuccessor(&*It)) {
          for (; (*LastSplit)->comesBefore(&*It); ++LastSplit)
            ;
          if (SplitPoints.back() != LastSplit)
            SplitPoints.push_back(LastSplit);
        }
      SplitPoints.push_back(AssumesInBB.end());
      // Merge each maximal range between split points.
      for (auto SplitIt = SplitPoints.begin();
           SplitIt != std::prev(SplitPoints.end()); SplitIt++) {
        mergeRange(Elem.first, *SplitIt, *(SplitIt + 1));
      }
      SplitPoints.clear();
    }
  }
};
541 | |||||
/// Run the full assume-simplification pipeline on \p F.
/// Returns true when the function was modified.
bool simplifyAssumes(Function &F, AssumptionCache *AC, DominatorTree *DT) {
  AssumeSimplify AS(F, *AC, DT, F.getContext());

  /// Remove knowledge that is already known by a dominating other assume or an
  /// attribute.
  AS.dropRedundantKnowledge();

  /// Remove assumes that are empty.
  AS.RunCleanup(false);

  /// Merge assumes in the same basic block when possible.
  AS.mergeAssumes();

  /// Remove assumes that were merged.
  AS.RunCleanup(true);
  return AS.MadeChange;
}
559 | |||||
560 | } // namespace | ||||
561 | |||||
/// New-PM entry point for assume simplification. The pass is a no-op unless
/// -enable-knowledge-retention is set; it never invalidates analyses (it only
/// touches llvm.assume calls and attributes).
PreservedAnalyses AssumeSimplifyPass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  if (!EnableKnowledgeRetention)
    return PreservedAnalyses::all();
  // DominatorTree is optional here: only use it if already computed.
  simplifyAssumes(F, &AM.getResult<AssumptionAnalysis>(F),
                  AM.getCachedResult<DominatorTreeAnalysis>(F));
  return PreservedAnalyses::all();
}
570 | |||||
571 | namespace { | ||||
/// Legacy-PM wrapper around simplifyAssumes().
class AssumeSimplifyPassLegacyPass : public FunctionPass {
public:
  static char ID;

  AssumeSimplifyPassLegacyPass() : FunctionPass(ID) {
    initializeAssumeSimplifyPassLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override {
    if (skipFunction(F) || !EnableKnowledgeRetention)
      return false;
    AssumptionCache &AC =
        getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    // The dominator tree is optional; reuse it only when already available.
    DominatorTreeWrapperPass *DTWP =
        getAnalysisIfAvailable<DominatorTreeWrapperPass>();
    return simplifyAssumes(F, &AC, DTWP ? &DTWP->getDomTree() : nullptr);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();

    AU.setPreservesAll();
  }
};
596 | } // namespace | ||||
597 | |||||
598 | char AssumeSimplifyPassLegacyPass::ID = 0; | ||||
599 | |||||
600 | INITIALIZE_PASS_BEGIN(AssumeSimplifyPassLegacyPass, "assume-simplify",static void *initializeAssumeSimplifyPassLegacyPassPassOnce(PassRegistry &Registry) { | ||||
601 | "Assume Simplify", false, false)static void *initializeAssumeSimplifyPassLegacyPassPassOnce(PassRegistry &Registry) { | ||||
602 | INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)initializeAssumptionCacheTrackerPass(Registry); | ||||
603 | INITIALIZE_PASS_END(AssumeSimplifyPassLegacyPass, "assume-simplify",PassInfo *PI = new PassInfo( "Assume Simplify", "assume-simplify" , &AssumeSimplifyPassLegacyPass::ID, PassInfo::NormalCtor_t (callDefaultCtor<AssumeSimplifyPassLegacyPass>), false, false); Registry.registerPass(*PI, true); return PI; } static llvm::once_flag InitializeAssumeSimplifyPassLegacyPassPassFlag ; void llvm::initializeAssumeSimplifyPassLegacyPassPass(PassRegistry &Registry) { llvm::call_once(InitializeAssumeSimplifyPassLegacyPassPassFlag , initializeAssumeSimplifyPassLegacyPassPassOnce, std::ref(Registry )); } | ||||
604 | "Assume Simplify", false, false)PassInfo *PI = new PassInfo( "Assume Simplify", "assume-simplify" , &AssumeSimplifyPassLegacyPass::ID, PassInfo::NormalCtor_t (callDefaultCtor<AssumeSimplifyPassLegacyPass>), false, false); Registry.registerPass(*PI, true); return PI; } static llvm::once_flag InitializeAssumeSimplifyPassLegacyPassPassFlag ; void llvm::initializeAssumeSimplifyPassLegacyPassPass(PassRegistry &Registry) { llvm::call_once(InitializeAssumeSimplifyPassLegacyPassPassFlag , initializeAssumeSimplifyPassLegacyPassPassOnce, std::ref(Registry )); } | ||||
605 | |||||
/// Factory for the legacy pass manager; caller takes ownership.
FunctionPass *llvm::createAssumeSimplifyPass() {
  return new AssumeSimplifyPassLegacyPass();
}
609 | |||||
/// New-PM pass that salvages knowledge from every instruction of \p F into
/// llvm.assume calls. Only adds assumes, so all analyses are preserved.
PreservedAnalyses AssumeBuilderPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  AssumptionCache *AC = &AM.getResult<AssumptionAnalysis>(F);
  // DominatorTree is optional: used for redundancy checks when cached.
  DominatorTree* DT = AM.getCachedResult<DominatorTreeAnalysis>(F);
  for (Instruction &I : instructions(F))
    salvageKnowledge(&I, AC, DT);
  return PreservedAnalyses::all();
}
618 | |||||
619 | namespace { | ||||
/// Legacy-PM wrapper around salvageKnowledge() over all instructions.
class AssumeBuilderPassLegacyPass : public FunctionPass {
public:
  static char ID;

  AssumeBuilderPassLegacyPass() : FunctionPass(ID) {
    initializeAssumeBuilderPassLegacyPassPass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override {
    AssumptionCache &AC =
        getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    // Reuse the dominator tree only when another pass already computed it.
    DominatorTreeWrapperPass *DTWP =
        getAnalysisIfAvailable<DominatorTreeWrapperPass>();
    for (Instruction &I : instructions(F))
      salvageKnowledge(&I, &AC, DTWP ? &DTWP->getDomTree() : nullptr);
    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();

    AU.setPreservesAll();
  }
};
643 | } // namespace | ||||
644 | |||||
645 | char AssumeBuilderPassLegacyPass::ID = 0; | ||||
646 | |||||
647 | INITIALIZE_PASS_BEGIN(AssumeBuilderPassLegacyPass, "assume-builder",static void *initializeAssumeBuilderPassLegacyPassPassOnce(PassRegistry &Registry) { | ||||
648 | "Assume Builder", false, false)static void *initializeAssumeBuilderPassLegacyPassPassOnce(PassRegistry &Registry) { | ||||
649 | INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)initializeAssumptionCacheTrackerPass(Registry); | ||||
650 | INITIALIZE_PASS_END(AssumeBuilderPassLegacyPass, "assume-builder",PassInfo *PI = new PassInfo( "Assume Builder", "assume-builder" , &AssumeBuilderPassLegacyPass::ID, PassInfo::NormalCtor_t (callDefaultCtor<AssumeBuilderPassLegacyPass>), false, false ); Registry.registerPass(*PI, true); return PI; } static llvm ::once_flag InitializeAssumeBuilderPassLegacyPassPassFlag; void llvm::initializeAssumeBuilderPassLegacyPassPass(PassRegistry &Registry) { llvm::call_once(InitializeAssumeBuilderPassLegacyPassPassFlag , initializeAssumeBuilderPassLegacyPassPassOnce, std::ref(Registry )); } | ||||
651 | "Assume Builder", false, false)PassInfo *PI = new PassInfo( "Assume Builder", "assume-builder" , &AssumeBuilderPassLegacyPass::ID, PassInfo::NormalCtor_t (callDefaultCtor<AssumeBuilderPassLegacyPass>), false, false ); Registry.registerPass(*PI, true); return PI; } static llvm ::once_flag InitializeAssumeBuilderPassLegacyPassPassFlag; void llvm::initializeAssumeBuilderPassLegacyPassPass(PassRegistry &Registry) { llvm::call_once(InitializeAssumeBuilderPassLegacyPassPassFlag , initializeAssumeBuilderPassLegacyPassPassOnce, std::ref(Registry )); } |
1 | //===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file contains routines that help analyze properties that chains of | |||
10 | // computations have. | |||
11 | // | |||
12 | //===----------------------------------------------------------------------===// | |||
13 | ||||
14 | #ifndef LLVM_ANALYSIS_VALUETRACKING_H | |||
15 | #define LLVM_ANALYSIS_VALUETRACKING_H | |||
16 | ||||
17 | #include "llvm/ADT/ArrayRef.h" | |||
18 | #include "llvm/ADT/Optional.h" | |||
19 | #include "llvm/ADT/SmallSet.h" | |||
20 | #include "llvm/IR/Constants.h" | |||
21 | #include "llvm/IR/DataLayout.h" | |||
22 | #include "llvm/IR/InstrTypes.h" | |||
23 | #include "llvm/IR/Intrinsics.h" | |||
24 | #include "llvm/IR/Operator.h" | |||
25 | #include <cassert> | |||
26 | #include <cstdint> | |||
27 | ||||
28 | namespace llvm { | |||
29 | ||||
30 | class AddOperator; | |||
31 | class AllocaInst; | |||
32 | class APInt; | |||
33 | class AssumptionCache; | |||
34 | class DominatorTree; | |||
35 | class GEPOperator; | |||
36 | class IntrinsicInst; | |||
37 | class LoadInst; | |||
38 | class WithOverflowInst; | |||
39 | struct KnownBits; | |||
40 | class Loop; | |||
41 | class LoopInfo; | |||
42 | class MDNode; | |||
43 | class OptimizationRemarkEmitter; | |||
44 | class StringRef; | |||
45 | class TargetLibraryInfo; | |||
46 | class Value; | |||
47 | ||||
/// Shared cap on recursion depth for the analyses declared below (each takes
/// a `Depth` parameter defaulting to 0) — presumably bounds compile time on
/// deep expression trees; confirm against ValueTracking.cpp before relying
/// on the exact value.
constexpr unsigned MaxAnalysisRecursionDepth = 6;
49 | ||||
50 | /// Determine which bits of V are known to be either zero or one and return | |||
51 | /// them in the KnownZero/KnownOne bit sets. | |||
52 | /// | |||
53 | /// This function is defined on values with integer type, values with pointer | |||
54 | /// type, and vectors of integers. In the case | |||
55 | /// where V is a vector, the known zero and known one values are the | |||
56 | /// same width as the vector element, and the bit is set only if it is true | |||
57 | /// for all of the elements in the vector. | |||
58 | void computeKnownBits(const Value *V, KnownBits &Known, | |||
59 | const DataLayout &DL, unsigned Depth = 0, | |||
60 | AssumptionCache *AC = nullptr, | |||
61 | const Instruction *CxtI = nullptr, | |||
62 | const DominatorTree *DT = nullptr, | |||
63 | OptimizationRemarkEmitter *ORE = nullptr, | |||
64 | bool UseInstrInfo = true); | |||
65 | ||||
66 | /// Determine which bits of V are known to be either zero or one and return | |||
67 | /// them in the KnownZero/KnownOne bit sets. | |||
68 | /// | |||
69 | /// This function is defined on values with integer type, values with pointer | |||
70 | /// type, and vectors of integers. In the case | |||
71 | /// where V is a vector, the known zero and known one values are the | |||
72 | /// same width as the vector element, and the bit is set only if it is true | |||
73 | /// for all of the demanded elements in the vector. | |||
74 | void computeKnownBits(const Value *V, const APInt &DemandedElts, | |||
75 | KnownBits &Known, const DataLayout &DL, | |||
76 | unsigned Depth = 0, AssumptionCache *AC = nullptr, | |||
77 | const Instruction *CxtI = nullptr, | |||
78 | const DominatorTree *DT = nullptr, | |||
79 | OptimizationRemarkEmitter *ORE = nullptr, | |||
80 | bool UseInstrInfo = true); | |||
81 | ||||
82 | /// Returns the known bits rather than passing by reference. | |||
83 | KnownBits computeKnownBits(const Value *V, const DataLayout &DL, | |||
84 | unsigned Depth = 0, AssumptionCache *AC = nullptr, | |||
85 | const Instruction *CxtI = nullptr, | |||
86 | const DominatorTree *DT = nullptr, | |||
87 | OptimizationRemarkEmitter *ORE = nullptr, | |||
88 | bool UseInstrInfo = true); | |||
89 | ||||
90 | /// Returns the known bits rather than passing by reference. | |||
91 | KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts, | |||
92 | const DataLayout &DL, unsigned Depth = 0, | |||
93 | AssumptionCache *AC = nullptr, | |||
94 | const Instruction *CxtI = nullptr, | |||
95 | const DominatorTree *DT = nullptr, | |||
96 | OptimizationRemarkEmitter *ORE = nullptr, | |||
97 | bool UseInstrInfo = true); | |||
98 | ||||
99 | /// Compute known bits from the range metadata. | |||
100 | /// \p KnownZero the set of bits that are known to be zero | |||
101 | /// \p KnownOne the set of bits that are known to be one | |||
102 | void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, | |||
103 | KnownBits &Known); | |||
104 | ||||
105 | /// Return true if LHS and RHS have no common bits set. | |||
106 | bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS, | |||
107 | const DataLayout &DL, | |||
108 | AssumptionCache *AC = nullptr, | |||
109 | const Instruction *CxtI = nullptr, | |||
110 | const DominatorTree *DT = nullptr, | |||
111 | bool UseInstrInfo = true); | |||
112 | ||||
113 | /// Return true if the given value is known to have exactly one bit set when | |||
114 | /// defined. For vectors return true if every element is known to be a power | |||
115 | /// of two when defined. Supports values with integer or pointer type and | |||
116 | /// vectors of integers. If 'OrZero' is set, then return true if the given | |||
117 | /// value is either a power of two or zero. | |||
118 | bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, | |||
119 | bool OrZero = false, unsigned Depth = 0, | |||
120 | AssumptionCache *AC = nullptr, | |||
121 | const Instruction *CxtI = nullptr, | |||
122 | const DominatorTree *DT = nullptr, | |||
123 | bool UseInstrInfo = true); | |||
124 | ||||
125 | bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI); | |||
126 | ||||
127 | /// Return true if the given value is known to be non-zero when defined. For | |||
128 | /// vectors, return true if every element is known to be non-zero when | |||
129 | /// defined. For pointers, if the context instruction and dominator tree are | |||
130 | /// specified, perform context-sensitive analysis and return true if the | |||
131 | /// pointer couldn't possibly be null at the specified instruction. | |||
132 | /// Supports values with integer or pointer type and vectors of integers. | |||
133 | bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0, | |||
134 | AssumptionCache *AC = nullptr, | |||
135 | const Instruction *CxtI = nullptr, | |||
136 | const DominatorTree *DT = nullptr, | |||
137 | bool UseInstrInfo = true); | |||
138 | ||||
139 | /// Return true if the two given values are negation. | |||
/// Currently can recognize Value pair:
141 | /// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X) | |||
142 | /// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A) | |||
143 | bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false); | |||
144 | ||||
145 | /// Returns true if the give value is known to be non-negative. | |||
146 | bool isKnownNonNegative(const Value *V, const DataLayout &DL, | |||
147 | unsigned Depth = 0, | |||
148 | AssumptionCache *AC = nullptr, | |||
149 | const Instruction *CxtI = nullptr, | |||
150 | const DominatorTree *DT = nullptr, | |||
151 | bool UseInstrInfo = true); | |||
152 | ||||
153 | /// Returns true if the given value is known be positive (i.e. non-negative | |||
154 | /// and non-zero). | |||
155 | bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0, | |||
156 | AssumptionCache *AC = nullptr, | |||
157 | const Instruction *CxtI = nullptr, | |||
158 | const DominatorTree *DT = nullptr, | |||
159 | bool UseInstrInfo = true); | |||
160 | ||||
161 | /// Returns true if the given value is known be negative (i.e. non-positive | |||
162 | /// and non-zero). | |||
163 | bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0, | |||
164 | AssumptionCache *AC = nullptr, | |||
165 | const Instruction *CxtI = nullptr, | |||
166 | const DominatorTree *DT = nullptr, | |||
167 | bool UseInstrInfo = true); | |||
168 | ||||
169 | /// Return true if the given values are known to be non-equal when defined. | |||
170 | /// Supports scalar integer types only. | |||
171 | bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, | |||
172 | AssumptionCache *AC = nullptr, | |||
173 | const Instruction *CxtI = nullptr, | |||
174 | const DominatorTree *DT = nullptr, | |||
175 | bool UseInstrInfo = true); | |||
176 | ||||
177 | /// Return true if 'V & Mask' is known to be zero. We use this predicate to | |||
178 | /// simplify operations downstream. Mask is known to be zero for bits that V | |||
179 | /// cannot have. | |||
180 | /// | |||
181 | /// This function is defined on values with integer type, values with pointer | |||
182 | /// type, and vectors of integers. In the case | |||
183 | /// where V is a vector, the mask, known zero, and known one values are the | |||
184 | /// same width as the vector element, and the bit is set only if it is true | |||
185 | /// for all of the elements in the vector. | |||
186 | bool MaskedValueIsZero(const Value *V, const APInt &Mask, | |||
187 | const DataLayout &DL, | |||
188 | unsigned Depth = 0, AssumptionCache *AC = nullptr, | |||
189 | const Instruction *CxtI = nullptr, | |||
190 | const DominatorTree *DT = nullptr, | |||
191 | bool UseInstrInfo = true); | |||
192 | ||||
193 | /// Return the number of times the sign bit of the register is replicated into | |||
194 | /// the other bits. We know that at least 1 bit is always equal to the sign | |||
195 | /// bit (itself), but other cases can give us information. For example, | |||
196 | /// immediately after an "ashr X, 2", we know that the top 3 bits are all | |||
197 | /// equal to each other, so we return 3. For vectors, return the number of | |||
/// sign bits for the vector element with the minimum number of known sign
199 | /// bits. | |||
200 | unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, | |||
201 | unsigned Depth = 0, AssumptionCache *AC = nullptr, | |||
202 | const Instruction *CxtI = nullptr, | |||
203 | const DominatorTree *DT = nullptr, | |||
204 | bool UseInstrInfo = true); | |||
205 | ||||
206 | /// This function computes the integer multiple of Base that equals V. If | |||
207 | /// successful, it returns true and returns the multiple in Multiple. If | |||
208 | /// unsuccessful, it returns false. Also, if V can be simplified to an | |||
209 | /// integer, then the simplified V is returned in Val. Look through sext only | |||
210 | /// if LookThroughSExt=true. | |||
211 | bool ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, | |||
212 | bool LookThroughSExt = false, | |||
213 | unsigned Depth = 0); | |||
214 | ||||
215 | /// Map a call instruction to an intrinsic ID. Libcalls which have equivalent | |||
216 | /// intrinsics are treated as-if they were intrinsics. | |||
217 | Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB, | |||
218 | const TargetLibraryInfo *TLI); | |||
219 | ||||
220 | /// Return true if we can prove that the specified FP value is never equal to | |||
221 | /// -0.0. | |||
222 | bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, | |||
223 | unsigned Depth = 0); | |||
224 | ||||
225 | /// Return true if we can prove that the specified FP value is either NaN or | |||
226 | /// never less than -0.0. | |||
227 | /// | |||
228 | /// NaN --> true | |||
229 | /// +0 --> true | |||
230 | /// -0 --> true | |||
231 | /// x > +0 --> true | |||
232 | /// x < -0 --> false | |||
233 | bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI); | |||
234 | ||||
235 | /// Return true if the floating-point scalar value is not an infinity or if | |||
236 | /// the floating-point vector value has no infinities. Return false if a value | |||
237 | /// could ever be infinity. | |||
238 | bool isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI, | |||
239 | unsigned Depth = 0); | |||
240 | ||||
241 | /// Return true if the floating-point scalar value is not a NaN or if the | |||
242 | /// floating-point vector value has no NaN elements. Return false if a value | |||
243 | /// could ever be NaN. | |||
244 | bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI, | |||
245 | unsigned Depth = 0); | |||
246 | ||||
247 | /// Return true if we can prove that the specified FP value's sign bit is 0. | |||
248 | /// | |||
249 | /// NaN --> true/false (depending on the NaN's sign bit) | |||
250 | /// +0 --> true | |||
251 | /// -0 --> false | |||
252 | /// x > +0 --> true | |||
253 | /// x < -0 --> false | |||
254 | bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI); | |||
255 | ||||
256 | /// If the specified value can be set by repeating the same byte in memory, | |||
257 | /// return the i8 value that it is represented with. This is true for all i8 | |||
258 | /// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double | |||
259 | /// 0.0 etc. If the value can't be handled with a repeated byte store (e.g. | |||
260 | /// i16 0x1234), return null. If the value is entirely undef and padding, | |||
261 | /// return undef. | |||
262 | Value *isBytewiseValue(Value *V, const DataLayout &DL); | |||
263 | ||||
264 | /// Given an aggregate and an sequence of indices, see if the scalar value | |||
265 | /// indexed is already around as a register, for example if it were inserted | |||
266 | /// directly into the aggregate. | |||
267 | /// | |||
268 | /// If InsertBefore is not null, this function will duplicate (modified) | |||
269 | /// insertvalues when a part of a nested struct is extracted. | |||
270 | Value *FindInsertedValue(Value *V, | |||
271 | ArrayRef<unsigned> idx_range, | |||
272 | Instruction *InsertBefore = nullptr); | |||
273 | ||||
274 | /// Analyze the specified pointer to see if it can be expressed as a base | |||
275 | /// pointer plus a constant offset. Return the base and offset to the caller. | |||
276 | /// | |||
277 | /// This is a wrapper around Value::stripAndAccumulateConstantOffsets that | |||
278 | /// creates and later unpacks the required APInt. | |||
279 | inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, | |||
280 | const DataLayout &DL, | |||
281 | bool AllowNonInbounds = true) { | |||
282 | APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0); | |||
| ||||
283 | Value *Base = | |||
284 | Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds); | |||
285 | ||||
286 | Offset = OffsetAPInt.getSExtValue(); | |||
287 | return Base; | |||
288 | } | |||
289 | inline const Value * | |||
290 | GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset, | |||
291 | const DataLayout &DL, | |||
292 | bool AllowNonInbounds = true) { | |||
293 | return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL, | |||
294 | AllowNonInbounds); | |||
295 | } | |||
296 | ||||
297 | /// Returns true if the GEP is based on a pointer to a string (array of | |||
298 | // \p CharSize integers) and is indexing into this string. | |||
299 | bool isGEPBasedOnPointerToString(const GEPOperator *GEP, | |||
300 | unsigned CharSize = 8); | |||
301 | ||||
302 | /// Represents offset+length into a ConstantDataArray. | |||
303 | struct ConstantDataArraySlice { | |||
304 | /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid | |||
305 | /// initializer, it just doesn't fit the ConstantDataArray interface). | |||
306 | const ConstantDataArray *Array; | |||
307 | ||||
308 | /// Slice starts at this Offset. | |||
309 | uint64_t Offset; | |||
310 | ||||
311 | /// Length of the slice. | |||
312 | uint64_t Length; | |||
313 | ||||
314 | /// Moves the Offset and adjusts Length accordingly. | |||
315 | void move(uint64_t Delta) { | |||
316 | assert(Delta < Length)((void)0); | |||
317 | Offset += Delta; | |||
318 | Length -= Delta; | |||
319 | } | |||
320 | ||||
321 | /// Convenience accessor for elements in the slice. | |||
322 | uint64_t operator[](unsigned I) const { | |||
323 | return Array==nullptr ? 0 : Array->getElementAsInteger(I + Offset); | |||
324 | } | |||
325 | }; | |||
326 | ||||
327 | /// Returns true if the value \p V is a pointer into a ConstantDataArray. | |||
328 | /// If successful \p Slice will point to a ConstantDataArray info object | |||
329 | /// with an appropriate offset. | |||
330 | bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, | |||
331 | unsigned ElementSize, uint64_t Offset = 0); | |||
332 | ||||
333 | /// This function computes the length of a null-terminated C string pointed to | |||
334 | /// by V. If successful, it returns true and returns the string in Str. If | |||
335 | /// unsuccessful, it returns false. This does not include the trailing null | |||
336 | /// character by default. If TrimAtNul is set to false, then this returns any | |||
337 | /// trailing null characters as well as any other characters that come after | |||
338 | /// it. | |||
339 | bool getConstantStringInfo(const Value *V, StringRef &Str, | |||
340 | uint64_t Offset = 0, bool TrimAtNul = true); | |||
341 | ||||
342 | /// If we can compute the length of the string pointed to by the specified | |||
343 | /// pointer, return 'len+1'. If we can't, return 0. | |||
344 | uint64_t GetStringLength(const Value *V, unsigned CharSize = 8); | |||
345 | ||||
346 | /// This function returns call pointer argument that is considered the same by | |||
347 | /// aliasing rules. You CAN'T use it to replace one value with another. If | |||
348 | /// \p MustPreserveNullness is true, the call must preserve the nullness of | |||
349 | /// the pointer. | |||
350 | const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call, | |||
351 | bool MustPreserveNullness); | |||
352 | inline Value * | |||
353 | getArgumentAliasingToReturnedPointer(CallBase *Call, | |||
354 | bool MustPreserveNullness) { | |||
355 | return const_cast<Value *>(getArgumentAliasingToReturnedPointer( | |||
356 | const_cast<const CallBase *>(Call), MustPreserveNullness)); | |||
357 | } | |||
358 | ||||
359 | /// {launder,strip}.invariant.group returns pointer that aliases its argument, | |||
360 | /// and it only captures pointer by returning it. | |||
361 | /// These intrinsics are not marked as nocapture, because returning is | |||
362 | /// considered as capture. The arguments are not marked as returned neither, | |||
363 | /// because it would make it useless. If \p MustPreserveNullness is true, | |||
364 | /// the intrinsic must preserve the nullness of the pointer. | |||
365 | bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing( | |||
366 | const CallBase *Call, bool MustPreserveNullness); | |||
367 | ||||
368 | /// This method strips off any GEP address adjustments and pointer casts from | |||
369 | /// the specified value, returning the original object being addressed. Note | |||
370 | /// that the returned value has pointer type if the specified value does. If | |||
371 | /// the MaxLookup value is non-zero, it limits the number of instructions to | |||
372 | /// be stripped off. | |||
373 | const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6); | |||
374 | inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) { | |||
375 | // Force const to avoid infinite recursion. | |||
376 | const Value *VConst = V; | |||
377 | return const_cast<Value *>(getUnderlyingObject(VConst, MaxLookup)); | |||
378 | } | |||
379 | ||||
380 | /// This method is similar to getUnderlyingObject except that it can | |||
381 | /// look through phi and select instructions and return multiple objects. | |||
382 | /// | |||
383 | /// If LoopInfo is passed, loop phis are further analyzed. If a pointer | |||
384 | /// accesses different objects in each iteration, we don't look through the | |||
385 | /// phi node. E.g. consider this loop nest: | |||
386 | /// | |||
387 | /// int **A; | |||
388 | /// for (i) | |||
389 | /// for (j) { | |||
390 | /// A[i][j] = A[i-1][j] * B[j] | |||
391 | /// } | |||
392 | /// | |||
393 | /// This is transformed by Load-PRE to stash away A[i] for the next iteration | |||
394 | /// of the outer loop: | |||
395 | /// | |||
396 | /// Curr = A[0]; // Prev_0 | |||
397 | /// for (i: 1..N) { | |||
398 | /// Prev = Curr; // Prev = PHI (Prev_0, Curr) | |||
399 | /// Curr = A[i]; | |||
400 | /// for (j: 0..N) { | |||
401 | /// Curr[j] = Prev[j] * B[j] | |||
402 | /// } | |||
403 | /// } | |||
404 | /// | |||
405 | /// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects | |||
406 | /// should not assume that Curr and Prev share the same underlying object thus | |||
407 | /// it shouldn't look through the phi above. | |||
408 | void getUnderlyingObjects(const Value *V, | |||
409 | SmallVectorImpl<const Value *> &Objects, | |||
410 | LoopInfo *LI = nullptr, unsigned MaxLookup = 6); | |||
411 | ||||
412 | /// This is a wrapper around getUnderlyingObjects and adds support for basic | |||
413 | /// ptrtoint+arithmetic+inttoptr sequences. | |||
414 | bool getUnderlyingObjectsForCodeGen(const Value *V, | |||
415 | SmallVectorImpl<Value *> &Objects); | |||
416 | ||||
417 | /// Returns unique alloca where the value comes from, or nullptr. | |||
/// If OffsetZero is true, check that V points to the beginning of the alloca.
419 | AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false); | |||
420 | inline const AllocaInst *findAllocaForValue(const Value *V, | |||
421 | bool OffsetZero = false) { | |||
422 | return findAllocaForValue(const_cast<Value *>(V), OffsetZero); | |||
423 | } | |||
424 | ||||
425 | /// Return true if the only users of this pointer are lifetime markers. | |||
426 | bool onlyUsedByLifetimeMarkers(const Value *V); | |||
427 | ||||
428 | /// Return true if the only users of this pointer are lifetime markers or | |||
429 | /// droppable instructions. | |||
430 | bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V); | |||
431 | ||||
432 | /// Return true if speculation of the given load must be suppressed to avoid | |||
433 | /// ordering or interfering with an active sanitizer. If not suppressed, | |||
434 | /// dereferenceability and alignment must be proven separately. Note: This | |||
435 | /// is only needed for raw reasoning; if you use the interface below | |||
436 | /// (isSafeToSpeculativelyExecute), this is handled internally. | |||
437 | bool mustSuppressSpeculation(const LoadInst &LI); | |||
438 | ||||
439 | /// Return true if the instruction does not have any effects besides | |||
440 | /// calculating the result and does not have undefined behavior. | |||
441 | /// | |||
442 | /// This method never returns true for an instruction that returns true for | |||
443 | /// mayHaveSideEffects; however, this method also does some other checks in | |||
444 | /// addition. It checks for undefined behavior, like dividing by zero or | |||
445 | /// loading from an invalid pointer (but not for undefined results, like a | |||
446 | /// shift with a shift amount larger than the width of the result). It checks | |||
447 | /// for malloc and alloca because speculatively executing them might cause a | |||
448 | /// memory leak. It also returns false for instructions related to control | |||
449 | /// flow, specifically terminators and PHI nodes. | |||
450 | /// | |||
451 | /// If the CtxI is specified this method performs context-sensitive analysis | |||
452 | /// and returns true if it is safe to execute the instruction immediately | |||
453 | /// before the CtxI. | |||
454 | /// | |||
455 | /// If the CtxI is NOT specified this method only looks at the instruction | |||
456 | /// itself and its operands, so if this method returns true, it is safe to | |||
457 | /// move the instruction as long as the correct dominance relationships for | |||
458 | /// the operands and users hold. | |||
459 | /// | |||
460 | /// This method can return true for instructions that read memory; | |||
461 | /// for such instructions, moving them may change the resulting value. | |||
462 | bool isSafeToSpeculativelyExecute(const Value *V, | |||
463 | const Instruction *CtxI = nullptr, | |||
464 | const DominatorTree *DT = nullptr, | |||
465 | const TargetLibraryInfo *TLI = nullptr); | |||
466 | ||||
467 | /// Returns true if the result or effects of the given instructions \p I | |||
468 | /// depend on or influence global memory. | |||
469 | /// Memory dependence arises for example if the instruction reads from | |||
470 | /// memory or may produce effects or undefined behaviour. Memory dependent | |||
/// instructions generally cannot be reordered with respect to other memory
472 | /// dependent instructions or moved into non-dominated basic blocks. | |||
473 | /// Instructions which just compute a value based on the values of their | |||
474 | /// operands are not memory dependent. | |||
475 | bool mayBeMemoryDependent(const Instruction &I); | |||
476 | ||||
477 | /// Return true if it is an intrinsic that cannot be speculated but also | |||
478 | /// cannot trap. | |||
479 | bool isAssumeLikeIntrinsic(const Instruction *I); | |||
480 | ||||
481 | /// Return true if it is valid to use the assumptions provided by an | |||
482 | /// assume intrinsic, I, at the point in the control-flow identified by the | |||
483 | /// context instruction, CxtI. | |||
484 | bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, | |||
485 | const DominatorTree *DT = nullptr); | |||
486 | ||||
/// Result of the computeOverflowFor* queries declared below.
enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};
497 | ||||
498 | OverflowResult computeOverflowForUnsignedMul(const Value *LHS, | |||
499 | const Value *RHS, | |||
500 | const DataLayout &DL, | |||
501 | AssumptionCache *AC, | |||
502 | const Instruction *CxtI, | |||
503 | const DominatorTree *DT, | |||
504 | bool UseInstrInfo = true); | |||
505 | OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, | |||
506 | const DataLayout &DL, | |||
507 | AssumptionCache *AC, | |||
508 | const Instruction *CxtI, | |||
509 | const DominatorTree *DT, | |||
510 | bool UseInstrInfo = true); | |||
511 | OverflowResult computeOverflowForUnsignedAdd(const Value *LHS, | |||
512 | const Value *RHS, | |||
513 | const DataLayout &DL, | |||
514 | AssumptionCache *AC, | |||
515 | const Instruction *CxtI, | |||
516 | const DominatorTree *DT, | |||
517 | bool UseInstrInfo = true); | |||
518 | OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS, | |||
519 | const DataLayout &DL, | |||
520 | AssumptionCache *AC = nullptr, | |||
521 | const Instruction *CxtI = nullptr, | |||
522 | const DominatorTree *DT = nullptr); | |||
523 | /// This version also leverages the sign bit of Add if known. | |||
524 | OverflowResult computeOverflowForSignedAdd(const AddOperator *Add, | |||
525 | const DataLayout &DL, | |||
526 | AssumptionCache *AC = nullptr, | |||
527 | const Instruction *CxtI = nullptr, | |||
528 | const DominatorTree *DT = nullptr); | |||
529 | OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, | |||
530 | const DataLayout &DL, | |||
531 | AssumptionCache *AC, | |||
532 | const Instruction *CxtI, | |||
533 | const DominatorTree *DT); | |||
534 | OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, | |||
535 | const DataLayout &DL, | |||
536 | AssumptionCache *AC, | |||
537 | const Instruction *CxtI, | |||
538 | const DominatorTree *DT); | |||
539 | ||||
540 | /// Returns true if the arithmetic part of the \p WO 's result is | |||
541 | /// used only along the paths control dependent on the computation | |||
542 | /// not overflowing, \p WO being an <op>.with.overflow intrinsic. | |||
543 | bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, | |||
544 | const DominatorTree &DT); | |||
545 | ||||
546 | ||||
547 | /// Determine the possible constant range of an integer or vector of integer | |||
548 | /// value. This is intended as a cheap, non-recursive check. | |||
549 | ConstantRange computeConstantRange(const Value *V, bool UseInstrInfo = true, | |||
550 | AssumptionCache *AC = nullptr, | |||
551 | const Instruction *CtxI = nullptr, | |||
552 | unsigned Depth = 0); | |||
553 | ||||
554 | /// Return true if this function can prove that the instruction I will | |||
555 | /// always transfer execution to one of its successors (including the next | |||
556 | /// instruction that follows within a basic block). E.g. this is not | |||
557 | /// guaranteed for function calls that could loop infinitely. | |||
558 | /// | |||
559 | /// In other words, this function returns false for instructions that may | |||
560 | /// transfer execution or fail to transfer execution in a way that is not | |||
561 | /// captured in the CFG nor in the sequence of instructions within a basic | |||
562 | /// block. | |||
563 | /// | |||
564 | /// Undefined behavior is assumed not to happen, so e.g. division is | |||
565 | /// guaranteed to transfer execution to the following instruction even | |||
566 | /// though division by zero might cause undefined behavior. | |||
567 | bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I); | |||
568 | ||||
569 | /// Returns true if this block does not contain a potential implicit exit. | |||
/// This is equivalent to saying that all instructions within the basic block
571 | /// are guaranteed to transfer execution to their successor within the basic | |||
572 | /// block. This has the same assumptions w.r.t. undefined behavior as the | |||
573 | /// instruction variant of this function. | |||
574 | bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB); | |||
575 | ||||
576 | /// Return true if this function can prove that the instruction I | |||
577 | /// is executed for every iteration of the loop L. | |||
578 | /// | |||
579 | /// Note that this currently only considers the loop header. | |||
580 | bool isGuaranteedToExecuteForEveryIteration(const Instruction *I, | |||
581 | const Loop *L); | |||
582 | ||||
583 | /// Return true if I yields poison or raises UB if any of its operands is | |||
584 | /// poison. | |||
585 | /// Formally, given I = `r = op v1 v2 .. vN`, propagatesPoison returns true | |||
586 | /// if, for all i, r is evaluated to poison or op raises UB if vi = poison. | |||
587 | /// If vi is a vector or an aggregate and r is a single value, any poison | |||
588 | /// element in vi should make r poison or raise UB. | |||
589 | /// To filter out operands that raise UB on poison, you can use | |||
590 | /// getGuaranteedNonPoisonOp. | |||
591 | bool propagatesPoison(const Operator *I); | |||
592 | ||||
593 | /// Insert operands of I into Ops such that I will trigger undefined behavior | |||
594 | /// if I is executed and that operand has a poison value. | |||
595 | void getGuaranteedNonPoisonOps(const Instruction *I, | |||
596 | SmallPtrSetImpl<const Value *> &Ops); | |||
597 | /// Insert operands of I into Ops such that I will trigger undefined behavior | |||
598 | /// if I is executed and that operand is not a well-defined value | |||
599 | /// (i.e. has undef bits or poison). | |||
600 | void getGuaranteedWellDefinedOps(const Instruction *I, | |||
601 | SmallPtrSetImpl<const Value *> &Ops); | |||
602 | ||||
603 | /// Return true if the given instruction must trigger undefined behavior | |||
604 | /// when I is executed with any operands which appear in KnownPoison holding | |||
605 | /// a poison value at the point of execution. | |||
606 | bool mustTriggerUB(const Instruction *I, | |||
607 | const SmallSet<const Value *, 16>& KnownPoison); | |||
608 | ||||
609 | /// Return true if this function can prove that if Inst is executed | |||
610 | /// and yields a poison value or undef bits, then that will trigger | |||
611 | /// undefined behavior. | |||
612 | /// | |||
613 | /// Note that this currently only considers the basic block that is | |||
614 | /// the parent of Inst. | |||
615 | bool programUndefinedIfUndefOrPoison(const Instruction *Inst); | |||
616 | bool programUndefinedIfPoison(const Instruction *Inst); | |||
617 | ||||
618 | /// canCreateUndefOrPoison returns true if Op can create undef or poison from | |||
619 | /// non-undef & non-poison operands. | |||
620 | /// For vectors, canCreateUndefOrPoison returns true if there is potential | |||
621 | /// poison or undef in any element of the result when vectors without | |||
/// undef/poison are given as operands.
623 | /// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns | |||
624 | /// true. If Op raises immediate UB but never creates poison or undef | |||
625 | /// (e.g. sdiv I, 0), canCreatePoison returns false. | |||
626 | /// | |||
627 | /// canCreatePoison returns true if Op can create poison from non-poison | |||
628 | /// operands. | |||
629 | bool canCreateUndefOrPoison(const Operator *Op); | |||
630 | bool canCreatePoison(const Operator *Op); | |||
631 | ||||
632 | /// Return true if V is poison given that ValAssumedPoison is already poison. | |||
633 | /// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`, | |||
634 | /// impliesPoison returns true. | |||
635 | bool impliesPoison(const Value *ValAssumedPoison, const Value *V); | |||
636 | ||||
637 | /// Return true if this function can prove that V does not have undef bits | |||
638 | /// and is never poison. If V is an aggregate value or vector, check whether | |||
639 | /// all elements (except padding) are not undef or poison. | |||
640 | /// Note that this is different from canCreateUndefOrPoison because the | |||
641 | /// function assumes Op's operands are not poison/undef. | |||
642 | /// | |||
643 | /// If CtxI and DT are specified this method performs flow-sensitive analysis | |||
644 | /// and returns true if it is guaranteed to be never undef or poison | |||
645 | /// immediately before the CtxI. | |||
646 | bool isGuaranteedNotToBeUndefOrPoison(const Value *V, | |||
647 | AssumptionCache *AC = nullptr, | |||
648 | const Instruction *CtxI = nullptr, | |||
649 | const DominatorTree *DT = nullptr, | |||
650 | unsigned Depth = 0); | |||
651 | bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr, | |||
652 | const Instruction *CtxI = nullptr, | |||
653 | const DominatorTree *DT = nullptr, | |||
654 | unsigned Depth = 0); | |||
655 | ||||
/// Specific patterns of select instructions we can match. SPF_UNKNOWN
/// indicates that no known pattern was recognized.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN,    /// Signed minimum
  SPF_UMIN,    /// Unsigned minimum
  SPF_SMAX,    /// Signed maximum
  SPF_UMAX,    /// Unsigned maximum
  SPF_FMINNUM, /// Floating point minnum
  SPF_FMAXNUM, /// Floating point maxnum
  SPF_ABS,     /// Absolute value
  SPF_NABS     /// Negated absolute value
};
668 | ||||
/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input. Only meaningful for the floating-point flavors
/// (SPF_FMINNUM / SPF_FMAXNUM).
enum SelectPatternNaNBehavior {
  SPNB_NA = 0,        /// NaN behavior not applicable.
  SPNB_RETURNS_NAN,   /// Given one NaN input, returns the NaN.
  SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
  SPNB_RETURNS_ANY    /// Given one NaN input, can return either (or
                      /// it has been determined that no operands can
                      /// be NaN).
};
679 | ||||
680 | struct SelectPatternResult { | |||
681 | SelectPatternFlavor Flavor; | |||
682 | SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is | |||
683 | /// SPF_FMINNUM or SPF_FMAXNUM. | |||
684 | bool Ordered; /// When implementing this min/max pattern as | |||
685 | /// fcmp; select, does the fcmp have to be | |||
686 | /// ordered? | |||
687 | ||||
688 | /// Return true if \p SPF is a min or a max pattern. | |||
689 | static bool isMinOrMax(SelectPatternFlavor SPF) { | |||
690 | return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS; | |||
691 | } | |||
692 | }; | |||
693 | ||||
694 | /// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind | |||
695 | /// and providing the out parameter results if we successfully match. | |||
696 | /// | |||
697 | /// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be | |||
698 | /// the negation instruction from the idiom. | |||
699 | /// | |||
700 | /// If CastOp is not nullptr, also match MIN/MAX idioms where the type does | |||
701 | /// not match that of the original select. If this is the case, the cast | |||
702 | /// operation (one of Trunc,SExt,Zext) that must be done to transform the | |||
703 | /// type of LHS and RHS into the type of V is returned in CastOp. | |||
704 | /// | |||
705 | /// For example: | |||
706 | /// %1 = icmp slt i32 %a, i32 4 | |||
707 | /// %2 = sext i32 %a to i64 | |||
708 | /// %3 = select i1 %1, i64 %2, i64 4 | |||
709 | /// | |||
710 | /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt | |||
711 | /// | |||
712 | SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, | |||
713 | Instruction::CastOps *CastOp = nullptr, | |||
714 | unsigned Depth = 0); | |||
715 | ||||
716 | inline SelectPatternResult | |||
717 | matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS) { | |||
718 | Value *L = const_cast<Value *>(LHS); | |||
719 | Value *R = const_cast<Value *>(RHS); | |||
720 | auto Result = matchSelectPattern(const_cast<Value *>(V), L, R); | |||
721 | LHS = L; | |||
722 | RHS = R; | |||
723 | return Result; | |||
724 | } | |||
725 | ||||
726 | /// Determine the pattern that a select with the given compare as its | |||
727 | /// predicate and given values as its true/false operands would match. | |||
728 | SelectPatternResult matchDecomposedSelectPattern( | |||
729 | CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, | |||
730 | Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0); | |||
731 | ||||
732 | /// Return the canonical comparison predicate for the specified | |||
733 | /// minimum/maximum flavor. | |||
734 | CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, | |||
735 | bool Ordered = false); | |||
736 | ||||
737 | /// Return the inverse minimum/maximum flavor of the specified flavor. | |||
738 | /// For example, signed minimum is the inverse of signed maximum. | |||
739 | SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF); | |||
740 | ||||
741 | Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID); | |||
742 | ||||
743 | /// Return the canonical inverse comparison predicate for the specified | |||
744 | /// minimum/maximum flavor. | |||
745 | CmpInst::Predicate getInverseMinMaxPred(SelectPatternFlavor SPF); | |||
746 | ||||
747 | /// Return the minimum or maximum constant value for the specified integer | |||
748 | /// min/max flavor and type. | |||
749 | APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth); | |||
750 | ||||
751 | /// Check if the values in \p VL are select instructions that can be converted | |||
752 | /// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a | |||
753 | /// conversion is possible, together with a bool indicating whether all select | |||
754 | /// conditions are only used by the selects. Otherwise return | |||
755 | /// Intrinsic::not_intrinsic. | |||
756 | std::pair<Intrinsic::ID, bool> | |||
757 | canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL); | |||
758 | ||||
759 | /// Attempt to match a simple first order recurrence cycle of the form: | |||
760 | /// %iv = phi Ty [%Start, %Entry], [%Inc, %backedge] | |||
761 | /// %inc = binop %iv, %step | |||
762 | /// OR | |||
763 | /// %iv = phi Ty [%Start, %Entry], [%Inc, %backedge] | |||
764 | /// %inc = binop %step, %iv | |||
765 | /// | |||
766 | /// A first order recurrence is a formula with the form: X_n = f(X_(n-1)) | |||
767 | /// | |||
768 | /// A couple of notes on subtleties in that definition: | |||
769 | /// * The Step does not have to be loop invariant. In math terms, it can | |||
770 | /// be a free variable. We allow recurrences with both constant and | |||
771 | /// variable coefficients. Callers may wish to filter cases where Step | |||
772 | /// does not dominate P. | |||
773 | /// * For non-commutative operators, we will match both forms. This | |||
774 | /// results in some odd recurrence structures. Callers may wish to filter | |||
775 | /// out recurrences where the phi is not the LHS of the returned operator. | |||
776 | /// * Because of the structure matched, the caller can assume as a post | |||
/// condition of the match the presence of a Loop with P's parent as its
778 | /// header *except* in unreachable code. (Dominance decays in unreachable | |||
779 | /// code.) | |||
780 | /// | |||
/// NOTE: This is intentionally simple. If you want the ability to analyze
/// non-trivial loop conditions, see ScalarEvolution instead.
783 | bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, | |||
784 | Value *&Start, Value *&Step); | |||
785 | ||||
786 | /// Analogous to the above, but starting from the binary operator | |||
787 | bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, | |||
788 | Value *&Start, Value *&Step); | |||
789 | ||||
790 | /// Return true if RHS is known to be implied true by LHS. Return false if | |||
791 | /// RHS is known to be implied false by LHS. Otherwise, return None if no | |||
792 | /// implication can be made. | |||
793 | /// A & B must be i1 (boolean) values or a vector of such values. Note that | |||
794 | /// the truth table for implication is the same as <=u on i1 values (but not | |||
795 | /// <=s!). The truth table for both is: | |||
796 | /// | T | F (B) | |||
797 | /// T | T | F | |||
798 | /// F | T | T | |||
799 | /// (A) | |||
800 | Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS, | |||
801 | const DataLayout &DL, bool LHSIsTrue = true, | |||
802 | unsigned Depth = 0); | |||
803 | Optional<bool> isImpliedCondition(const Value *LHS, | |||
804 | CmpInst::Predicate RHSPred, | |||
805 | const Value *RHSOp0, const Value *RHSOp1, | |||
806 | const DataLayout &DL, bool LHSIsTrue = true, | |||
807 | unsigned Depth = 0); | |||
808 | ||||
809 | /// Return the boolean condition value in the context of the given instruction | |||
810 | /// if it is known based on dominating conditions. | |||
811 | Optional<bool> isImpliedByDomCondition(const Value *Cond, | |||
812 | const Instruction *ContextI, | |||
813 | const DataLayout &DL); | |||
814 | Optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred, | |||
815 | const Value *LHS, const Value *RHS, | |||
816 | const Instruction *ContextI, | |||
817 | const DataLayout &DL); | |||
818 | ||||
819 | /// If Ptr1 is provably equal to Ptr2 plus a constant offset, return that | |||
820 | /// offset. For example, Ptr1 might be &A[42], and Ptr2 might be &A[40]. In | |||
821 | /// this case offset would be -8. | |||
822 | Optional<int64_t> isPointerOffset(const Value *Ptr1, const Value *Ptr2, | |||
823 | const DataLayout &DL); | |||
824 | } // end namespace llvm | |||
825 | ||||
826 | #endif // LLVM_ANALYSIS_VALUETRACKING_H |