File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/CodeGenPrepare.cpp
Warning: line 4177, column 20: Called C++ object pointer is null
//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumMemoryInstsPhiCreated,
          "Number of phis created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumMemoryInstsSelectCreated,
          "Number of select created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
    "disable-cgp-select2branch", cl::Hidden, cl::init(false),
    cl::desc("Disable select to branch conversion."));

static cl::opt<bool> AddrSinkUsingGEPs(
    "addr-sink-using-gep", cl::Hidden, cl::init(true),
    cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool> EnableAndCmpSinking(
    "enable-andcmp-sinking", cl::Hidden, cl::init(true),
    cl::desc("Enable sinking and/cmp into branches."));

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
             "optimization in CodeGenPrepare"));

static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore,
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<bool> ProfileUnknownInSpecialSection(
    "profile-unknown-in-special-section", cl::Hidden, cl::init(false),
    cl::ZeroOrMore,
    cl::desc("In a profiling mode like sampleFDO, if a function has no "
             "profile, we cannot tell for sure that it is cold, because it "
             "may have been newly added without ever being sampled. With "
             "this flag enabled, the compiler can put such profile-unknown "
             "functions into a special section, so the runtime system can "
             "handle them differently than the .text section, for example "
             "to save RAM."));

static cl::opt<unsigned> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool>
    EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden,
                             cl::desc("Enable merging of redundant sexts when "
                                      "one is dominating the other."),
                             cl::init(true));

static cl::opt<bool> DisableComplexAddrModes(
    "disable-complex-addr-modes", cl::Hidden, cl::init(false),
    cl::desc("Disables combining addressing modes with different parts "
             "in optimizeMemoryInst."));

static cl::opt<bool>
    AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
                    cl::desc("Allow creation of Phis in Address sinking."));

static cl::opt<bool> AddrSinkNewSelects(
    "addr-sink-new-select", cl::Hidden, cl::init(true),
    cl::desc("Allow creation of selects in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseReg(
    "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseReg field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseGV(
    "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseGV field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseOffs(
    "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseOffs field in Address sinking."));

static cl::opt<bool> AddrSinkCombineScaledReg(
    "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of ScaledReg field in Address sinking."));

static cl::opt<bool>
    EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
                         cl::init(true),
                         cl::desc("Enable splitting large offset of GEP."));

static cl::opt<bool> EnableICMP_EQToICMP_ST(
    "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
    cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));

static cl::opt<bool>
    VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
                     cl::desc("Enable BFI update verification for "
                              "CodeGenPrepare."));

static cl::opt<bool> OptimizePhiTypes(
    "cgp-optimize-phi-types", cl::Hidden, cl::init(false),
    cl::desc("Enable converting phi types in CodeGenPrepare"));

namespace {

enum ExtType {
  ZeroExtension, // Zero extension has been seen.
  SignExtension, // Sign extension has been seen.
  BothExtension  // This extension type is used if we saw sext after
                 // ZeroExtension had been set, or if we saw zext after
                 // SignExtension had been set. It makes the type
                 // information of a promoted instruction invalid.
};

using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = DenseMap<Value *, SExts>;

class TypePromotionTransaction;

class CodeGenPrepare : public FunctionPass {
  const TargetMachine *TM = nullptr;
  const TargetSubtargetInfo *SubtargetInfo;
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI;
  const TargetTransformInfo *TTI = nullptr;
  const TargetLibraryInfo *TLInfo;
  const LoopInfo *LI;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  ProfileSummaryInfo *PSI;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address. The usage of WeakTrackingVH
  /// enables SunkAddrs to be treated as a cache whose entries can be
  /// invalidated if a sunken address computation has been erased.
  ValueMap<Value *, WeakTrackingVH> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;

  /// Keeps track of the types of the related instructions before their
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of GEPs accessing the same data structures such as structs or
  /// arrays that are candidates to be split later because of their large
  /// size.
  MapVector<
      AssertingVH<Value>,
      SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
      LargeOffsetGEPMap;

  /// Keep track of the new GEP bases after splitting GEPs with large offsets.
  SmallSet<AssertingVH<Value>, 2> NewGEPBases;

  /// Map large-offset GEPs to their serial numbers.
  DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;

  /// Keep track of SExt promoted.
  ValueToSExts ValToSExtendedUses;

  /// True if the function has the OptSize attribute.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL = nullptr;

  /// Building the dominator tree can be expensive, so we only build it
  /// lazily and update it when required.
  std::unique_ptr<DominatorTree> DT;

public:
  static char ID; // Pass identification, replacement for typeid

  CodeGenPrepare() : FunctionPass(ID) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
  }

private:
  template <typename F>
  void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
    // Substituting can cause recursive simplifications, which can invalidate
    // our iterator. Use a WeakTrackingVH to hold onto it in case this
    // happens.
    Value *CurValue = &*CurInstIterator;
    WeakTrackingVH IterHandle(CurValue);

    f();

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurValue) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }

  // Get the DominatorTree, building if necessary.
  DominatorTree &getDT(Function &F) {
    if (!DT)
      DT = std::make_unique<DominatorTree>(F);
    return *DT;
  }

  void removeAllAssertingVHReferences(Value *V);
  bool eliminateAssumptions(Function &F);
  bool eliminateFallThrough(Function &F);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool makeBitReverse(Instruction &I);
  bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
  bool optimizeInst(Instruction *I, bool &ModifiedDT);
  bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                          Type *AccessTy, unsigned AddrSpace);
  bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *Load);
  bool optimizeShiftInst(BinaryOperator *BO);
  bool optimizeFunnelShift(IntrinsicInst *Fsh);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
  bool optimizeSwitchInst(SwitchInst *SI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT);
  bool fixupDbgValue(Instruction *I);
  bool placeDbgValues(Function &F);
  bool placePseudoProbes(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool splitLargeGEPOffsets();
  bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
                       SmallPtrSetImpl<Instruction *> &DeletedInstrs);
  bool optimizePhiTypes(Function &F);
  bool performAddressTypePromotion(
      Instruction *&Inst,
      bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F, bool &ModifiedDT);
  bool simplifyOffsetableRelocate(GCStatepointInst &I);

  bool tryToSinkFreeOperands(Instruction *I);
  bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0,
                                   Value *Arg1, CmpInst *Cmp,
                                   Intrinsic::ID IID);
  bool optimizeCmp(CmpInst *Cmp, bool &ModifiedDT);
  bool combineToUSubWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
  bool combineToUAddWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
  void verifyBFIUpdates(Function &F);
};

} // end anonymous namespace

char CodeGenPrepare::ID = 0;

INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE,
                    "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DL = &F.getParent()->getDataLayout();

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedInsts.clear();
  PromotedInsts.clear();

  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  SubtargetInfo = TM->getSubtargetImpl(F);
  TLI = SubtargetInfo->getTargetLowering();
  TRI = SubtargetInfo->getRegisterInfo();
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  BPI.reset(new BranchProbabilityInfo(F, *LI));
  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  OptSize = F.hasOptSize();
  if (ProfileGuidedSectionPrefix) {
    // The hot attribute overrides profile-count-based hotness, while
    // profile-count-based hotness overrides the cold attribute.
    // This is a conservative behavior.
    if (F.hasFnAttribute(Attribute::Hot) ||
        PSI->isFunctionHotInCallGraph(&F, *BFI))
      F.setSectionPrefix("hot");
    // If PSI shows this function is not hot, we place the function into the
    // unlikely section if (1) PSI shows this is a cold function, or (2) the
    // function has the cold attribute.
    else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
             F.hasFnAttribute(Attribute::Cold))
      F.setSectionPrefix("unlikely");
    else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
             PSI->isFunctionHotnessUnknown(F))
      F.setSectionPrefix("unknown");
  }

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
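  // For example (an illustrative sketch, not from the upstream comment): on a
  // target where 64-bit divides are slow, a "udiv i64" whose operands happen
  // to fit in 32 bits can be rewritten by bypassSlowDivision into a runtime
  // width check plus a fast 32-bit divide, with a fallback to the full 64-bit
  // divide. The actual bypass widths come from getBypassSlowDivWidths().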
  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock *BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock *Next = BB->getNextNode();
      // F.hasOptSize is already checked in the outer if statement.
      if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
        EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Get rid of @llvm.assume builtins before attempting to eliminate empty
  // blocks, since there might be blocks that only contain @llvm.assume calls
  // (plus arguments that we can get rid of).
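  // As an illustrative sketch (hypothetical IR), a block containing only
  //   %cmp = icmp ugt i64 %x, 3
  //   call void @llvm.assume(i1 %cmp)
  // becomes empty once the assume and its now trivially dead condition are
  // removed below.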
  EverMadeChange |= eliminateAssumptions(F);

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  bool ModifiedDT = false;
  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F, ModifiedDT);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |= SplitIndirectBrCriticalEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    DT.reset();
    for (Function::iterator I = F.begin(); I != F.end();) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was
      // changed.
      if (ModifiedDTOnIteration)
        break;
    }
    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);
    if (!LargeOffsetGEPMap.empty())
      MadeChange |= splitLargeGEPOffsets();
    MadeChange |= optimizePhiTypes(F);

    if (MadeChange)
      eliminateFallThrough(F);

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    LargeOffsetGEPMap.clear();
    LargeOffsetGEPID.clear();
  }

  NewGEPBases.clear();
  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    // Use a set vector to get deterministic iteration order. The order the
    // blocks are removed may affect whether or not PHI nodes in successors
    // are removed.
    SmallSetVector<BasicBlock *, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(successors(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange)
        continue;

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = WorkList.pop_back_val();
      SmallVector<BasicBlock *, 2> Successors(successors(BB));

      DeleteDeadBlock(BB);

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<GCStatepointInst *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (auto *SP = dyn_cast<GCStatepointInst>(&I))
          Statepoints.push_back(SP);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  // Do this last to clean up use-before-def scenarios introduced by other
  // preparatory transforms.
  EverMadeChange |= placeDbgValues(F);
  EverMadeChange |= placePseudoProbes(F);

#ifndef NDEBUG
  if (VerifyBFIUpdates)
    verifyBFIUpdates(F);
#endif

  return EverMadeChange;
}

bool CodeGenPrepare::eliminateAssumptions(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    CurInstIterator = BB.begin();
    while (CurInstIterator != BB.end()) {
      Instruction *I = &*(CurInstIterator++);
      if (auto *Assume = dyn_cast<AssumeInst>(I)) {
        MadeChange = true;
        Value *Operand = Assume->getOperand(0);
        Assume->eraseFromParent();

        resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
          RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
        });
      }
    }
  }
  return MadeChange;
}

/// An instruction is about to be deleted, so remove all references to it in
/// our GEP-tracking data structures.
void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
  LargeOffsetGEPMap.erase(V);
  NewGEPBases.erase(V);

  auto GEP = dyn_cast<GetElementPtrInst>(V);
  if (!GEP)
    return;

  LargeOffsetGEPID.erase(GEP);

  auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
  if (VecI == LargeOffsetGEPMap.end())
    return;

  auto &GEPVector = VecI->second;
  const auto &I =
      llvm::find_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
  if (I == GEPVector.end())
    return;

  GEPVector.erase(I);
  if (GEPVector.empty())
    LargeOffsetGEPMap.erase(VecI);
}

// Verify BFI has been updated correctly by recomputing BFI and comparing them.
void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
  DominatorTree NewDT(F);
  LoopInfo NewLI(NewDT);
  BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
  BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
  NewBFI.verifyMatch(*BFI);
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
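// For illustration (hypothetical block names), a CFG of the shape
//   pred:                ; ends in "br label %bb"
//   bb:                  ; %pred is its only predecessor
// collapses into a single block, provided bb's address is not taken.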
bool CodeGenPrepare::eliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  // Use a temporary array to avoid the iterator being invalidated when
  // deleting blocks.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F))
    Blocks.push_back(&Block);

  SmallSet<WeakTrackingVH, 16> Preds;
  for (auto &Block : Blocks) {
    auto *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken())
      continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");

      // Merge BB into SinglePred and delete it.
      MergeBlockIntoPredecessor(BB);
      Preds.insert(SinglePred);
    }
  }

  // (Repeatedly) merging blocks into their predecessors can create redundant
  // debug intrinsics.
  for (auto &Pred : Preds)
    if (auto *BB = cast_or_null<BasicBlock>(Pred))
      RemoveRedundantDbgInstrs(BB);

  return Changed;
}

/// Find a destination block from BB if BB is a mergeable empty block.
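// An illustrative (hypothetical) mergeable empty block: only PHI nodes,
// optionally debug intrinsics, and an unconditional branch:
//   bb:
//     %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
//     br label %dest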
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    llvm::append_range(LoopList, *L);
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Copy blocks into a temporary array to avoid iterator invalidation issues
  // as we remove them.
  // Note that this intentionally skips the entry block.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F))
    Blocks.push_back(&Block);

  for (auto &Block : Blocks) {
    BasicBlock *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Skip merging if the block's successor is also a successor to any callbr
  // that leads to this block.
  // FIXME: Is this really needed? Is this a correctness issue?
  for (BasicBlock *Pred : predecessors(BB)) {
    if (auto *CBI = dyn_cast<CallBrInst>((Pred)->getTerminator()))
      for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
        if (DestBB == CBI->getSuccessor(i))
          return false;
  }

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
  // to add COPY instructions in the predecessor of BB instead of BB (if it is
  // not merged). Note that the critical edge created by merging such blocks
  // won't be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred ||
      !(isa<SwitchInst>(Pred->getTerminator()) ||
        isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
    return true;

  // We use a simple cost heuristic: skipping the merge is deemed profitable
  // if the cost of skipping is less than the cost of merging, i.e.
  // Cost(skipping merging) < Cost(merging BB), where
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), we can simplify this to:
  //   Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such a
  // case, Cost(merging BB) will be the sum of their frequencies.
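  // Worked example with illustrative numbers and the default
  // FreqRatioToSkipMerge of 2: if Freq(Pred) = 1000 and Freq(BB) = 100, then
  // 1000 > 100 * 2, so we skip merging; a COPY executed 1000 times in Pred
  // would cost more than a copy plus a branch executed 100 times in BB.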

  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (BasicBlock *DestBBPred : predecessors(DestBB)) {
    if (DestBBPred == BB)
      continue;

    if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
          return DestPN.getIncomingValueForBlock(BB) ==
                 DestPN.getIncomingValueForBlock(DestBBPred);
        }))
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all of BB's incoming values are the same as the value from Pred.
  // In this case, there is no reason to skip merging because COPYs are
  // expected to be placed in Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto *SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  return PredFreq.getFrequency() <=
         BBFreq.getFrequency() * FreqRatioToSkipMerge;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  for (const PHINode &PN : BB->phis()) {
    for (const User *U : PN.users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN)
    return true; // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock *, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) { // Common predecessor?
      for (const PHINode &PN : DestBB->phis()) {
        const Value *V1 = PN.getIncomingValueForBlock(Pred);
        const Value *V2 = PN.getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2)
          return false;
      }
    }
  }

  return true;
}

/// Eliminate a basic block that has only phi's and an unconditional branch in
/// it.
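// For illustration (hypothetical names), eliminating %bb below redirects its
// predecessors straight to %dest and folds %p into %q:
//   bb:
//     %p = phi i32 [ %x, %a ], [ %y, %b ]
//     br label %dest
//   dest:
//     %q = phi i32 [ %p, %bb ], [ %z, %c ]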
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
                    << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      assert(SinglePred == BB &&
             "Single predecessor not the same as predecessor");
      // Merge DestBB into SinglePred/BB and delete it.
      MergeBlockIntoPredecessor(DestBB);
      // Note: BB(=SinglePred) will not be deleted on this path.
      // DestBB(=its single successor) is the one that was deleted.
      LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  for (PHINode &PN : DestBB->phis()) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN.removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN.addIncoming(InValPhi->getIncomingValue(i),
                       InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (BasicBlock *Pred : predecessors(BB))
          PN.addIncoming(InVal, Pred);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate
// calls.
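// For example (illustrative statepoint operand indices): given relocates with
// (base, derived) index pairs (7, 7), (7, 9) and (7, 10), the pair (7, 7) is
// the base relocation, and the resulting map is
//   relocate(7, 7) -> [relocate(7, 9), relocate(7, 10)].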
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
    DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
        &RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base
  // object while filling the second map; the second map is the final structure
  // holding a mapping between Base and corresponding Derived relocate calls.
  DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  for (auto *ThisRelocate : AllRelocateCalls) {
    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
                            ThisRelocate->getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    GCRelocateInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}

// Accepts a GEP and extracts the operands into a vector, provided they're all
// small integer constants.
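// E.g. (illustrative) for "getelementptr %base, i32 0, i32 12" this collects
// {0, 12} and returns true; any non-constant index, or a constant above the
// arbitrary smallness threshold of 20, makes it return false with OffsetV
// left untouched.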
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}

// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and effects it.
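// For illustration (hypothetical values), a pattern such as
//   %ptr      = getelementptr %base, i32 15
//   <statepoint relocating %base and %ptr>
//   %base.rel = gc.relocate(%base)
//   %ptr.rel  = gc.relocate(%ptr)
// is rewritten so the derived pointer is recomputed from the relocated base
// instead of being separately relocated:
//   %base.rel = gc.relocate(%base)
//   %ptr.rel  = getelementptr %base.rel, i32 15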
1060 | static bool | |||
1061 | simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase, | |||
1062 | const SmallVectorImpl<GCRelocateInst *> &Targets) { | |||
1063 | bool MadeChange = false; | |||
1064 | // We must ensure the relocation of derived pointer is defined after | |||
1065 | // relocation of base pointer. If we find a relocation corresponding to base | |||
1066 | // defined earlier than relocation of base then we move relocation of base | |||
1067 | // right before found relocation. We consider only relocation in the same | |||
1068 | // basic block as relocation of base. Relocations from other basic block will | |||
1069 | // be skipped by optimization and we do not care about them. | |||
1070 | for (auto R = RelocatedBase->getParent()->getFirstInsertionPt(); | |||
1071 | &*R != RelocatedBase; ++R) | |||
1072 | if (auto *RI = dyn_cast<GCRelocateInst>(R)) | |||
1073 | if (RI->getStatepoint() == RelocatedBase->getStatepoint()) | |||
1074 | if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) { | |||
1075 | RelocatedBase->moveBefore(RI); | |||
1076 | break; | |||
1077 | } | |||
1078 | ||||
1079 | for (GCRelocateInst *ToReplace : Targets) { | |||
1080 | assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&((void)0) | |||
1081 | "Not relocating a derived object of the original base object")((void)0); | |||
1082 | if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) { | |||
1083 | // A duplicate relocate call. TODO: coalesce duplicates. | |||
1084 | continue; | |||
1085 | } | |||
1086 | ||||
1087 | if (RelocatedBase->getParent() != ToReplace->getParent()) { | |||
1088 | // Base and derived relocates are in different basic blocks. | |||
1089 | // In this case transform is only valid when base dominates derived | |||
1090 | // relocate. However it would be too expensive to check dominance | |||
1091 | // for each such relocate, so we skip the whole transformation. | |||
1092 | continue; | |||
1093 | } | |||
1094 | ||||
1095 | Value *Base = ToReplace->getBasePtr(); | |||
1096 | auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr()); | |||
1097 | if (!Derived || Derived->getPointerOperand() != Base) | |||
1098 | continue; | |||
1099 | ||||
1100 | SmallVector<Value *, 2> OffsetV; | |||
1101 | if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV)) | |||
1102 | continue; | |||
1103 | ||||
1104 | // Create a Builder and replace the target callsite with a gep | |||
1105 | assert(RelocatedBase->getNextNode() && | |||
1106 | "Should always have one since it's not a terminator"); | |||
1107 | ||||
1108 | // Insert after RelocatedBase | |||
1109 | IRBuilder<> Builder(RelocatedBase->getNextNode()); | |||
1110 | Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc()); | |||
1111 | ||||
1112 | // If gc_relocate does not match the actual type, cast it to the right type. | |||
1113 | // In theory, there must be a bitcast after gc_relocate if the type does not | |||
1114 | // match, and we should reuse it to get the derived pointer. But it could be | |||
1115 | // cases like this: | |||
1116 | // bb1: | |||
1117 | // ... | |||
1118 | // %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...) | |||
1119 | // br label %merge | |||
1120 | // | |||
1121 | // bb2: | |||
1122 | // ... | |||
1123 | // %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...) | |||
1124 | // br label %merge | |||
1125 | // | |||
1126 | // merge: | |||
1127 | // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ] | |||
1128 | // %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)* | |||
1129 | // | |||
1130 | // In this case, we cannot find the bitcast any more. So we insert a new bitcast | |||
1131 | // whether one already exists or not. In this way, we can handle all cases, and | |||
1132 | // the extra bitcast should be optimized away in later passes. | |||
1133 | Value *ActualRelocatedBase = RelocatedBase; | |||
1134 | if (RelocatedBase->getType() != Base->getType()) { | |||
1135 | ActualRelocatedBase = | |||
1136 | Builder.CreateBitCast(RelocatedBase, Base->getType()); | |||
1137 | } | |||
1138 | Value *Replacement = Builder.CreateGEP( | |||
1139 | Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV)); | |||
1140 | Replacement->takeName(ToReplace); | |||
1141 | // If the newly generated derived pointer's type does not match the original derived | |||
1142 | // pointer's type, cast the new derived pointer to match it. Same reasoning as above. | |||
1143 | Value *ActualReplacement = Replacement; | |||
1144 | if (Replacement->getType() != ToReplace->getType()) { | |||
1145 | ActualReplacement = | |||
1146 | Builder.CreateBitCast(Replacement, ToReplace->getType()); | |||
1147 | } | |||
1148 | ToReplace->replaceAllUsesWith(ActualReplacement); | |||
1149 | ToReplace->eraseFromParent(); | |||
1150 | ||||
1151 | MadeChange = true; | |||
1152 | } | |||
1153 | return MadeChange; | |||
1154 | } | |||
1155 | ||||
1156 | // Turns this: | |||
1157 | // | |||
1158 | // %base = ... | |||
1159 | // %ptr = gep %base + 15 | |||
1160 | // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) | |||
1161 | // %base' = relocate(%tok, i32 4, i32 4) | |||
1162 | // %ptr' = relocate(%tok, i32 4, i32 5) | |||
1163 | // %val = load %ptr' | |||
1164 | // | |||
1165 | // into this: | |||
1166 | // | |||
1167 | // %base = ... | |||
1168 | // %ptr = gep %base + 15 | |||
1169 | // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) | |||
1170 | // %base' = gc.relocate(%tok, i32 4, i32 4) | |||
1171 | // %ptr' = gep %base' + 15 | |||
1172 | // %val = load %ptr' | |||
1173 | bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) { | |||
1174 | bool MadeChange = false; | |||
1175 | SmallVector<GCRelocateInst *, 2> AllRelocateCalls; | |||
1176 | for (auto *U : I.users()) | |||
1177 | if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U)) | |||
1178 | // Collect all the relocate calls associated with a statepoint | |||
1179 | AllRelocateCalls.push_back(Relocate); | |||
1180 | ||||
1181 | // We need at least one base pointer relocation + one derived pointer | |||
1182 | // relocation to mangle | |||
1183 | if (AllRelocateCalls.size() < 2) | |||
1184 | return false; | |||
1185 | ||||
1186 | // RelocateInstMap is a mapping from the base relocate instruction to the | |||
1187 | // corresponding derived relocate instructions | |||
1188 | DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap; | |||
1189 | computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap); | |||
1190 | if (RelocateInstMap.empty()) | |||
1191 | return false; | |||
1192 | ||||
1193 | for (auto &Item : RelocateInstMap) | |||
1194 | // Item.first is the RelocatedBase to offset against | |||
1195 | // Item.second is the vector of Targets to replace | |||
1196 | MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second); | |||
1197 | return MadeChange; | |||
1198 | } | |||
1199 | ||||
1200 | /// Sink the specified cast instruction into its user blocks. | |||
1201 | static bool SinkCast(CastInst *CI) { | |||
1202 | BasicBlock *DefBB = CI->getParent(); | |||
1203 | ||||
1204 | /// InsertedCasts - Only insert a cast in each block once. | |||
1205 | DenseMap<BasicBlock*, CastInst*> InsertedCasts; | |||
1206 | ||||
1207 | bool MadeChange = false; | |||
1208 | for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); | |||
1209 | UI != E; ) { | |||
1210 | Use &TheUse = UI.getUse(); | |||
1211 | Instruction *User = cast<Instruction>(*UI); | |||
1212 | ||||
1213 | // Figure out which BB this cast is used in. For PHI's this is the | |||
1214 | // appropriate predecessor block. | |||
1215 | BasicBlock *UserBB = User->getParent(); | |||
1216 | if (PHINode *PN = dyn_cast<PHINode>(User)) { | |||
1217 | UserBB = PN->getIncomingBlock(TheUse); | |||
1218 | } | |||
1219 | ||||
1220 | // Preincrement use iterator so we don't invalidate it. | |||
1221 | ++UI; | |||
1222 | ||||
1223 | // The first insertion point of a block containing an EH pad is after the | |||
1224 | // pad. If the pad is the user, we cannot sink the cast past the pad. | |||
1225 | if (User->isEHPad()) | |||
1226 | continue; | |||
1227 | ||||
1228 | // If the block selected to receive the cast is an EH pad that does not | |||
1229 | // allow non-PHI instructions before the terminator, we can't sink the | |||
1230 | // cast. | |||
1231 | if (UserBB->getTerminator()->isEHPad()) | |||
1232 | continue; | |||
1233 | ||||
1234 | // If this user is in the same block as the cast, don't change the cast. | |||
1235 | if (UserBB == DefBB) continue; | |||
1236 | ||||
1237 | // If we have already inserted a cast into this block, use it. | |||
1238 | CastInst *&InsertedCast = InsertedCasts[UserBB]; | |||
1239 | ||||
1240 | if (!InsertedCast) { | |||
1241 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
1242 | assert(InsertPt != UserBB->end()); | |||
1243 | InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0), | |||
1244 | CI->getType(), "", &*InsertPt); | |||
1245 | InsertedCast->setDebugLoc(CI->getDebugLoc()); | |||
1246 | } | |||
1247 | ||||
1248 | // Replace a use of the cast with a use of the new cast. | |||
1249 | TheUse = InsertedCast; | |||
1250 | MadeChange = true; | |||
1251 | ++NumCastUses; | |||
1252 | } | |||
1253 | ||||
1254 | // If we removed all uses, nuke the cast. | |||
1255 | if (CI->use_empty()) { | |||
1256 | salvageDebugInfo(*CI); | |||
1257 | CI->eraseFromParent(); | |||
1258 | MadeChange = true; | |||
1259 | } | |||
1260 | ||||
1261 | return MadeChange; | |||
1262 | } | |||
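// For illustration (hypothetical IR), given
//   bb0:
//     %c = zext i16 %v to i32
//     br i1 %p, label %bb1, label %bb2
//   bb1:
//     %u1 = add i32 %c, 1
//   bb2:
//     %u2 = add i32 %c, 2
// SinkCast materializes a copy of the zext at the front of bb1 and bb2,
// rewrites each use, and erases the now-dead original, so the cast no longer
// produces a value that is live across the branch.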
1263 | ||||
1264 | /// If the specified cast instruction is a noop copy (e.g. it's casting from | |||
1265 | /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to | |||
1266 | /// reduce the number of virtual registers that must be created and coalesced. | |||
1267 | /// | |||
1268 | /// Return true if any changes are made. | |||
1269 | static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI, | |||
1270 | const DataLayout &DL) { | |||
1271 | // Sink only "cheap" (or nop) address-space casts. This is a weaker condition | |||
1272 | // than sinking only nop casts, but is helpful on some platforms. | |||
1273 | if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) { | |||
1274 | if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(), | |||
1275 | ASC->getDestAddressSpace())) | |||
1276 | return false; | |||
1277 | } | |||
1278 | ||||
1279 | // Check whether this is a noop copy. | |||
1280 | EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType()); | |||
1281 | EVT DstVT = TLI.getValueType(DL, CI->getType()); | |||
1282 | ||||
1283 | // Is this an fp<->int conversion? | |||
1284 | if (SrcVT.isInteger() != DstVT.isInteger()) | |||
1285 | return false; | |||
1286 | ||||
1287 | // If this is an extension, it will be a zero or sign extension, which | |||
1288 | // isn't a noop. | |||
1289 | if (SrcVT.bitsLT(DstVT)) return false; | |||
1290 | ||||
1291 | // If these values will be promoted, find out what they will be promoted | |||
1292 | // to. This helps us consider truncates on PPC as noop copies when they | |||
1293 | // are. | |||
1294 | if (TLI.getTypeAction(CI->getContext(), SrcVT) == | |||
1295 | TargetLowering::TypePromoteInteger) | |||
1296 | SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); | |||
1297 | if (TLI.getTypeAction(CI->getContext(), DstVT) == | |||
1298 | TargetLowering::TypePromoteInteger) | |||
1299 | DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); | |||
1300 | ||||
1301 | // If, after promotion, these are the same types, this is a noop copy. | |||
1302 | if (SrcVT != DstVT) | |||
1303 | return false; | |||
1304 | ||||
1305 | return SinkCast(CI); | |||
1306 | } | |||
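// A concrete case (assuming a target that promotes i16 to i32, e.g. PPC):
// for "%t = trunc i32 %x to i16", SrcVT is i32 and DstVT is i16; after
// promotion both become i32, so SrcVT == DstVT, the trunc counts as a noop
// copy, and it is sunk by SinkCast above.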
1307 | ||||
1308 | // Match a simple increment by constant operation. Note that if a sub is | |||
1309 | // matched, the step is negated (as if the step had been canonicalized to | |||
1310 | // an add, even though we leave the instruction alone.) | |||
1311 | bool matchIncrement(const Instruction* IVInc, Instruction *&LHS, | |||
1312 | Constant *&Step) { | |||
1313 | if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) || | |||
1314 | match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>( | |||
1315 | m_Instruction(LHS), m_Constant(Step))))) | |||
1316 | return true; | |||
1317 | if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) || | |||
1318 | match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>( | |||
1319 | m_Instruction(LHS), m_Constant(Step))))) { | |||
1320 | Step = ConstantExpr::getNeg(Step); | |||
1321 | return true; | |||
1322 | } | |||
1323 | return false; | |||
1324 | } | |||
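// Example (hypothetical IR): "%iv.next = add i32 %iv, 4" matches with
// LHS = %iv and Step = i32 4, while "%iv.next = sub i32 %iv, 4" matches with
// LHS = %iv and Step = i32 -4, i.e. the step is reported as if the sub had
// already been rewritten as an add.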
1325 | ||||
1326 | /// If given \p PN is an induction variable with value IVInc coming from the | |||
1327 | /// backedge, and on each iteration it gets increased by Step, return pair | |||
1328 | /// <IVInc, Step>. Otherwise, return None. | |||
1329 | static Optional<std::pair<Instruction *, Constant *> > | |||
1330 | getIVIncrement(const PHINode *PN, const LoopInfo *LI) { | |||
1331 | const Loop *L = LI->getLoopFor(PN->getParent()); | |||
1332 | if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch()) | |||
1333 | return None; | |||
1334 | auto *IVInc = | |||
1335 | dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch())); | |||
1336 | if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L) | |||
1337 | return None; | |||
1338 | Instruction *LHS = nullptr; | |||
1339 | Constant *Step = nullptr; | |||
1340 | if (matchIncrement(IVInc, LHS, Step) && LHS == PN) | |||
1341 | return std::make_pair(IVInc, Step); | |||
1342 | return None; | |||
1343 | } | |||
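// For a canonical loop (hypothetical IR):
//   header:
//     %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ]
//     ...
//   latch:
//     %iv.next = add i32 %iv, 1
//     br i1 %done, label %exit, label %header
// this returns the pair <%iv.next, i32 1>. A PHI that is not in a loop
// header, or whose latch value is not a constant-step increment of the PHI
// itself, yields None.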
1344 | ||||
1345 | static bool isIVIncrement(const Value *V, const LoopInfo *LI) { | |||
1346 | auto *I = dyn_cast<Instruction>(V); | |||
1347 | if (!I) | |||
1348 | return false; | |||
1349 | Instruction *LHS = nullptr; | |||
1350 | Constant *Step = nullptr; | |||
1351 | if (!matchIncrement(I, LHS, Step)) | |||
1352 | return false; | |||
1353 | if (auto *PN = dyn_cast<PHINode>(LHS)) | |||
1354 | if (auto IVInc = getIVIncrement(PN, LI)) | |||
1355 | return IVInc->first == I; | |||
1356 | return false; | |||
1357 | } | |||
1358 | ||||
1359 | bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO, | |||
1360 | Value *Arg0, Value *Arg1, | |||
1361 | CmpInst *Cmp, | |||
1362 | Intrinsic::ID IID) { | |||
1363 | auto IsReplaceableIVIncrement = [this, &Cmp](BinaryOperator *BO) { | |||
1364 | if (!isIVIncrement(BO, LI)) | |||
1365 | return false; | |||
1366 | const Loop *L = LI->getLoopFor(BO->getParent()); | |||
1367 | assert(L && "L should not be null after isIVIncrement()"); | |||
1368 | // Do not risk moving the increment into a child loop. | |||
1369 | if (LI->getLoopFor(Cmp->getParent()) != L) | |||
1370 | return false; | |||
1371 | ||||
1372 | // Finally, we need to ensure that the insert point will dominate all | |||
1373 | // existing uses of the increment. | |||
1374 | ||||
1375 | auto &DT = getDT(*BO->getParent()->getParent()); | |||
1376 | if (DT.dominates(Cmp->getParent(), BO->getParent())) | |||
1377 | // If we're moving up the dom tree, all uses are trivially dominated. | |||
1378 | // (This is the common case for code produced by LSR.) | |||
1379 | return true; | |||
1380 | ||||
1381 | // Otherwise, special case the single use in the phi recurrence. | |||
1382 | return BO->hasOneUse() && DT.dominates(Cmp->getParent(), L->getLoopLatch()); | |||
1383 | }; | |||
1384 | if (BO->getParent() != Cmp->getParent() && !IsReplaceableIVIncrement(BO)) { | |||
1385 | // We used to use a dominator tree here to allow multi-block optimization. | |||
1386 | // But that was problematic because: | |||
1387 | // 1. It could cause a perf regression by hoisting the math op into the | |||
1388 | // critical path. | |||
1389 | // 2. It could cause a perf regression by creating a value that was live | |||
1390 | // across multiple blocks and increasing register pressure. | |||
1391 | // 3. Use of a dominator tree could cause large compile-time regression. | |||
1392 | // This is because we recompute the DT on every change in the main CGP | |||
1393 | // run-loop. The recomputing is probably unnecessary in many cases, so if | |||
1394 | // that was fixed, using a DT here would be ok. | |||
1395 | // | |||
1396 | // There is one important particular case we still want to handle: if BO is | |||
1397 | // the IV increment. Important properties that make it profitable: | |||
1398 | // - We can speculate IV increment anywhere in the loop (as long as the | |||
1399 | // indvar Phi is its only user); | |||
1400 | // - Upon computing Cmp, we effectively compute something equivalent to the | |||
1401 | // IV increment (even though it may look different in the IR). So moving it | |||
1402 | // up to the cmp point does not really increase register pressure. | |||
1403 | return false; | |||
1404 | } | |||
1405 | ||||
1406 | // We allow matching the canonical IR (add X, C) back to (usubo X, -C). | |||
1407 | if (BO->getOpcode() == Instruction::Add && | |||
1408 | IID == Intrinsic::usub_with_overflow) { | |||
1409 | assert(isa<Constant>(Arg1) && "Unexpected input for usubo"); | |||
1410 | Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1)); | |||
1411 | } | |||
1412 | ||||
1413 | // Insert at the first instruction of the pair. | |||
1414 | Instruction *InsertPt = nullptr; | |||
1415 | for (Instruction &Iter : *Cmp->getParent()) { | |||
1416 | // If BO is an XOR, it is not guaranteed that it comes after both inputs to | |||
1417 | // the overflow intrinsic are defined. | |||
1418 | if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) { | |||
1419 | InsertPt = &Iter; | |||
1420 | break; | |||
1421 | } | |||
1422 | } | |||
1423 | assert(InsertPt != nullptr && "Parent block did not contain cmp or binop"); | |||
1424 | ||||
1425 | IRBuilder<> Builder(InsertPt); | |||
1426 | Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1); | |||
1427 | if (BO->getOpcode() != Instruction::Xor) { | |||
1428 | Value *Math = Builder.CreateExtractValue(MathOV, 0, "math"); | |||
1429 | BO->replaceAllUsesWith(Math); | |||
1430 | } else | |||
1431 | assert(BO->hasOneUse() && | |||
1432 | "Patterns with XOr should use the BO only in the compare"); | |||
1433 | Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov"); | |||
1434 | Cmp->replaceAllUsesWith(OV); | |||
1435 | Cmp->eraseFromParent(); | |||
1436 | BO->eraseFromParent(); | |||
1437 | return true; | |||
1438 | } | |||
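// Net effect for IID == Intrinsic::uadd_with_overflow, sketched on
// hypothetical IR:
//   %add = add i32 %x, %y           ; BO
//   %cmp = icmp ult i32 %add, %x    ; Cmp
// becomes
//   %m    = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
//   %math = extractvalue { i32, i1 } %m, 0
//   %ov   = extractvalue { i32, i1 } %m, 1
// with uses of %add replaced by %math and uses of %cmp replaced by %ov.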
1439 | ||||
1440 | /// Match special-case patterns that check for unsigned add overflow. | |||
1441 | static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp, | |||
1442 | BinaryOperator *&Add) { | |||
1443 | // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val) | |||
1444 | // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero) | |||
1445 | Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1); | |||
1446 | ||||
1447 | // We are not expecting non-canonical/degenerate code. Just bail out. | |||
1448 | if (isa<Constant>(A)) | |||
1449 | return false; | |||
1450 | ||||
1451 | ICmpInst::Predicate Pred = Cmp->getPredicate(); | |||
1452 | if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes())) | |||
1453 | B = ConstantInt::get(B->getType(), 1); | |||
1454 | else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) | |||
1455 | B = ConstantInt::get(B->getType(), -1); | |||
1456 | else | |||
1457 | return false; | |||
1458 | ||||
1459 | // Check the users of the variable operand of the compare looking for an add | |||
1460 | // with the adjusted constant. | |||
1461 | for (User *U : A->users()) { | |||
1462 | if (match(U, m_Add(m_Specific(A), m_Specific(B)))) { | |||
1463 | Add = cast<BinaryOperator>(U); | |||
1464 | return true; | |||
1465 | } | |||
1466 | } | |||
1467 | return false; | |||
1468 | } | |||
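// Concretely (hypothetical IR): for "%c = icmp eq i32 %a, -1" the constant is
// adjusted to 1 and the users of %a are scanned for "%add = add i32 %a, 1";
// %a + 1 wraps exactly when %a is the maximum unsigned value, which is what
// the equality against -1 tests.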
1469 | ||||
1470 | /// Try to combine the compare into a call to the llvm.uadd.with.overflow | |||
1471 | /// intrinsic. Return true if any changes were made. | |||
1472 | bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp, | |||
1473 | bool &ModifiedDT) { | |||
1474 | Value *A, *B; | |||
1475 | BinaryOperator *Add; | |||
1476 | if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) { | |||
1477 | if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add)) | |||
1478 | return false; | |||
1479 | // Set A and B from the add matched by matchUAddWithOverflowConstantEdgeCases. | |||
1480 | A = Add->getOperand(0); | |||
1481 | B = Add->getOperand(1); | |||
1482 | } | |||
1483 | ||||
1484 | if (!TLI->shouldFormOverflowOp(ISD::UADDO, | |||
1485 | TLI->getValueType(*DL, Add->getType()), | |||
1486 | Add->hasNUsesOrMore(2))) | |||
1487 | return false; | |||
1488 | ||||
1489 | // We don't want to move around uses of condition values this late, so we | |||
1490 | // check if it is legal to create the call to the intrinsic in the basic | |||
1491 | // block containing the icmp. | |||
1492 | if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse()) | |||
1493 | return false; | |||
1494 | ||||
1495 | if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp, | |||
1496 | Intrinsic::uadd_with_overflow)) | |||
1497 | return false; | |||
1498 | ||||
1499 | // Reset callers - do not crash by iterating over a dead instruction. | |||
1500 | ModifiedDT = true; | |||
1501 | return true; | |||
1502 | } | |||
1503 | ||||
1504 | bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp, | |||
1505 | bool &ModifiedDT) { | |||
1506 | // We are not expecting non-canonical/degenerate code. Just bail out. | |||
1507 | Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1); | |||
1508 | if (isa<Constant>(A) && isa<Constant>(B)) | |||
1509 | return false; | |||
1510 | ||||
1511 | // Convert (A u> B) to (A u< B) to simplify pattern matching. | |||
1512 | ICmpInst::Predicate Pred = Cmp->getPredicate(); | |||
1513 | if (Pred == ICmpInst::ICMP_UGT) { | |||
1514 | std::swap(A, B); | |||
1515 | Pred = ICmpInst::ICMP_ULT; | |||
1516 | } | |||
1517 | // Convert special-case: (A == 0) is the same as (A u< 1). | |||
1518 | if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) { | |||
1519 | B = ConstantInt::get(B->getType(), 1); | |||
1520 | Pred = ICmpInst::ICMP_ULT; | |||
1521 | } | |||
1522 | // Convert special-case: (A != 0) is the same as (0 u< A). | |||
1523 | if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) { | |||
1524 | std::swap(A, B); | |||
1525 | Pred = ICmpInst::ICMP_ULT; | |||
1526 | } | |||
1527 | if (Pred != ICmpInst::ICMP_ULT) | |||
1528 | return false; | |||
1529 | ||||
1530 | // Walk the users of a variable operand of a compare looking for a subtract or | |||
1531 | // add with that same operand. Also match the 2nd operand of the compare to | |||
1532 | // the add/sub, but that may be a negated constant operand of an add. | |||
1533 | Value *CmpVariableOperand = isa<Constant>(A) ? B : A; | |||
1534 | BinaryOperator *Sub = nullptr; | |||
1535 | for (User *U : CmpVariableOperand->users()) { | |||
1536 | // A - B, A u< B --> usubo(A, B) | |||
1537 | if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) { | |||
1538 | Sub = cast<BinaryOperator>(U); | |||
1539 | break; | |||
1540 | } | |||
1541 | ||||
1542 | // A + (-C), A u< C (canonicalized form of (sub A, C)) | |||
1543 | const APInt *CmpC, *AddC; | |||
1544 | if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) && | |||
1545 | match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) { | |||
1546 | Sub = cast<BinaryOperator>(U); | |||
1547 | break; | |||
1548 | } | |||
1549 | } | |||
1550 | if (!Sub) | |||
1551 | return false; | |||
1552 | ||||
1553 | if (!TLI->shouldFormOverflowOp(ISD::USUBO, | |||
1554 | TLI->getValueType(*DL, Sub->getType()), | |||
1555 | Sub->hasNUsesOrMore(2))) | |||
1556 | return false; | |||
1557 | ||||
1558 | if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1), | |||
1559 | Cmp, Intrinsic::usub_with_overflow)) | |||
1560 | return false; | |||
1561 | ||||
1562 | // Reset callers - do not crash by iterating over a dead instruction. | |||
1563 | ModifiedDT = true; | |||
1564 | return true; | |||
1565 | } | |||
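// Example of the second matched form (hypothetical IR):
//   %s = add i32 %a, -42            ; canonical form of (sub %a, 42)
//   %c = icmp ult i32 %a, 42
// qualifies because AddC (-42) is the negation of CmpC (42). Since the binop
// is an add, replaceMathCmpWithIntrinsic negates the constant back and emits
// @llvm.usub.with.overflow.i32(i32 %a, i32 42).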
1566 | ||||
1567 | /// Sink the given CmpInst into user blocks to reduce the number of virtual | |||
1568 | /// registers that must be created and coalesced. This is a clear win except on | |||
1569 | /// targets with multiple condition code registers (PowerPC), where it might | |||
1570 | /// lose; some adjustment may be wanted there. | |||
1571 | /// | |||
1572 | /// Return true if any changes are made. | |||
1573 | static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) { | |||
1574 | if (TLI.hasMultipleConditionRegisters()) | |||
1575 | return false; | |||
1576 | ||||
1577 | // Avoid sinking soft-FP comparisons, since this can move them into a loop. | |||
1578 | if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp)) | |||
1579 | return false; | |||
1580 | ||||
1581 | // Only insert a cmp in each block once. | |||
1582 | DenseMap<BasicBlock*, CmpInst*> InsertedCmps; | |||
1583 | ||||
1584 | bool MadeChange = false; | |||
1585 | for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end(); | |||
1586 | UI != E; ) { | |||
1587 | Use &TheUse = UI.getUse(); | |||
1588 | Instruction *User = cast<Instruction>(*UI); | |||
1589 | ||||
1590 | // Preincrement use iterator so we don't invalidate it. | |||
1591 | ++UI; | |||
1592 | ||||
1593 | // Don't bother for PHI nodes. | |||
1594 | if (isa<PHINode>(User)) | |||
1595 | continue; | |||
1596 | ||||
1597 | // Figure out which BB this cmp is used in. | |||
1598 | BasicBlock *UserBB = User->getParent(); | |||
1599 | BasicBlock *DefBB = Cmp->getParent(); | |||
1600 | ||||
1601 | // If this user is in the same block as the cmp, don't change the cmp. | |||
1602 | if (UserBB == DefBB) continue; | |||
1603 | ||||
1604 | // If we have already inserted a cmp into this block, use it. | |||
1605 | CmpInst *&InsertedCmp = InsertedCmps[UserBB]; | |||
1606 | ||||
1607 | if (!InsertedCmp) { | |||
1608 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
1609 | assert(InsertPt != UserBB->end()); | |||
1610 | InsertedCmp = | |||
1611 | CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), | |||
1612 | Cmp->getOperand(0), Cmp->getOperand(1), "", | |||
1613 | &*InsertPt); | |||
1614 | // Propagate the debug info. | |||
1615 | InsertedCmp->setDebugLoc(Cmp->getDebugLoc()); | |||
1616 | } | |||
1617 | ||||
1618 | // Replace a use of the cmp with a use of the new cmp. | |||
1619 | TheUse = InsertedCmp; | |||
1620 | MadeChange = true; | |||
1621 | ++NumCmpUses; | |||
1622 | } | |||
1623 | ||||
1624 | // If we removed all uses, nuke the cmp. | |||
1625 | if (Cmp->use_empty()) { | |||
1626 | Cmp->eraseFromParent(); | |||
1627 | MadeChange = true; | |||
1628 | } | |||
1629 | ||||
1630 | return MadeChange; | |||
1631 | } | |||
1632 | ||||
1633 | /// For pattern like: | |||
1634 | /// | |||
1635 | /// DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB) | |||
1636 | /// ... | |||
1637 | /// DomBB: | |||
1638 | /// ... | |||
1639 | /// br DomCond, TrueBB, CmpBB | |||
1640 | /// CmpBB: (with DomBB being the single predecessor) | |||
1641 | /// ... | |||
1642 | /// Cmp = icmp eq CmpOp0, CmpOp1 | |||
1643 | /// ... | |||
1644 | /// | |||
1645 | /// This would use two comparisons on targets where the lowering of icmp | |||
1646 | /// sgt/slt differs from the lowering of icmp eq (PowerPC). This function tries | |||
1647 | /// to convert 'Cmp = icmp eq CmpOp0, CmpOp1' into 'Cmp = icmp slt/sgt CmpOp0, | |||
1648 | /// CmpOp1'. After that, DomCond and Cmp can share the same comparison, saving | |||
1649 | /// one comparison. | |||
1650 | /// | |||
1651 | /// Return true if any changes are made. | |||
1652 | static bool foldICmpWithDominatingICmp(CmpInst *Cmp, | |||
1653 | const TargetLowering &TLI) { | |||
1654 | if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp()) | |||
1655 | return false; | |||
1656 | ||||
1657 | ICmpInst::Predicate Pred = Cmp->getPredicate(); | |||
1658 | if (Pred != ICmpInst::ICMP_EQ) | |||
1659 | return false; | |||
1660 | ||||
1661 | // If icmp eq has users other than BranchInst and SelectInst, converting it to | |||
1662 | // icmp slt/sgt would introduce more redundant LLVM IR. | |||
1663 | for (User *U : Cmp->users()) { | |||
1664 | if (isa<BranchInst>(U)) | |||
1665 | continue; | |||
1666 | if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp) | |||
1667 | continue; | |||
1668 | return false; | |||
1669 | } | |||
1670 | ||||
1671 | // This is a cheap/incomplete check for dominance - just match a single | |||
1672 | // predecessor with a conditional branch. | |||
1673 | BasicBlock *CmpBB = Cmp->getParent(); | |||
1674 | BasicBlock *DomBB = CmpBB->getSinglePredecessor(); | |||
1675 | if (!DomBB) | |||
1676 | return false; | |||
1677 | ||||
1678 | // We want to ensure that the only way control gets to the comparison of | |||
1679 | // interest is that a less/greater than comparison on the same operands is | |||
1680 | // false. | |||
1681 | Value *DomCond; | |||
1682 | BasicBlock *TrueBB, *FalseBB; | |||
1683 | if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB))) | |||
1684 | return false; | |||
1685 | if (CmpBB != FalseBB) | |||
1686 | return false; | |||
1687 | ||||
1688 | Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1); | |||
1689 | ICmpInst::Predicate DomPred; | |||
1690 | if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1)))) | |||
1691 | return false; | |||
1692 | if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT) | |||
1693 | return false; | |||
1694 | ||||
1695 | // Convert the equality comparison to the opposite of the dominating | |||
1696 | // comparison and swap the direction for all branch/select users. | |||
1697 | // We have conceptually converted: | |||
1698 | // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>; | |||
1699 | // to | |||
1700 | // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>; | |||
1701 | // And similarly for branches. | |||
1702 | for (User *U : Cmp->users()) { | |||
1703 | if (auto *BI = dyn_cast<BranchInst>(U)) { | |||
1704 | assert(BI->isConditional() && "Must be conditional"); | |||
1705 | BI->swapSuccessors(); | |||
1706 | continue; | |||
1707 | } | |||
1708 | if (auto *SI = dyn_cast<SelectInst>(U)) { | |||
1709 | // Swap operands | |||
1710 | SI->swapValues(); | |||
1711 | SI->swapProfMetadata(); | |||
1712 | continue; | |||
1713 | } | |||
1714 | llvm_unreachable("Must be a branch or a select"); | |||
1715 | } | |||
1716 | Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred)); | |||
1717 | return true; | |||
1718 | } | |||
1719 | ||||
1720 | bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, bool &ModifiedDT) { | |||
1721 | if (sinkCmpExpression(Cmp, *TLI)) | |||
1722 | return true; | |||
1723 | ||||
1724 | if (combineToUAddWithOverflow(Cmp, ModifiedDT)) | |||
1725 | return true; | |||
1726 | ||||
1727 | if (combineToUSubWithOverflow(Cmp, ModifiedDT)) | |||
1728 | return true; | |||
1729 | ||||
1730 | if (foldICmpWithDominatingICmp(Cmp, *TLI)) | |||
1731 | return true; | |||
1732 | ||||
1733 | return false; | |||
1734 | } | |||
1735 | ||||
1736 | /// Duplicate and sink the given 'and' instruction into user blocks where it is | |||
1737 | /// used in a compare to allow isel to generate better code for targets where | |||
1738 | /// this operation can be combined. | |||
1739 | /// | |||
1740 | /// Return true if any changes are made. | |||
1741 | static bool sinkAndCmp0Expression(Instruction *AndI, | |||
1742 | const TargetLowering &TLI, | |||
1743 | SetOfInstrs &InsertedInsts) { | |||
1744 | // Double-check that we're not trying to optimize an instruction that was | |||
1745 | // already optimized by some other part of this pass. | |||
1746 | assert(!InsertedInsts.count(AndI) && | |||
1747 | "Attempting to optimize already optimized and instruction"); | |||
1748 | (void) InsertedInsts; | |||
1749 | ||||
1750 | // Nothing to do for single use in same basic block. | |||
1751 | if (AndI->hasOneUse() && | |||
1752 | AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent()) | |||
1753 | return false; | |||
1754 | ||||
1755 | // Try to avoid cases where sinking/duplicating is likely to increase register | |||
1756 | // pressure. | |||
1757 | if (!isa<ConstantInt>(AndI->getOperand(0)) && | |||
1758 | !isa<ConstantInt>(AndI->getOperand(1)) && | |||
1759 | AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse()) | |||
1760 | return false; | |||
1761 | ||||
1762 | for (auto *U : AndI->users()) { | |||
1763 | Instruction *User = cast<Instruction>(U); | |||
1764 | ||||
1765 | // Only sink 'and' feeding icmp with 0. | |||
1766 | if (!isa<ICmpInst>(User)) | |||
1767 | return false; | |||
1768 | ||||
1769 | auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1)); | |||
1770 | if (!CmpC || !CmpC->isZero()) | |||
1771 | return false; | |||
1772 | } | |||
1773 | ||||
1774 | if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI)) | |||
1775 | return false; | |||
1776 | ||||
1777 | LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n"); | |||
1778 | LLVM_DEBUG(AndI->getParent()->dump()); | |||
1779 | ||||
1780 | // Push the 'and' into the same block as the icmp 0. There should only be | |||
1781 | // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any | |||
1782 | // others, so we don't need to keep track of which BBs we insert into. | |||
1783 | for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end(); | |||
1784 | UI != E; ) { | |||
1785 | Use &TheUse = UI.getUse(); | |||
1786 | Instruction *User = cast<Instruction>(*UI); | |||
1787 | ||||
1788 | // Preincrement use iterator so we don't invalidate it. | |||
1789 | ++UI; | |||
1790 | ||||
1791 | LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n"); | |||
1792 | ||||
1793 | // Keep the 'and' in the same place if the use is already in the same block. | |||
1794 | Instruction *InsertPt = | |||
1795 | User->getParent() == AndI->getParent() ? AndI : User; | |||
1796 | Instruction *InsertedAnd = | |||
1797 | BinaryOperator::Create(Instruction::And, AndI->getOperand(0), | |||
1798 | AndI->getOperand(1), "", InsertPt); | |||
1799 | // Propagate the debug info. | |||
1800 | InsertedAnd->setDebugLoc(AndI->getDebugLoc()); | |||
1801 | ||||
1802 | // Replace a use of the 'and' with a use of the new 'and'. | |||
1803 | TheUse = InsertedAnd; | |||
1804 | ++NumAndUses; | |||
1805 | LLVM_DEBUG(User->getParent()->dump()); | |||
1806 | } | |||
1807 | ||||
1808 | // We removed all uses, nuke the and. | |||
1809 | AndI->eraseFromParent(); | |||
1810 | return true; | |||
1811 | } | |||
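// Sketch of the rewrite (hypothetical IR): an 'and' in one block whose only
// users are "icmp eq %and, 0" instructions in other blocks is duplicated
// next to each icmp and the original is erased, so isel sees the mask and the
// compare against zero together in one block (e.g. foldable into a
// test-under-mask style instruction on targets that have one).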
1812 | ||||
1813 | /// Check if the candidates could be combined with a shift instruction, which | |||
1814 | /// includes: | |||
1815 | /// 1. Truncate instruction | |||
1816 | /// 2. And instruction whose immediate is a mask of the low bits: | |||
1817 | /// imm & (imm+1) == 0 | |||
1818 | static bool isExtractBitsCandidateUse(Instruction *User) { | |||
1819 | if (!isa<TruncInst>(User)) { | |||
1820 | if (User->getOpcode() != Instruction::And || | |||
1821 | !isa<ConstantInt>(User->getOperand(1))) | |||
1822 | return false; | |||
1823 | ||||
1824 | const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); | |||
1825 | ||||
1826 | if ((Cimm & (Cimm + 1)).getBoolValue()) | |||
1827 | return false; | |||
1828 | } | |||
1829 | return true; | |||
1830 | } | |||
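// The low-bit-mask test in numbers: for imm = 0x0F, imm & (imm + 1) is
// 0x0F & 0x10 == 0, so the 'and' qualifies; for imm = 0x0A, 0x0A & 0x0B is
// 0x0A != 0, so it does not.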
1831 | ||||
1832 | /// Sink both shift and truncate instruction to the use of truncate's BB. | |||
1833 | static bool | |||
1834 | SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, | |||
1835 | DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, | |||
1836 | const TargetLowering &TLI, const DataLayout &DL) { | |||
1837 | BasicBlock *UserBB = User->getParent(); | |||
1838 | DenseMap<BasicBlock *, CastInst *> InsertedTruncs; | |||
1839 | auto *TruncI = cast<TruncInst>(User); | |||
1840 | bool MadeChange = false; | |||
1841 | ||||
1842 | for (Value::user_iterator TruncUI = TruncI->user_begin(), | |||
1843 | TruncE = TruncI->user_end(); | |||
1844 | TruncUI != TruncE;) { | |||
1845 | ||||
1846 | Use &TruncTheUse = TruncUI.getUse(); | |||
1847 | Instruction *TruncUser = cast<Instruction>(*TruncUI); | |||
1848 | // Preincrement use iterator so we don't invalidate it. | |||
1849 | ||||
1850 | ++TruncUI; | |||
1851 | ||||
1852 | int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); | |||
1853 | if (!ISDOpcode) | |||
1854 | continue; | |||
1855 | ||||
1856 | // If the use is actually a legal node, there will not be an | |||
1857 | // implicit truncate. | |||
1858 | // FIXME: always querying the result type is just an | |||
1859 | // approximation; some nodes' legality is determined by the | |||
1860 | // operand or other means. There's no good way to find out though. | |||
1861 | if (TLI.isOperationLegalOrCustom( | |||
1862 | ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true))) | |||
1863 | continue; | |||
1864 | ||||
1865 | // Don't bother for PHI nodes. | |||
1866 | if (isa<PHINode>(TruncUser)) | |||
1867 | continue; | |||
1868 | ||||
1869 | BasicBlock *TruncUserBB = TruncUser->getParent(); | |||
1870 | ||||
1871 | if (UserBB == TruncUserBB) | |||
1872 | continue; | |||
1873 | ||||
1874 | BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; | |||
1875 | CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; | |||
1876 | ||||
1877 | if (!InsertedShift && !InsertedTrunc) { | |||
1878 | BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); | |||
1879 | assert(InsertPt != TruncUserBB->end()); | |||
1880 | // Sink the shift | |||
1881 | if (ShiftI->getOpcode() == Instruction::AShr) | |||
1882 | InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, | |||
1883 | "", &*InsertPt); | |||
1884 | else | |||
1885 | InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, | |||
1886 | "", &*InsertPt); | |||
1887 | InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); | |||
1888 | ||||
1889 | // Sink the trunc | |||
1890 | BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); | |||
1891 | TruncInsertPt++; | |||
1892 | assert(TruncInsertPt != TruncUserBB->end()); | |||
1893 | ||||
1894 | InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, | |||
1895 | TruncI->getType(), "", &*TruncInsertPt); | |||
1896 | InsertedTrunc->setDebugLoc(TruncI->getDebugLoc()); | |||
1897 | ||||
1898 | MadeChange = true; | |||
1899 | ||||
1900 | TruncTheUse = InsertedTrunc; | |||
1901 | } | |||
1902 | } | |||
1903 | return MadeChange; | |||
1904 | } | |||
1905 | ||||
1906 | /// Sink the shift *right* instruction into user blocks if the uses could | |||
1907 | /// potentially be combined with this shift instruction to generate a | |||
1908 | /// BitExtract instruction. This is only applied if the architecture supports | |||
1909 | /// a BitExtract instruction. Here is an example: | |||
1910 | /// BB1: | |||
1911 | /// %x.extract.shift = lshr i64 %arg1, 32 | |||
1912 | /// BB2: | |||
1913 | /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 | |||
1914 | /// ==> | |||
1915 | /// | |||
1916 | /// BB2: | |||
1917 | /// %x.extract.shift.1 = lshr i64 %arg1, 32 | |||
1918 | /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 | |||
1919 | /// | |||
1920 | /// CodeGen will recognize the pattern in BB2 and generate BitExtract | |||
1921 | /// instruction. | |||
1922 | /// Return true if any changes are made. | |||
1923 | static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, | |||
1924 | const TargetLowering &TLI, | |||
1925 | const DataLayout &DL) { | |||
1926 | BasicBlock *DefBB = ShiftI->getParent(); | |||
1927 | ||||
1928 | /// Only insert instructions in each block once. | |||
1929 | DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; | |||
1930 | ||||
1931 | bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType())); | |||
1932 | ||||
1933 | bool MadeChange = false; | |||
1934 | for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); | |||
1935 | UI != E;) { | |||
1936 | Use &TheUse = UI.getUse(); | |||
1937 | Instruction *User = cast<Instruction>(*UI); | |||
1938 | // Preincrement use iterator so we don't invalidate it. | |||
1939 | ++UI; | |||
1940 | ||||
1941 | // Don't bother for PHI nodes. | |||
1942 | if (isa<PHINode>(User)) | |||
1943 | continue; | |||
1944 | ||||
1945 | if (!isExtractBitsCandidateUse(User)) | |||
1946 | continue; | |||
1947 | ||||
1948 | BasicBlock *UserBB = User->getParent(); | |||
1949 | ||||
1950 | if (UserBB == DefBB) { | |||
1951 | // If the shift and truncate instructions are in the same BB, the use of | |||
1952 | // the truncate (TruncUse) may still introduce another truncate if its type | |||
1953 | // is not legal. In this case, we would like to sink both the shift and the | |||
1954 | // truncate to the BB of TruncUse. | |||
1955 | // for example: | |||
1956 | // BB1: | |||
1957 | // i64 shift.result = lshr i64 opnd, imm | |||
1958 | // trunc.result = trunc shift.result to i16 | |||
1959 | // | |||
1960 | // BB2: | |||
1961 | // ----> We will have an implicit truncate here if the architecture does | |||
1962 | // not have i16 compare. | |||
1963 | // cmp i16 trunc.result, opnd2 | |||
1964 | // | |||
1965 | if (isa<TruncInst>(User) && shiftIsLegal | |||
1966 | // If the type of the truncate is legal, no truncate will be | |||
1967 | // introduced in other basic blocks. | |||
1968 | && | |||
1969 | (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) | |||
1970 | MadeChange = | |||
1971 | SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); | |||
1972 | ||||
1973 | continue; | |||
1974 | } | |||
1975 | // If we have already inserted a shift into this block, use it. | |||
1976 | BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; | |||
1977 | ||||
1978 | if (!InsertedShift) { | |||
1979 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
1980 | assert(InsertPt != UserBB->end()); | |||
1981 | ||||
1982 | if (ShiftI->getOpcode() == Instruction::AShr) | |||
1983 | InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, | |||
1984 | "", &*InsertPt); | |||
1985 | else | |||
1986 | InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, | |||
1987 | "", &*InsertPt); | |||
1988 | InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); | |||
1989 | ||||
1990 | MadeChange = true; | |||
1991 | } | |||
1992 | ||||
1993 | // Replace a use of the shift with a use of the new shift. | |||
1994 | TheUse = InsertedShift; | |||
1995 | } | |||
1996 | ||||
1997 | // If we removed all uses, or there are none, nuke the shift. | |||
1998 | if (ShiftI->use_empty()) { | |||
1999 | salvageDebugInfo(*ShiftI); | |||
2000 | ShiftI->eraseFromParent(); | |||
2001 | MadeChange = true; | |||
2002 | } | |||
2003 | ||||
2004 | return MadeChange; | |||
2005 | } | |||
2006 | ||||
2007 | /// If counting leading or trailing zeros is an expensive operation and a zero | |||
2008 | /// input is defined, add a check for zero to avoid calling the intrinsic. | |||
2009 | /// | |||
2010 | /// We want to transform: | |||
2011 | /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) | |||
2012 | /// | |||
2013 | /// into: | |||
2014 | /// entry: | |||
2015 | /// %cmpz = icmp eq i64 %A, 0 | |||
2016 | /// br i1 %cmpz, label %cond.end, label %cond.false | |||
2017 | /// cond.false: | |||
2018 | /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) | |||
2019 | /// br label %cond.end | |||
2020 | /// cond.end: | |||
2021 | /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] | |||
2022 | /// | |||
2023 | /// If the transform is performed, return true and set ModifiedDT to true. | |||
2024 | static bool despeculateCountZeros(IntrinsicInst *CountZeros, | |||
2025 | const TargetLowering *TLI, | |||
2026 | const DataLayout *DL, | |||
2027 | bool &ModifiedDT) { | |||
2028 | // If a zero input is undefined, it doesn't make sense to despeculate that. | |||
2029 | if (match(CountZeros->getOperand(1), m_One())) | |||
2030 | return false; | |||
2031 | ||||
2032 | // If it's cheap to speculate, there's nothing to do. | |||
2033 | auto IntrinsicID = CountZeros->getIntrinsicID(); | |||
2034 | if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) || | |||
2035 | (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz())) | |||
2036 | return false; | |||
2037 | ||||
2038 | // Only handle legal scalar cases. Anything else requires too much work. | |||
2039 | Type *Ty = CountZeros->getType(); | |||
2040 | unsigned SizeInBits = Ty->getPrimitiveSizeInBits(); | |||
2041 | if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits()) | |||
2042 | return false; | |||
2043 | ||||
2044 | // Bail if the value is never zero. | |||
2045 | if (llvm::isKnownNonZero(CountZeros->getOperand(0), *DL)) | |||
2046 | return false; | |||
2047 | ||||
2048 | // The intrinsic will be sunk behind a compare against zero and branch. | |||
2049 | BasicBlock *StartBlock = CountZeros->getParent(); | |||
2050 | BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); | |||
2051 | ||||
2052 | // Create another block after the count zero intrinsic. A PHI will be added | |||
2053 | // in this block to select the result of the intrinsic or the bit-width | |||
2054 | // constant if the input to the intrinsic is zero. | |||
2055 | BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros)); | |||
2056 | BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end"); | |||
2057 | ||||
2058 | // Set up a builder to create a compare, conditional branch, and PHI. | |||
2059 | IRBuilder<> Builder(CountZeros->getContext()); | |||
2060 | Builder.SetInsertPoint(StartBlock->getTerminator()); | |||
2061 | Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc()); | |||
2062 | ||||
2063 | // Replace the unconditional branch that was created by the first split with | |||
2064 | // a compare against zero and a conditional branch. | |||
2065 | Value *Zero = Constant::getNullValue(Ty); | |||
2066 | Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz"); | |||
2067 | Builder.CreateCondBr(Cmp, EndBlock, CallBlock); | |||
2068 | StartBlock->getTerminator()->eraseFromParent(); | |||
2069 | ||||
2070 | // Create a PHI in the end block to select either the output of the intrinsic | |||
2071 | // or the bit width of the operand. | |||
2072 | Builder.SetInsertPoint(&EndBlock->front()); | |||
2073 | PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz"); | |||
2074 | CountZeros->replaceAllUsesWith(PN); | |||
2075 | Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits)); | |||
2076 | PN->addIncoming(BitWidth, StartBlock); | |||
2077 | PN->addIncoming(CountZeros, CallBlock); | |||
2078 | ||||
2079 | // We are explicitly handling the zero case, so we can set the intrinsic's | |||
2080 | // undefined zero argument to 'true'. This will also prevent reprocessing the | |||
2081 | // intrinsic; we only despeculate when a zero input is defined. | |||
2082 | CountZeros->setArgOperand(1, Builder.getTrue()); | |||
2083 | ModifiedDT = true; | |||
2084 | return true; | |||
2085 | } | |||
2086 | ||||
2087 | bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) { | |||
2088 | BasicBlock *BB = CI->getParent(); | |||
2089 | ||||
2090 | // Lower inline assembly if we can. | |||
2091 | // If we found an inline asm expression, and if the target knows how to | |||
2092 | // lower it to normal LLVM code, do so now. | |||
2093 | if (CI->isInlineAsm()) { | |||
2094 | if (TLI->ExpandInlineAsm(CI)) { | |||
2095 | // Avoid invalidating the iterator. | |||
2096 | CurInstIterator = BB->begin(); | |||
2097 | // Avoid processing instructions out of order, which could cause | |||
2098 | // reuse before a value is defined. | |||
2099 | SunkAddrs.clear(); | |||
2100 | return true; | |||
2101 | } | |||
2102 | // Sink address computing for memory operands into the block. | |||
2103 | if (optimizeInlineAsmInst(CI)) | |||
2104 | return true; | |||
2105 | } | |||
2106 | ||||
2107 | // Align the pointer arguments to this call if the target thinks it's a good | |||
2108 | // idea | |||
2109 | unsigned MinSize, PrefAlign; | |||
2110 | if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) { | |||
2111 | for (auto &Arg : CI->arg_operands()) { | |||
2112 | // We want to align both objects whose address is used directly and | |||
2113 | // objects whose address is used in casts and GEPs, though it only makes | |||
2114 | // sense for GEPs if the offset is a multiple of the desired alignment and | |||
2115 | // if size - offset meets the size threshold. | |||
2116 | if (!Arg->getType()->isPointerTy()) | |||
2117 | continue; | |||
2118 | APInt Offset(DL->getIndexSizeInBits( | |||
2119 | cast<PointerType>(Arg->getType())->getAddressSpace()), | |||
2120 | 0); | |||
2121 | Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); | |||
2122 | uint64_t Offset2 = Offset.getLimitedValue(); | |||
2123 | if ((Offset2 & (PrefAlign-1)) != 0) | |||
2124 | continue; | |||
2125 | AllocaInst *AI; | |||
2126 | if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign && | |||
2127 | DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) | |||
2128 | AI->setAlignment(Align(PrefAlign)); | |||
2129 | // Global variables can only be aligned if they are defined in this | |||
2130 | // object (i.e. they are uniquely initialized in this object), and | |||
2131 | // over-aligning global variables that have an explicit section is | |||
2132 | // forbidden. | |||
2133 | GlobalVariable *GV; | |||
2134 | if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && | |||
2135 | GV->getPointerAlignment(*DL) < PrefAlign && | |||
2136 | DL->getTypeAllocSize(GV->getValueType()) >= | |||
2137 | MinSize + Offset2) | |||
2138 | GV->setAlignment(MaybeAlign(PrefAlign)); | |||
2139 | } | |||
2140 | // If this is a memcpy (or similar) then we may be able to improve the | |||
2141 | // alignment | |||
2142 | if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { | |||
2143 | Align DestAlign = getKnownAlignment(MI->getDest(), *DL); | |||
2144 | MaybeAlign MIDestAlign = MI->getDestAlign(); | |||
2145 | if (!MIDestAlign || DestAlign > *MIDestAlign) | |||
2146 | MI->setDestAlignment(DestAlign); | |||
2147 | if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { | |||
2148 | MaybeAlign MTISrcAlign = MTI->getSourceAlign(); | |||
2149 | Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL); | |||
2150 | if (!MTISrcAlign || SrcAlign > *MTISrcAlign) | |||
2151 | MTI->setSourceAlignment(SrcAlign); | |||
2152 | } | |||
2153 | } | |||
2154 | } | |||
2155 | ||||
2156 | // If we have a cold call site, try to sink addressing computation into the | |||
2157 | // cold block. This interacts with our handling for loads and stores to | |||
2158 | // ensure that we can fold all uses of a potential addressing computation | |||
2159 | // into their uses. TODO: generalize this to work over profiling data | |||
2160 | if (CI->hasFnAttr(Attribute::Cold) && | |||
2161 | !OptSize && !llvm::shouldOptimizeForSize(BB, PSI, BFI.get())) | |||
2162 | for (auto &Arg : CI->arg_operands()) { | |||
2163 | if (!Arg->getType()->isPointerTy()) | |||
2164 | continue; | |||
2165 | unsigned AS = Arg->getType()->getPointerAddressSpace(); | |||
2166 | return optimizeMemoryInst(CI, Arg, Arg->getType(), AS); | |||
2167 | } | |||
2168 | ||||
2169 | IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); | |||
2170 | if (II) { | |||
2171 | switch (II->getIntrinsicID()) { | |||
2172 | default: break; | |||
2173 | case Intrinsic::assume: | |||
2174 | llvm_unreachable("llvm.assume should have been removed already"); | |||
2175 | case Intrinsic::experimental_widenable_condition: { | |||
2176 | // Give up on future widening opportunities so that we can fold away dead | |||
2177 | // paths and merge blocks before going into block-local instruction | |||
2178 | // selection. | |||
2179 | if (II->use_empty()) { | |||
2180 | II->eraseFromParent(); | |||
2181 | return true; | |||
2182 | } | |||
2183 | Constant *RetVal = ConstantInt::getTrue(II->getContext()); | |||
2184 | resetIteratorIfInvalidatedWhileCalling(BB, [&]() { | |||
2185 | replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); | |||
2186 | }); | |||
2187 | return true; | |||
2188 | } | |||
2189 | case Intrinsic::objectsize: | |||
2190 | llvm_unreachable("llvm.objectsize.* should have been lowered already"); | |||
2191 | case Intrinsic::is_constant: | |||
2192 | llvm_unreachable("llvm.is.constant.* should have been lowered already"); | |||
2193 | case Intrinsic::aarch64_stlxr: | |||
2194 | case Intrinsic::aarch64_stxr: { | |||
2195 | ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); | |||
2196 | if (!ExtVal || !ExtVal->hasOneUse() || | |||
2197 | ExtVal->getParent() == CI->getParent()) | |||
2198 | return false; | |||
2199 | // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. | |||
2200 | ExtVal->moveBefore(CI); | |||
2201 | // Mark this instruction as "inserted by CGP", so that other | |||
2202 | // optimizations don't touch it. | |||
2203 | InsertedInsts.insert(ExtVal); | |||
2204 | return true; | |||
2205 | } | |||
2206 | ||||
2207 | case Intrinsic::launder_invariant_group: | |||
2208 | case Intrinsic::strip_invariant_group: { | |||
2209 | Value *ArgVal = II->getArgOperand(0); | |||
2210 | auto it = LargeOffsetGEPMap.find(II); | |||
2211 | if (it != LargeOffsetGEPMap.end()) { | |||
2212 | // Merge entries in LargeOffsetGEPMap to reflect the RAUW. | |||
2213 | // Make sure not to have to deal with iterator invalidation | |||
2214 | // after possibly adding ArgVal to LargeOffsetGEPMap. | |||
2215 | auto GEPs = std::move(it->second); | |||
2216 | LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end()); | |||
2217 | LargeOffsetGEPMap.erase(II); | |||
2218 | } | |||
2219 | ||||
2220 | II->replaceAllUsesWith(ArgVal); | |||
2221 | II->eraseFromParent(); | |||
2222 | return true; | |||
2223 | } | |||
2224 | case Intrinsic::cttz: | |||
2225 | case Intrinsic::ctlz: | |||
2226 | // If counting zeros is expensive, try to avoid it. | |||
2227 | return despeculateCountZeros(II, TLI, DL, ModifiedDT); | |||
2228 | case Intrinsic::fshl: | |||
2229 | case Intrinsic::fshr: | |||
2230 | return optimizeFunnelShift(II); | |||
2231 | case Intrinsic::dbg_value: | |||
2232 | return fixupDbgValue(II); | |||
2233 | case Intrinsic::vscale: { | |||
2234 | // If datalayout has no special restrictions on vector data layout, | |||
2235 | // replace `llvm.vscale` by an equivalent constant expression | |||
2236 | // to benefit from cheap constant propagation. | |||
2237 | Type *ScalableVectorTy = | |||
2238 | VectorType::get(Type::getInt8Ty(II->getContext()), 1, true); | |||
2239 | if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinSize() == 8) { | |||
2240 | auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo()); | |||
2241 | auto *One = ConstantInt::getSigned(II->getType(), 1); | |||
2242 | auto *CGep = | |||
2243 | ConstantExpr::getGetElementPtr(ScalableVectorTy, Null, One); | |||
2244 | II->replaceAllUsesWith(ConstantExpr::getPtrToInt(CGep, II->getType())); | |||
2245 | II->eraseFromParent(); | |||
2246 | return true; | |||
2247 | } | |||
2248 | break; | |||
2249 | } | |||
2250 | case Intrinsic::masked_gather: | |||
2251 | return optimizeGatherScatterInst(II, II->getArgOperand(0)); | |||
2252 | case Intrinsic::masked_scatter: | |||
2253 | return optimizeGatherScatterInst(II, II->getArgOperand(1)); | |||
2254 | } | |||
2255 | ||||
2256 | SmallVector<Value *, 2> PtrOps; | |||
2257 | Type *AccessTy; | |||
2258 | if (TLI->getAddrModeArguments(II, PtrOps, AccessTy)) | |||
2259 | while (!PtrOps.empty()) { | |||
2260 | Value *PtrVal = PtrOps.pop_back_val(); | |||
2261 | unsigned AS = PtrVal->getType()->getPointerAddressSpace(); | |||
2262 | if (optimizeMemoryInst(II, PtrVal, AccessTy, AS)) | |||
2263 | return true; | |||
2264 | } | |||
2265 | } | |||
2266 | ||||
2267 | // From here on out we're working with named functions. | |||
2268 | if (!CI->getCalledFunction()) return false; | |||
2269 | ||||
2270 | // Lower all default uses of _chk calls. This is very similar | |||
2271 | // to what InstCombineCalls does, but here we are only lowering calls | |||
2272 | // to fortified library functions (e.g. __memcpy_chk) that have the default | |||
2273 | // "don't know" as the objectsize. Anything else should be left alone. | |||
2274 | FortifiedLibCallSimplifier Simplifier(TLInfo, true); | |||
2275 | IRBuilder<> Builder(CI); | |||
2276 | if (Value *V = Simplifier.optimizeCall(CI, Builder)) { | |||
2277 | CI->replaceAllUsesWith(V); | |||
2278 | CI->eraseFromParent(); | |||
2279 | return true; | |||
2280 | } | |||
2281 | ||||
2282 | return false; | |||
2283 | } | |||
2284 | ||||
2285 | /// Look for opportunities to duplicate return instructions to the predecessor | |||
2286 | /// to enable tail call optimizations. The case it is currently looking for is: | |||
2287 | /// @code | |||
2288 | /// bb0: | |||
2289 | /// %tmp0 = tail call i32 @f0() | |||
2290 | /// br label %return | |||
2291 | /// bb1: | |||
2292 | /// %tmp1 = tail call i32 @f1() | |||
2293 | /// br label %return | |||
2294 | /// bb2: | |||
2295 | /// %tmp2 = tail call i32 @f2() | |||
2296 | /// br label %return | |||
2297 | /// return: | |||
2298 | /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] | |||
2299 | /// ret i32 %retval | |||
2300 | /// @endcode | |||
2301 | /// | |||
2302 | /// => | |||
2303 | /// | |||
2304 | /// @code | |||
2305 | /// bb0: | |||
2306 | /// %tmp0 = tail call i32 @f0() | |||
2307 | /// ret i32 %tmp0 | |||
2308 | /// bb1: | |||
2309 | /// %tmp1 = tail call i32 @f1() | |||
2310 | /// ret i32 %tmp1 | |||
2311 | /// bb2: | |||
2312 | /// %tmp2 = tail call i32 @f2() | |||
2313 | /// ret i32 %tmp2 | |||
2314 | /// @endcode | |||
2315 | bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT) { | |||
2316 | ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator()); | |||
2317 | if (!RetI) | |||
2318 | return false; | |||
2319 | ||||
2320 | PHINode *PN = nullptr; | |||
2321 | ExtractValueInst *EVI = nullptr; | |||
2322 | BitCastInst *BCI = nullptr; | |||
2323 | Value *V = RetI->getReturnValue(); | |||
2324 | if (V) { | |||
2325 | BCI = dyn_cast<BitCastInst>(V); | |||
2326 | if (BCI) | |||
2327 | V = BCI->getOperand(0); | |||
2328 | ||||
2329 | EVI = dyn_cast<ExtractValueInst>(V); | |||
2330 | if (EVI) { | |||
2331 | V = EVI->getOperand(0); | |||
2332 | if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; })) | |||
2333 | return false; | |||
2334 | } | |||
2335 | ||||
2336 | PN = dyn_cast<PHINode>(V); | |||
2337 | if (!PN) | |||
2338 | return false; | |||
2339 | } | |||
2340 | ||||
2341 | if (PN && PN->getParent() != BB) | |||
2342 | return false; | |||
2343 | ||||
2344 | auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) { | |||
2345 | const BitCastInst *BC = dyn_cast<BitCastInst>(Inst); | |||
2346 | if (BC && BC->hasOneUse()) | |||
2347 | Inst = BC->user_back(); | |||
2348 | ||||
2349 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) | |||
2350 | return II->getIntrinsicID() == Intrinsic::lifetime_end; | |||
2351 | return false; | |||
2352 | }; | |||
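     | // Illustrative IR the lambda above matches (assumed example): the tail of
     | //   %c = bitcast [8 x i8]* %buf to i8*
     | //   call void @llvm.lifetime.end.p0i8(i64 8, i8* %c)
     | // i.e. a one-use bitcast feeding a lifetime.end marker, which may sit
     | // between the tail call and the return without blocking the rewrite.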
2353 | ||||
2354 | // Make sure there are no instructions between the first instruction | |||
2355 | // and return. | |||
2356 | const Instruction *BI = BB->getFirstNonPHI(); | |||
2357 | // Skip over debug and the bitcast. | |||
2358 | while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI || | |||
2359 | isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI)) | |||
2360 | BI = BI->getNextNode(); | |||
2361 | if (BI != RetI) | |||
2362 | return false; | |||
2363 | ||||
2364 | /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail | |||
2365 | /// call. | |||
2366 | const Function *F = BB->getParent(); | |||
2367 | SmallVector<BasicBlock*, 4> TailCallBBs; | |||
2368 | if (PN) { | |||
2369 | for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { | |||
2370 | // Look through bitcasts. | |||
2371 | Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts(); | |||
2372 | CallInst *CI = dyn_cast<CallInst>(IncomingVal); | |||
2373 | BasicBlock *PredBB = PN->getIncomingBlock(I); | |||
2374 | // Make sure the phi value is indeed produced by the tail call. | |||
2375 | if (CI && CI->hasOneUse() && CI->getParent() == PredBB && | |||
2376 | TLI->mayBeEmittedAsTailCall(CI) && | |||
2377 | attributesPermitTailCall(F, CI, RetI, *TLI)) | |||
2378 | TailCallBBs.push_back(PredBB); | |||
2379 | } | |||
2380 | } else { | |||
2381 | SmallPtrSet<BasicBlock*, 4> VisitedBBs; | |||
2382 | for (BasicBlock *Pred : predecessors(BB)) { | |||
2383 | if (!VisitedBBs.insert(Pred).second) | |||
2384 | continue; | |||
2385 | if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) { | |||
2386 | CallInst *CI = dyn_cast<CallInst>(I); | |||
2387 | if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && | |||
2388 | attributesPermitTailCall(F, CI, RetI, *TLI)) | |||
2389 | TailCallBBs.push_back(Pred); | |||
2390 | } | |||
2391 | } | |||
2392 | } | |||
2393 | ||||
2394 | bool Changed = false; | |||
2395 | for (auto const &TailCallBB : TailCallBBs) { | |||
2396 | // Make sure the call instruction is followed by an unconditional branch to | |||
2397 | // the return block. | |||
2398 | BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator()); | |||
2399 | if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) | |||
2400 | continue; | |||
2401 | ||||
2402 | // Duplicate the return into TailCallBB. | |||
2403 | (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB); | |||
2404 |     assert(!VerifyBFIUpdates ||
2405 |            BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
2406 | BFI->setBlockFreq( | |||
2407 | BB, | |||
2408 | (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)).getFrequency()); | |||
2409 | ModifiedDT = Changed = true; | |||
2410 | ++NumRetsDup; | |||
2411 | } | |||
2412 | ||||
2413 | // If we eliminated all predecessors of the block, delete the block now. | |||
2414 | if (Changed && !BB->hasAddressTaken() && pred_empty(BB)) | |||
2415 | BB->eraseFromParent(); | |||
2416 | ||||
2417 | return Changed; | |||
2418 | } | |||
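     | // A small worked example of the frequency update above (numbers assumed):
     | // if the return block BB had frequency 100 and a duplicated-into
     | // predecessor had frequency 40, BB is reset to 100 - 40 = 60, since that
     | // predecessor no longer branches through BB.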
2419 | ||||
2420 | //===----------------------------------------------------------------------===// | |||
2421 | // Memory Optimization | |||
2422 | //===----------------------------------------------------------------------===// | |||
2423 | ||||
2424 | namespace { | |||
2425 | ||||
2426 | /// This is an extended version of TargetLowering::AddrMode | |||
2427 | /// which holds actual Value*'s for register values. | |||
2428 | struct ExtAddrMode : public TargetLowering::AddrMode { | |||
2429 | Value *BaseReg = nullptr; | |||
2430 | Value *ScaledReg = nullptr; | |||
2431 | Value *OriginalValue = nullptr; | |||
2432 | bool InBounds = true; | |||
2433 | ||||
2434 | enum FieldName { | |||
2435 | NoField = 0x00, | |||
2436 | BaseRegField = 0x01, | |||
2437 | BaseGVField = 0x02, | |||
2438 | BaseOffsField = 0x04, | |||
2439 | ScaledRegField = 0x08, | |||
2440 | ScaleField = 0x10, | |||
2441 | MultipleFields = 0xff | |||
2442 | }; | |||
2443 | ||||
2444 | ||||
2445 | ExtAddrMode() = default; | |||
2446 | ||||
2447 | void print(raw_ostream &OS) const; | |||
2448 | void dump() const; | |||
2449 | ||||
2450 | FieldName compare(const ExtAddrMode &other) { | |||
2451 |     // First check that the types are the same on each field, as differing
2452 |     // types are something we can't cope with later on.
2453 | if (BaseReg && other.BaseReg && | |||
2454 | BaseReg->getType() != other.BaseReg->getType()) | |||
2455 | return MultipleFields; | |||
2456 | if (BaseGV && other.BaseGV && | |||
2457 | BaseGV->getType() != other.BaseGV->getType()) | |||
2458 | return MultipleFields; | |||
2459 | if (ScaledReg && other.ScaledReg && | |||
2460 | ScaledReg->getType() != other.ScaledReg->getType()) | |||
2461 | return MultipleFields; | |||
2462 | ||||
2463 | // Conservatively reject 'inbounds' mismatches. | |||
2464 | if (InBounds != other.InBounds) | |||
2465 | return MultipleFields; | |||
2466 | ||||
2467 | // Check each field to see if it differs. | |||
2468 | unsigned Result = NoField; | |||
2469 | if (BaseReg != other.BaseReg) | |||
2470 | Result |= BaseRegField; | |||
2471 | if (BaseGV != other.BaseGV) | |||
2472 | Result |= BaseGVField; | |||
2473 | if (BaseOffs != other.BaseOffs) | |||
2474 | Result |= BaseOffsField; | |||
2475 | if (ScaledReg != other.ScaledReg) | |||
2476 | Result |= ScaledRegField; | |||
2477 | // Don't count 0 as being a different scale, because that actually means | |||
2478 | // unscaled (which will already be counted by having no ScaledReg). | |||
2479 | if (Scale && other.Scale && Scale != other.Scale) | |||
2480 | Result |= ScaleField; | |||
2481 | ||||
2482 | if (countPopulation(Result) > 1) | |||
2483 | return MultipleFields; | |||
2484 | else | |||
2485 | return static_cast<FieldName>(Result); | |||
2486 | } | |||
2487 | ||||
2488 |   // An AddrMode is trivial if it involves no calculation, i.e. it is just a
2489 |   // base with no offset.
2490 | bool isTrivial() { | |||
2491 | // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is | |||
2492 | // trivial if at most one of these terms is nonzero, except that BaseGV and | |||
2493 | // BaseReg both being zero actually means a null pointer value, which we | |||
2494 | // consider to be 'non-zero' here. | |||
2495 | return !BaseOffs && !Scale && !(BaseGV && BaseReg); | |||
2496 | } | |||
2497 | ||||
2498 | Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) { | |||
2499 | switch (Field) { | |||
2500 | default: | |||
2501 | return nullptr; | |||
2502 | case BaseRegField: | |||
2503 | return BaseReg; | |||
2504 | case BaseGVField: | |||
2505 | return BaseGV; | |||
2506 | case ScaledRegField: | |||
2507 | return ScaledReg; | |||
2508 | case BaseOffsField: | |||
2509 | return ConstantInt::get(IntPtrTy, BaseOffs); | |||
2510 | } | |||
2511 | } | |||
2512 | ||||
2513 | void SetCombinedField(FieldName Field, Value *V, | |||
2514 | const SmallVectorImpl<ExtAddrMode> &AddrModes) { | |||
2515 | switch (Field) { | |||
2516 | default: | |||
2517 |       llvm_unreachable("Unhandled fields are expected to be rejected earlier");
2518 | break; | |||
2519 | case ExtAddrMode::BaseRegField: | |||
2520 | BaseReg = V; | |||
2521 | break; | |||
2522 | case ExtAddrMode::BaseGVField: | |||
2523 | // A combined BaseGV is an Instruction, not a GlobalValue, so it goes | |||
2524 | // in the BaseReg field. | |||
2525 |       assert(BaseReg == nullptr);
2526 | BaseReg = V; | |||
2527 | BaseGV = nullptr; | |||
2528 | break; | |||
2529 | case ExtAddrMode::ScaledRegField: | |||
2530 | ScaledReg = V; | |||
2531 | // If we have a mix of scaled and unscaled addrmodes then we want scale | |||
2532 | // to be the scale and not zero. | |||
2533 | if (!Scale) | |||
2534 | for (const ExtAddrMode &AM : AddrModes) | |||
2535 | if (AM.Scale) { | |||
2536 | Scale = AM.Scale; | |||
2537 | break; | |||
2538 | } | |||
2539 | break; | |||
2540 | case ExtAddrMode::BaseOffsField: | |||
2541 | // The offset is no longer a constant, so it goes in ScaledReg with a | |||
2542 | // scale of 1. | |||
2543 |       assert(ScaledReg == nullptr);
2544 | ScaledReg = V; | |||
2545 | Scale = 1; | |||
2546 | BaseOffs = 0; | |||
2547 | break; | |||
2548 | } | |||
2549 | } | |||
2550 | }; | |||
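     | // Illustrative use of ExtAddrMode::compare (hypothetical values): two
     | // modes identical except for the base register, e.g.
     | //   [Base:%p1 + 16]  vs.  [Base:%p2 + 16]
     | // compare as BaseRegField, so the combiner below can merge them by
     | // building one combined base register (a phi of %p1 and %p2); modes
     | // differing in two or more fields compare as MultipleFields and cannot
     | // be merged.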
2551 | ||||
2552 | } // end anonymous namespace | |||
2553 | ||||
2554 | #ifndef NDEBUG
2555 | static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { | |||
2556 | AM.print(OS); | |||
2557 | return OS; | |||
2558 | } | |||
2559 | #endif | |||
2560 | ||||
2561 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2562 | void ExtAddrMode::print(raw_ostream &OS) const { | |||
2563 | bool NeedPlus = false; | |||
2564 | OS << "["; | |||
2565 | if (InBounds) | |||
2566 | OS << "inbounds "; | |||
2567 | if (BaseGV) { | |||
2568 | OS << (NeedPlus ? " + " : "") | |||
2569 | << "GV:"; | |||
2570 | BaseGV->printAsOperand(OS, /*PrintType=*/false); | |||
2571 | NeedPlus = true; | |||
2572 | } | |||
2573 | ||||
2574 | if (BaseOffs) { | |||
2575 | OS << (NeedPlus ? " + " : "") | |||
2576 | << BaseOffs; | |||
2577 | NeedPlus = true; | |||
2578 | } | |||
2579 | ||||
2580 | if (BaseReg) { | |||
2581 | OS << (NeedPlus ? " + " : "") | |||
2582 | << "Base:"; | |||
2583 | BaseReg->printAsOperand(OS, /*PrintType=*/false); | |||
2584 | NeedPlus = true; | |||
2585 | } | |||
2586 | if (Scale) { | |||
2587 | OS << (NeedPlus ? " + " : "") | |||
2588 | << Scale << "*"; | |||
2589 | ScaledReg->printAsOperand(OS, /*PrintType=*/false); | |||
2590 | } | |||
2591 | ||||
2592 | OS << ']'; | |||
2593 | } | |||
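     | // Example of the printed form (values assumed):
     | //   [inbounds GV:@g + 16 + Base:%base + 4*%idx]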
2594 | ||||
2595 | LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
2596 | print(dbgs()); | |||
2597 | dbgs() << '\n'; | |||
2598 | } | |||
2599 | #endif | |||
2600 | ||||
2601 | namespace { | |||
2602 | ||||
2603 | /// This class provides transaction based operation on the IR. | |||
2604 | /// Every change made through this class is recorded in the internal state and | |||
2605 | /// can be undone (rollback) until commit is called. | |||
2606 | /// CGP does not check if instructions could be speculatively executed when | |||
2607 | /// moved. Preserving the original location would pessimize the debugging | |||
2608 | /// experience, as well as negatively impact the quality of sample PGO. | |||
2609 | class TypePromotionTransaction { | |||
2610 | /// This represents the common interface of the individual transaction. | |||
2611 | /// Each class implements the logic for doing one specific modification on | |||
2612 | /// the IR via the TypePromotionTransaction. | |||
2613 | class TypePromotionAction { | |||
2614 | protected: | |||
2615 | /// The Instruction modified. | |||
2616 | Instruction *Inst; | |||
2617 | ||||
2618 | public: | |||
2619 | /// Constructor of the action. | |||
2620 | /// The constructor performs the related action on the IR. | |||
2621 | TypePromotionAction(Instruction *Inst) : Inst(Inst) {} | |||
2622 | ||||
2623 | virtual ~TypePromotionAction() = default; | |||
2624 | ||||
2625 | /// Undo the modification done by this action. | |||
2626 | /// When this method is called, the IR must be in the same state as it was | |||
2627 | /// before this action was applied. | |||
2628 | /// \pre Undoing the action works if and only if the IR is in the exact same | |||
2629 | /// state as it was directly after this action was applied. | |||
2630 | virtual void undo() = 0; | |||
2631 | ||||
2632 |     /// Commit every change made by this action.
2633 | /// When the results on the IR of the action are to be kept, it is important | |||
2634 | /// to call this function, otherwise hidden information may be kept forever. | |||
2635 | virtual void commit() { | |||
2636 | // Nothing to be done, this action is not doing anything. | |||
2637 | } | |||
2638 | }; | |||
2639 | ||||
2640 | /// Utility to remember the position of an instruction. | |||
2641 | class InsertionHandler { | |||
2642 | /// Position of an instruction. | |||
2643 |     /// Either the instruction:
2644 |     /// - is the first in its basic block: BB is used.
2645 |     /// - has a previous instruction: PrevInst is used.
2646 | union { | |||
2647 | Instruction *PrevInst; | |||
2648 | BasicBlock *BB; | |||
2649 | } Point; | |||
2650 | ||||
2651 | /// Remember whether or not the instruction had a previous instruction. | |||
2652 | bool HasPrevInstruction; | |||
2653 | ||||
2654 | public: | |||
2655 | /// Record the position of \p Inst. | |||
2656 | InsertionHandler(Instruction *Inst) { | |||
2657 | BasicBlock::iterator It = Inst->getIterator(); | |||
2658 | HasPrevInstruction = (It != (Inst->getParent()->begin())); | |||
2659 | if (HasPrevInstruction) | |||
2660 | Point.PrevInst = &*--It; | |||
2661 | else | |||
2662 | Point.BB = Inst->getParent(); | |||
2663 | } | |||
2664 | ||||
2665 | /// Insert \p Inst at the recorded position. | |||
2666 | void insert(Instruction *Inst) { | |||
2667 | if (HasPrevInstruction) { | |||
2668 | if (Inst->getParent()) | |||
2669 | Inst->removeFromParent(); | |||
2670 | Inst->insertAfter(Point.PrevInst); | |||
2671 | } else { | |||
2672 | Instruction *Position = &*Point.BB->getFirstInsertionPt(); | |||
2673 | if (Inst->getParent()) | |||
2674 | Inst->moveBefore(Position); | |||
2675 | else | |||
2676 | Inst->insertBefore(Position); | |||
2677 | } | |||
2678 | } | |||
2679 | }; | |||
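     | // Minimal usage sketch (assumed caller code, not from this file):
     | //   InsertionHandler Pos(I); // remember where I currently sits
     | //   I->removeFromParent();   // ... speculative transformation ...
     | //   Pos.insert(I);           // on rollback, put I back in place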
2680 | ||||
2681 | /// Move an instruction before another. | |||
2682 | class InstructionMoveBefore : public TypePromotionAction { | |||
2683 | /// Original position of the instruction. | |||
2684 | InsertionHandler Position; | |||
2685 | ||||
2686 | public: | |||
2687 | /// Move \p Inst before \p Before. | |||
2688 | InstructionMoveBefore(Instruction *Inst, Instruction *Before) | |||
2689 | : TypePromotionAction(Inst), Position(Inst) { | |||
2690 |       LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
2691 |                         << "\n");
2692 | Inst->moveBefore(Before); | |||
2693 | } | |||
2694 | ||||
2695 | /// Move the instruction back to its original position. | |||
2696 | void undo() override { | |||
2697 |       LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
2698 | Position.insert(Inst); | |||
2699 | } | |||
2700 | }; | |||
2701 | ||||
2702 | /// Set the operand of an instruction with a new value. | |||
2703 | class OperandSetter : public TypePromotionAction { | |||
2704 | /// Original operand of the instruction. | |||
2705 | Value *Origin; | |||
2706 | ||||
2707 | /// Index of the modified instruction. | |||
2708 | unsigned Idx; | |||
2709 | ||||
2710 | public: | |||
2711 | /// Set \p Idx operand of \p Inst with \p NewVal. | |||
2712 | OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) | |||
2713 | : TypePromotionAction(Inst), Idx(Idx) { | |||
2714 |       LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
2715 |                         << "for:" << *Inst << "\n"
2716 |                         << "with:" << *NewVal << "\n");
2717 | Origin = Inst->getOperand(Idx); | |||
2718 | Inst->setOperand(Idx, NewVal); | |||
2719 | } | |||
2720 | ||||
2721 | /// Restore the original value of the instruction. | |||
2722 | void undo() override { | |||
2723 |       LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
2724 |                         << "for: " << *Inst << "\n"
2725 |                         << "with: " << *Origin << "\n");
2726 | Inst->setOperand(Idx, Origin); | |||
2727 | } | |||
2728 | }; | |||
2729 | ||||
2730 | /// Hide the operands of an instruction. | |||
2731 | /// Do as if this instruction was not using any of its operands. | |||
2732 | class OperandsHider : public TypePromotionAction { | |||
2733 | /// The list of original operands. | |||
2734 | SmallVector<Value *, 4> OriginalValues; | |||
2735 | ||||
2736 | public: | |||
2737 | /// Remove \p Inst from the uses of the operands of \p Inst. | |||
2738 | OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { | |||
2739 |       LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
2740 | unsigned NumOpnds = Inst->getNumOperands(); | |||
2741 | OriginalValues.reserve(NumOpnds); | |||
2742 | for (unsigned It = 0; It < NumOpnds; ++It) { | |||
2743 | // Save the current operand. | |||
2744 | Value *Val = Inst->getOperand(It); | |||
2745 | OriginalValues.push_back(Val); | |||
2746 | // Set a dummy one. | |||
2747 | // We could use OperandSetter here, but that would imply an overhead | |||
2748 | // that we are not willing to pay. | |||
2749 | Inst->setOperand(It, UndefValue::get(Val->getType())); | |||
2750 | } | |||
2751 | } | |||
2752 | ||||
2753 | /// Restore the original list of uses. | |||
2754 | void undo() override { | |||
2755 |       LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
2756 | for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) | |||
2757 | Inst->setOperand(It, OriginalValues[It]); | |||
2758 | } | |||
2759 | }; | |||
2760 | ||||
2761 | /// Build a truncate instruction. | |||
2762 | class TruncBuilder : public TypePromotionAction { | |||
2763 | Value *Val; | |||
2764 | ||||
2765 | public: | |||
2766 | /// Build a truncate instruction of \p Opnd producing a \p Ty | |||
2767 | /// result. | |||
2768 | /// trunc Opnd to Ty. | |||
2769 | TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { | |||
2770 | IRBuilder<> Builder(Opnd); | |||
2771 | Builder.SetCurrentDebugLocation(DebugLoc()); | |||
2772 | Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); | |||
2773 |       LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
2774 | } | |||
2775 | ||||
2776 | /// Get the built value. | |||
2777 | Value *getBuiltValue() { return Val; } | |||
2778 | ||||
2779 | /// Remove the built instruction. | |||
2780 | void undo() override { | |||
2781 |       LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
2782 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
2783 | IVal->eraseFromParent(); | |||
2784 | } | |||
2785 | }; | |||
2786 | ||||
2787 | /// Build a sign extension instruction. | |||
2788 | class SExtBuilder : public TypePromotionAction { | |||
2789 | Value *Val; | |||
2790 | ||||
2791 | public: | |||
2792 | /// Build a sign extension instruction of \p Opnd producing a \p Ty | |||
2793 | /// result. | |||
2794 | /// sext Opnd to Ty. | |||
2795 | SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) | |||
2796 | : TypePromotionAction(InsertPt) { | |||
2797 | IRBuilder<> Builder(InsertPt); | |||
2798 | Val = Builder.CreateSExt(Opnd, Ty, "promoted"); | |||
2799 |       LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
2800 | } | |||
2801 | ||||
2802 | /// Get the built value. | |||
2803 | Value *getBuiltValue() { return Val; } | |||
2804 | ||||
2805 | /// Remove the built instruction. | |||
2806 | void undo() override { | |||
2807 |       LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
2808 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
2809 | IVal->eraseFromParent(); | |||
2810 | } | |||
2811 | }; | |||
2812 | ||||
2813 | /// Build a zero extension instruction. | |||
2814 | class ZExtBuilder : public TypePromotionAction { | |||
2815 | Value *Val; | |||
2816 | ||||
2817 | public: | |||
2818 | /// Build a zero extension instruction of \p Opnd producing a \p Ty | |||
2819 | /// result. | |||
2820 | /// zext Opnd to Ty. | |||
2821 | ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) | |||
2822 | : TypePromotionAction(InsertPt) { | |||
2823 | IRBuilder<> Builder(InsertPt); | |||
2824 | Builder.SetCurrentDebugLocation(DebugLoc()); | |||
2825 | Val = Builder.CreateZExt(Opnd, Ty, "promoted"); | |||
2826 |       LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
2827 | } | |||
2828 | ||||
2829 | /// Get the built value. | |||
2830 | Value *getBuiltValue() { return Val; } | |||
2831 | ||||
2832 | /// Remove the built instruction. | |||
2833 | void undo() override { | |||
2834 |       LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
2835 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
2836 | IVal->eraseFromParent(); | |||
2837 | } | |||
2838 | }; | |||
2839 | ||||
2840 | /// Mutate an instruction to another type. | |||
2841 | class TypeMutator : public TypePromotionAction { | |||
2842 | /// Record the original type. | |||
2843 | Type *OrigTy; | |||
2844 | ||||
2845 | public: | |||
2846 | /// Mutate the type of \p Inst into \p NewTy. | |||
2847 | TypeMutator(Instruction *Inst, Type *NewTy) | |||
2848 | : TypePromotionAction(Inst), OrigTy(Inst->getType()) { | |||
2849 |       LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
2850 |                         << "\n");
2851 | Inst->mutateType(NewTy); | |||
2852 | } | |||
2853 | ||||
2854 | /// Mutate the instruction back to its original type. | |||
2855 | void undo() override { | |||
2856 |       LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
2857 |                         << "\n");
2858 | Inst->mutateType(OrigTy); | |||
2859 | } | |||
2860 | }; | |||
2861 | ||||
2862 | /// Replace the uses of an instruction by another instruction. | |||
2863 | class UsesReplacer : public TypePromotionAction { | |||
2864 | /// Helper structure to keep track of the replaced uses. | |||
2865 | struct InstructionAndIdx { | |||
2866 |       /// The instruction that uses the replaced instruction.
2867 |       Instruction *Inst;
2868 |
2869 |       /// The operand index at which Inst uses the replaced instruction.
2870 | unsigned Idx; | |||
2871 | ||||
2872 | InstructionAndIdx(Instruction *Inst, unsigned Idx) | |||
2873 | : Inst(Inst), Idx(Idx) {} | |||
2874 | }; | |||
2875 | ||||
2876 | /// Keep track of the original uses (pair Instruction, Index). | |||
2877 | SmallVector<InstructionAndIdx, 4> OriginalUses; | |||
2878 | /// Keep track of the debug users. | |||
2879 | SmallVector<DbgValueInst *, 1> DbgValues; | |||
2880 | ||||
2881 | /// Keep track of the new value so that we can undo it by replacing | |||
2882 | /// instances of the new value with the original value. | |||
2883 | Value *New; | |||
2884 | ||||
2885 | using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator; | |||
2886 | ||||
2887 | public: | |||
2888 | /// Replace all the use of \p Inst by \p New. | |||
2889 | UsesReplacer(Instruction *Inst, Value *New) | |||
2890 | : TypePromotionAction(Inst), New(New) { | |||
2891 |       LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
2892 |                         << "\n");
2893 | // Record the original uses. | |||
2894 | for (Use &U : Inst->uses()) { | |||
2895 | Instruction *UserI = cast<Instruction>(U.getUser()); | |||
2896 | OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); | |||
2897 | } | |||
2898 | // Record the debug uses separately. They are not in the instruction's | |||
2899 | // use list, but they are replaced by RAUW. | |||
2900 | findDbgValues(DbgValues, Inst); | |||
2901 | ||||
2902 | // Now, we can replace the uses. | |||
2903 | Inst->replaceAllUsesWith(New); | |||
2904 | } | |||
2905 | ||||
2906 | /// Reassign the original uses of Inst to Inst. | |||
2907 | void undo() override { | |||
2908 |       LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
2909 | for (InstructionAndIdx &Use : OriginalUses) | |||
2910 | Use.Inst->setOperand(Use.Idx, Inst); | |||
2911 | // RAUW has replaced all original uses with references to the new value, | |||
2912 | // including the debug uses. Since we are undoing the replacements, | |||
2913 | // the original debug uses must also be reinstated to maintain the | |||
2914 | // correctness and utility of debug value instructions. | |||
2915 | for (auto *DVI : DbgValues) | |||
2916 | DVI->replaceVariableLocationOp(New, Inst); | |||
2917 | } | |||
2918 | }; | |||
2919 | ||||
2920 | /// Remove an instruction from the IR. | |||
2921 | class InstructionRemover : public TypePromotionAction { | |||
2922 | /// Original position of the instruction. | |||
2923 | InsertionHandler Inserter; | |||
2924 | ||||
2925 |     /// Helper structure to hide all the links to the instruction. In other
2926 |     /// words, this helps to act as if the instruction were removed.
2927 | OperandsHider Hider; | |||
2928 | ||||
2929 | /// Keep track of the uses replaced, if any. | |||
2930 | UsesReplacer *Replacer = nullptr; | |||
2931 | ||||
2932 | /// Keep track of instructions removed. | |||
2933 | SetOfInstrs &RemovedInsts; | |||
2934 | ||||
2935 | public: | |||
2936 |     /// Remove all references to \p Inst and optionally replace all its
2937 | /// uses with New. | |||
2938 | /// \p RemovedInsts Keep track of the instructions removed by this Action. | |||
2939 | /// \pre If !Inst->use_empty(), then New != nullptr | |||
2940 | InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts, | |||
2941 | Value *New = nullptr) | |||
2942 | : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), | |||
2943 | RemovedInsts(RemovedInsts) { | |||
2944 | if (New) | |||
2945 | Replacer = new UsesReplacer(Inst, New); | |||
2946 |       LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
2947 | RemovedInsts.insert(Inst); | |||
2948 | /// The instructions removed here will be freed after completing | |||
2949 | /// optimizeBlock() for all blocks as we need to keep track of the | |||
2950 | /// removed instructions during promotion. | |||
2951 | Inst->removeFromParent(); | |||
2952 | } | |||
2953 | ||||
2954 | ~InstructionRemover() override { delete Replacer; } | |||
2955 | ||||
2956 |     /// Resurrect the instruction and reassign it to the proper uses if a
2957 |     /// new value was provided when building this action.
2958 | void undo() override { | |||
2959 |       LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
2960 | Inserter.insert(Inst); | |||
2961 | if (Replacer) | |||
2962 | Replacer->undo(); | |||
2963 | Hider.undo(); | |||
2964 | RemovedInsts.erase(Inst); | |||
2965 | } | |||
2966 | }; | |||
2967 | ||||
2968 | public: | |||
2969 | /// Restoration point. | |||
2970 | /// The restoration point is a pointer to an action instead of an iterator | |||
2971 | /// because the iterator may be invalidated but not the pointer. | |||
2972 | using ConstRestorationPt = const TypePromotionAction *; | |||
2973 | ||||
2974 | TypePromotionTransaction(SetOfInstrs &RemovedInsts) | |||
2975 | : RemovedInsts(RemovedInsts) {} | |||
2976 | ||||
2977 |   /// Commit every change made in this transaction. Return true if any
2978 |   /// change happened.
2979 | bool commit(); | |||
2980 | ||||
2981 | /// Undo all the changes made after the given point. | |||
2982 | void rollback(ConstRestorationPt Point); | |||
2983 | ||||
2984 | /// Get the current restoration point. | |||
2985 | ConstRestorationPt getRestorationPoint() const; | |||
2986 | ||||
2987 | /// \name API for IR modification with state keeping to support rollback. | |||
2988 | /// @{ | |||
2989 | /// Same as Instruction::setOperand. | |||
2990 | void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); | |||
2991 | ||||
2992 | /// Same as Instruction::eraseFromParent. | |||
2993 | void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); | |||
2994 | ||||
2995 | /// Same as Value::replaceAllUsesWith. | |||
2996 | void replaceAllUsesWith(Instruction *Inst, Value *New); | |||
2997 | ||||
2998 | /// Same as Value::mutateType. | |||
2999 | void mutateType(Instruction *Inst, Type *NewTy); | |||
3000 | ||||
3001 | /// Same as IRBuilder::createTrunc. | |||
3002 | Value *createTrunc(Instruction *Opnd, Type *Ty); | |||
3003 | ||||
3004 | /// Same as IRBuilder::createSExt. | |||
3005 | Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); | |||
3006 | ||||
3007 | /// Same as IRBuilder::createZExt. | |||
3008 | Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty); | |||
3009 | ||||
3010 | /// Same as Instruction::moveBefore. | |||
3011 | void moveBefore(Instruction *Inst, Instruction *Before); | |||
3012 | /// @} | |||
3013 | ||||
3014 | private: | |||
3015 | /// The ordered list of actions made so far. | |||
3016 | SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; | |||
3017 | ||||
3018 | using CommitPt = SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator; | |||
3019 | ||||
3020 | SetOfInstrs &RemovedInsts; | |||
3021 | }; | |||
3022 | ||||
3023 | } // end anonymous namespace | |||
3024 | ||||
3025 | void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, | |||
3026 | Value *NewVal) { | |||
3027 | Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>( | |||
3028 | Inst, Idx, NewVal)); | |||
3029 | } | |||
3030 | ||||
3031 | void TypePromotionTransaction::eraseInstruction(Instruction *Inst, | |||
3032 | Value *NewVal) { | |||
3033 | Actions.push_back( | |||
3034 | std::make_unique<TypePromotionTransaction::InstructionRemover>( | |||
3035 | Inst, RemovedInsts, NewVal)); | |||
3036 | } | |||
3037 | ||||
3038 | void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, | |||
3039 | Value *New) { | |||
3040 | Actions.push_back( | |||
3041 | std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); | |||
3042 | } | |||
3043 | ||||
3044 | void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { | |||
3045 | Actions.push_back( | |||
3046 | std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); | |||
3047 | } | |||
3048 | ||||
3049 | Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, | |||
3050 | Type *Ty) { | |||
3051 | std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); | |||
3052 | Value *Val = Ptr->getBuiltValue(); | |||
3053 | Actions.push_back(std::move(Ptr)); | |||
3054 | return Val; | |||
3055 | } | |||
3056 | ||||
3057 | Value *TypePromotionTransaction::createSExt(Instruction *Inst, | |||
3058 | Value *Opnd, Type *Ty) { | |||
3059 | std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); | |||
3060 | Value *Val = Ptr->getBuiltValue(); | |||
3061 | Actions.push_back(std::move(Ptr)); | |||
3062 | return Val; | |||
3063 | } | |||
3064 | ||||
3065 | Value *TypePromotionTransaction::createZExt(Instruction *Inst, | |||
3066 | Value *Opnd, Type *Ty) { | |||
3067 | std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); | |||
3068 | Value *Val = Ptr->getBuiltValue(); | |||
3069 | Actions.push_back(std::move(Ptr)); | |||
3070 | return Val; | |||
3071 | } | |||
3072 | ||||
3073 | void TypePromotionTransaction::moveBefore(Instruction *Inst, | |||
3074 | Instruction *Before) { | |||
3075 | Actions.push_back( | |||
3076 | std::make_unique<TypePromotionTransaction::InstructionMoveBefore>( | |||
3077 | Inst, Before)); | |||
3078 | } | |||
3079 | ||||
3080 | TypePromotionTransaction::ConstRestorationPt | |||
3081 | TypePromotionTransaction::getRestorationPoint() const { | |||
3082 | return !Actions.empty() ? Actions.back().get() : nullptr; | |||
3083 | } | |||
3084 | ||||
3085 | bool TypePromotionTransaction::commit() { | |||
3086 | for (std::unique_ptr<TypePromotionAction> &Action : Actions) | |||
3087 | Action->commit(); | |||
3088 | bool Modified = !Actions.empty(); | |||
3089 | Actions.clear(); | |||
3090 | return Modified; | |||
3091 | } | |||
3092 | ||||
3093 | void TypePromotionTransaction::rollback( | |||
3094 | TypePromotionTransaction::ConstRestorationPt Point) { | |||
3095 | while (!Actions.empty() && Point != Actions.back().get()) { | |||
3096 | std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); | |||
3097 | Curr->undo(); | |||
3098 | } | |||
3099 | } | |||
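     | // A minimal usage sketch of the transaction API (assumed caller code):
     | //   TypePromotionTransaction TPT(RemovedInsts);
     | //   auto RestorePt = TPT.getRestorationPoint();
     | //   Value *Wide = TPT.createSExt(Inst, Opnd, WideTy); // recorded action
     | //   TPT.replaceAllUsesWith(Inst, Wide);               // recorded action
     | //   if (NotProfitable)
     | //     TPT.rollback(RestorePt); // undo both actions, newest first
     | //   else
     | //     TPT.commit();            // keep them and clear the undo log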
3100 | ||||
3101 | namespace { | |||
3102 | ||||
3103 | /// A helper class for matching addressing modes. | |||
3104 | /// | |||
3105 | /// This encapsulates the logic for matching the target-legal addressing modes. | |||
3106 | class AddressingModeMatcher { | |||
3107 | SmallVectorImpl<Instruction*> &AddrModeInsts; | |||
3108 | const TargetLowering &TLI; | |||
3109 | const TargetRegisterInfo &TRI; | |||
3110 | const DataLayout &DL; | |||
3111 | const LoopInfo &LI; | |||
3112 | const std::function<const DominatorTree &()> getDTFn; | |||
3113 | ||||
3114 | /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and | |||
3115 | /// the memory instruction that we're computing this address for. | |||
3116 | Type *AccessTy; | |||
3117 | unsigned AddrSpace; | |||
3118 | Instruction *MemoryInst; | |||
3119 | ||||
3120 | /// This is the addressing mode that we're building up. This is | |||
3121 | /// part of the return value of this addressing mode matching stuff. | |||
3122 | ExtAddrMode &AddrMode; | |||
3123 | ||||
3124 | /// The instructions inserted by other CodeGenPrepare optimizations. | |||
3125 | const SetOfInstrs &InsertedInsts; | |||
3126 | ||||
3127 | /// A map from the instructions to their type before promotion. | |||
3128 | InstrToOrigTy &PromotedInsts; | |||
3129 | ||||
3130 | /// The ongoing transaction where every action should be registered. | |||
3131 | TypePromotionTransaction &TPT; | |||
3132 | ||||
3133 | // A GEP which has too large offset to be folded into the addressing mode. | |||
3134 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP; | |||
3135 | ||||
3136 | /// This is set to true when we should not do profitability checks. | |||
3137 | /// When true, IsProfitableToFoldIntoAddressingMode always returns true. | |||
3138 | bool IgnoreProfitability; | |||
3139 | ||||
3140 | /// True if we are optimizing for size. | |||
3141 | bool OptSize; | |||
3142 | ||||
3143 | ProfileSummaryInfo *PSI; | |||
3144 | BlockFrequencyInfo *BFI; | |||
3145 | ||||
3146 | AddressingModeMatcher( | |||
3147 | SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI, | |||
3148 | const TargetRegisterInfo &TRI, const LoopInfo &LI, | |||
3149 | const std::function<const DominatorTree &()> getDTFn, | |||
3150 | Type *AT, unsigned AS, Instruction *MI, ExtAddrMode &AM, | |||
3151 | const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts, | |||
3152 | TypePromotionTransaction &TPT, | |||
3153 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP, | |||
3154 | bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) | |||
3155 | : AddrModeInsts(AMI), TLI(TLI), TRI(TRI), | |||
3156 | DL(MI->getModule()->getDataLayout()), LI(LI), getDTFn(getDTFn), | |||
3157 | AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM), | |||
3158 | InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT), | |||
3159 | LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) { | |||
3160 | IgnoreProfitability = false; | |||
3161 | } | |||
3162 | ||||
3163 | public: | |||
3164 | /// Find the maximal addressing mode that a load/store of V can fold, | |||
3165 |   /// given an access type of AccessTy. This returns a list of involved
3166 | /// instructions in AddrModeInsts. | |||
3167 | /// \p InsertedInsts The instructions inserted by other CodeGenPrepare | |||
3168 | /// optimizations. | |||
3169 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
3170 |   /// \p TPT The ongoing transaction where every action should be registered.
3171 | static ExtAddrMode | |||
3172 | Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst, | |||
3173 | SmallVectorImpl<Instruction *> &AddrModeInsts, | |||
3174 | const TargetLowering &TLI, const LoopInfo &LI, | |||
3175 | const std::function<const DominatorTree &()> getDTFn, | |||
3176 | const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts, | |||
3177 | InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT, | |||
3178 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP, | |||
3179 | bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) { | |||
3180 | ExtAddrMode Result; | |||
3181 | ||||
3182 | bool Success = AddressingModeMatcher( | |||
3183 | AddrModeInsts, TLI, TRI, LI, getDTFn, AccessTy, AS, MemoryInst, Result, | |||
3184 | InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI, | |||
3185 | BFI).matchAddr(V, 0); | |||
3186 |     (void)Success; assert(Success && "Couldn't select *anything*?");
3187 | return Result; | |||
3188 | } | |||
3189 | ||||
3190 | private: | |||
3191 | bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); | |||
3192 | bool matchAddr(Value *Addr, unsigned Depth); | |||
3193 | bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth, | |||
3194 | bool *MovedAway = nullptr); | |||
3195 | bool isProfitableToFoldIntoAddressingMode(Instruction *I, | |||
3196 | ExtAddrMode &AMBefore, | |||
3197 | ExtAddrMode &AMAfter); | |||
3198 | bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); | |||
3199 | bool isPromotionProfitable(unsigned NewCost, unsigned OldCost, | |||
3200 | Value *PromotedOperand) const; | |||
3201 | }; | |||
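     | // Illustrative match (assumed IR and target): for
     | //   %p = getelementptr i32, i32* %base, i64 %i
     | //   %v = load i32, i32* %p
     | // on a target supporting reg + scale*reg addressing, Match() can return
     | // roughly { BaseReg = %base, ScaledReg = %i, Scale = 4 }, with the GEP
     | // recorded in AddrModeInsts so it can be sunk next to the load.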
3202 | ||||
3203 | class PhiNodeSet; | |||
3204 | ||||
3205 | /// An iterator for PhiNodeSet. | |||
3206 | class PhiNodeSetIterator { | |||
3207 | PhiNodeSet * const Set; | |||
3208 | size_t CurrentIndex = 0; | |||
3209 | ||||
3210 | public: | |||
3211 | /// The constructor. Start should point to either a valid element, or be equal | |||
3212 | /// to the size of the underlying SmallVector of the PhiNodeSet. | |||
3213 | PhiNodeSetIterator(PhiNodeSet * const Set, size_t Start); | |||
3214 | PHINode * operator*() const; | |||
3215 | PhiNodeSetIterator& operator++(); | |||
3216 | bool operator==(const PhiNodeSetIterator &RHS) const; | |||
3217 | bool operator!=(const PhiNodeSetIterator &RHS) const; | |||
3218 | }; | |||
3219 | ||||
3220 | /// Keeps a set of PHINodes. | |||
3221 | /// | |||
3222 | /// This is a minimal set implementation for a specific use case: | |||
3223 | /// It is very fast when there are very few elements, but also provides good | |||
3224 | /// performance when there are many. It is similar to SmallPtrSet, but also | |||
3225 | /// provides iteration by insertion order, which is deterministic and stable | |||
3226 | /// across runs. It is also similar to SmallSetVector, but provides removal of
3227 | /// elements in O(1) time. This is achieved by not actually removing the element
3228 | /// from the underlying vector, so it comes at the cost of using more memory,
3229 | /// but that is fine, since PhiNodeSets are used as short-lived objects.
3230 | class PhiNodeSet { | |||
3231 | friend class PhiNodeSetIterator; | |||
3232 | ||||
3233 | using MapType = SmallDenseMap<PHINode *, size_t, 32>; | |||
3234 | using iterator = PhiNodeSetIterator; | |||
3235 | ||||
3236 | /// Keeps the elements in the order of their insertion in the underlying | |||
3237 | /// vector. To achieve constant time removal, it never deletes any element. | |||
3238 | SmallVector<PHINode *, 32> NodeList; | |||
3239 | ||||
3240 | /// Keeps the elements in the underlying set implementation. This (and not the | |||
3241 | /// NodeList defined above) is the source of truth on whether an element | |||
3242 | /// is actually in the collection. | |||
3243 | MapType NodeMap; | |||
3244 | ||||
3245 |   /// Points to the first valid (not deleted) element when the set is not empty
3246 |   /// and the value is not zero. It equals the size of the underlying vector
3247 |   /// when the set is empty. When the value is 0, as in the beginning, the
3248 |   /// first element may or may not be valid.
3249 | size_t FirstValidElement = 0; | |||
3250 | ||||
3251 | public: | |||
3252 | /// Inserts a new element to the collection. | |||
3253 | /// \returns true if the element is actually added, i.e. was not in the | |||
3254 | /// collection before the operation. | |||
3255 | bool insert(PHINode *Ptr) { | |||
3256 | if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) { | |||
3257 | NodeList.push_back(Ptr); | |||
3258 | return true; | |||
3259 | } | |||
3260 | return false; | |||
3261 | } | |||
3262 | ||||
3263 | /// Removes the element from the collection. | |||
3264 | /// \returns whether the element is actually removed, i.e. was in the | |||
3265 | /// collection before the operation. | |||
3266 | bool erase(PHINode *Ptr) { | |||
3267 | if (NodeMap.erase(Ptr)) { | |||
3268 | SkipRemovedElements(FirstValidElement); | |||
3269 | return true; | |||
3270 | } | |||
3271 | return false; | |||
3272 | } | |||
3273 | ||||
3274 | /// Removes all elements and clears the collection. | |||
3275 | void clear() { | |||
3276 | NodeMap.clear(); | |||
3277 | NodeList.clear(); | |||
3278 | FirstValidElement = 0; | |||
3279 | } | |||
3280 | ||||
3281 | /// \returns an iterator that will iterate the elements in the order of | |||
3282 | /// insertion. | |||
3283 | iterator begin() { | |||
3284 | if (FirstValidElement == 0) | |||
3285 | SkipRemovedElements(FirstValidElement); | |||
3286 | return PhiNodeSetIterator(this, FirstValidElement); | |||
3287 | } | |||
3288 | ||||
3289 | /// \returns an iterator that points to the end of the collection. | |||
3290 | iterator end() { return PhiNodeSetIterator(this, NodeList.size()); } | |||
3291 | ||||
3292 | /// Returns the number of elements in the collection. | |||
3293 | size_t size() const { | |||
3294 | return NodeMap.size(); | |||
3295 | } | |||
3296 | ||||
3297 | /// \returns 1 if the given element is in the collection, and 0 if otherwise. | |||
3298 | size_t count(PHINode *Ptr) const { | |||
3299 | return NodeMap.count(Ptr); | |||
3300 | } | |||
3301 | ||||
3302 | private: | |||
3303 | /// Updates the CurrentIndex so that it will point to a valid element. | |||
3304 | /// | |||
3305 | /// If the element of NodeList at CurrentIndex is valid, it does not | |||
3306 | /// change it. If there are no more valid elements, it updates CurrentIndex | |||
3307 | /// to point to the end of the NodeList. | |||
3308 | void SkipRemovedElements(size_t &CurrentIndex) { | |||
3309 | while (CurrentIndex < NodeList.size()) { | |||
3310 | auto it = NodeMap.find(NodeList[CurrentIndex]); | |||
3311 | // If the element has been deleted and added again later, NodeMap will | |||
3312 | // point to a different index, so CurrentIndex will still be invalid. | |||
3313 | if (it != NodeMap.end() && it->second == CurrentIndex) | |||
3314 | break; | |||
3315 | ++CurrentIndex; | |||
3316 | } | |||
3317 | } | |||
3318 | }; | |||
3319 | ||||
3320 | PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start) | |||
3321 | : Set(Set), CurrentIndex(Start) {} | |||
3322 | ||||
3323 | PHINode * PhiNodeSetIterator::operator*() const { | |||
3324 |   assert(CurrentIndex < Set->NodeList.size() &&
3325 |          "PhiNodeSet access out of range");
3326 | return Set->NodeList[CurrentIndex]; | |||
3327 | } | |||
3328 | ||||
3329 | PhiNodeSetIterator& PhiNodeSetIterator::operator++() { | |||
3330 |   assert(CurrentIndex < Set->NodeList.size() &&
3331 |          "PhiNodeSet access out of range");
3332 | ++CurrentIndex; | |||
3333 | Set->SkipRemovedElements(CurrentIndex); | |||
3334 | return *this; | |||
3335 | } | |||
3336 | ||||
3337 | bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const { | |||
3338 | return CurrentIndex == RHS.CurrentIndex; | |||
3339 | } | |||
3340 | ||||
3341 | bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const { | |||
3342 | return !((*this) == RHS); | |||
3343 | } | |||
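     | // Behavior sketch (hypothetical values): erase() only removes the entry
     | // from NodeMap and leaves a stale slot in NodeList, so after
     | //   S.insert(P1); S.insert(P2); S.erase(P1);
     | // iteration still visits just P2: the iterator consults NodeMap, and
     | // SkipRemovedElements() steps over the tombstoned P1 slot.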
3344 | ||||
3345 | /// Keep track of simplification of Phi nodes. | |||
3346 | /// Accept the set of all phi nodes and erase a phi node from this set
3347 | /// when it is simplified.
3348 | class SimplificationTracker { | |||
3349 | DenseMap<Value *, Value *> Storage; | |||
3350 | const SimplifyQuery &SQ; | |||
3351 | // Tracks newly created Phi nodes. The elements are iterated by insertion | |||
3352 | // order. | |||
3353 | PhiNodeSet AllPhiNodes; | |||
3354 | // Tracks newly created Select nodes. | |||
3355 | SmallPtrSet<SelectInst *, 32> AllSelectNodes; | |||
3356 | ||||
3357 | public: | |||
3358 | SimplificationTracker(const SimplifyQuery &sq) | |||
3359 | : SQ(sq) {} | |||
3360 | ||||
3361 | Value *Get(Value *V) { | |||
3362 | do { | |||
3363 | auto SV = Storage.find(V); | |||
3364 | if (SV == Storage.end()) | |||
3365 | return V; | |||
3366 | V = SV->second; | |||
3367 | } while (true); | |||
3368 | } | |||
3369 | ||||
3370 | Value *Simplify(Value *Val) { | |||
3371 | SmallVector<Value *, 32> WorkList; | |||
3372 | SmallPtrSet<Value *, 32> Visited; | |||
3373 | WorkList.push_back(Val); | |||
3374 | while (!WorkList.empty()) { | |||
3375 | auto *P = WorkList.pop_back_val(); | |||
3376 | if (!Visited.insert(P).second) | |||
3377 | continue; | |||
3378 | if (auto *PI = dyn_cast<Instruction>(P)) | |||
3379 | if (Value *V = SimplifyInstruction(cast<Instruction>(PI), SQ)) { | |||
3380 | for (auto *U : PI->users()) | |||
3381 | WorkList.push_back(cast<Value>(U)); | |||
3382 | Put(PI, V); | |||
3383 | PI->replaceAllUsesWith(V); | |||
3384 | if (auto *PHI = dyn_cast<PHINode>(PI)) | |||
3385 | AllPhiNodes.erase(PHI); | |||
3386 | if (auto *Select = dyn_cast<SelectInst>(PI)) | |||
3387 | AllSelectNodes.erase(Select); | |||
3388 | PI->eraseFromParent(); | |||
3389 | } | |||
3390 | } | |||
3391 | return Get(Val); | |||
3392 | } | |||
3393 | ||||
3394 | void Put(Value *From, Value *To) { | |||
3395 | Storage.insert({ From, To }); | |||
3396 | } | |||
3397 | ||||
3398 | void ReplacePhi(PHINode *From, PHINode *To) { | |||
3399 | Value* OldReplacement = Get(From); | |||
3400 | while (OldReplacement != From) { | |||
3401 | From = To; | |||
3402 | To = dyn_cast<PHINode>(OldReplacement); | |||
3403 | OldReplacement = Get(From); | |||
3404 | } | |||
3405 |     assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
3406 | Put(From, To); | |||
3407 | From->replaceAllUsesWith(To); | |||
3408 | AllPhiNodes.erase(From); | |||
3409 | From->eraseFromParent(); | |||
3410 | } | |||
3411 | ||||
3412 | PhiNodeSet& newPhiNodes() { return AllPhiNodes; } | |||
3413 | ||||
3414 | void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); } | |||
3415 | ||||
3416 | void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); } | |||
3417 | ||||
3418 | unsigned countNewPhiNodes() const { return AllPhiNodes.size(); } | |||
3419 | ||||
3420 | unsigned countNewSelectNodes() const { return AllSelectNodes.size(); } | |||
3421 | ||||
3422 | void destroyNewNodes(Type *CommonType) { | |||
3423 | // For safe erasing, replace the uses with dummy value first. | |||
3424 | auto *Dummy = UndefValue::get(CommonType); | |||
3425 | for (auto *I : AllPhiNodes) { | |||
3426 | I->replaceAllUsesWith(Dummy); | |||
3427 | I->eraseFromParent(); | |||
3428 | } | |||
3429 | AllPhiNodes.clear(); | |||
3430 | for (auto *I : AllSelectNodes) { | |||
3431 | I->replaceAllUsesWith(Dummy); | |||
3432 | I->eraseFromParent(); | |||
3433 | } | |||
3434 | AllSelectNodes.clear(); | |||
3435 | } | |||
3436 | }; | |||
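     | // Sketch of the replacement chain in Get/Put (hypothetical values):
     | //   ST.Put(A, B); ST.Put(B, C);
     | // makes ST.Get(A) follow A -> B -> C and return C, the value currently
     | // standing in for A after both simplifications.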
3437 | ||||
3438 | /// A helper class for combining addressing modes. | |||
3439 | class AddressingModeCombiner { | |||
3440 | typedef DenseMap<Value *, Value *> FoldAddrToValueMapping; | |||
3441 | typedef std::pair<PHINode *, PHINode *> PHIPair; | |||
3442 | ||||
3443 | private: | |||
3444 | /// The addressing modes we've collected. | |||
3445 | SmallVector<ExtAddrMode, 16> AddrModes; | |||
3446 | ||||
3447 | /// The field in which the AddrModes differ, when we have more than one. | |||
3448 | ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField; | |||
3449 | ||||
3450 | /// Are the AddrModes that we have all just equal to their original values? | |||
3451 | bool AllAddrModesTrivial = true; | |||
3452 | ||||
3453 | /// Common Type for all different fields in addressing modes. | |||
3454 | Type *CommonType; | |||
3455 | ||||
3456 | /// SimplifyQuery for simplifyInstruction utility. | |||
3457 | const SimplifyQuery &SQ; | |||
3458 | ||||
3459 | /// Original Address. | |||
3460 | Value *Original; | |||
3461 | ||||
3462 | public: | |||
3463 | AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue) | |||
3464 | : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {} | |||
3465 | ||||
3466 | /// Get the combined AddrMode | |||
3467 | const ExtAddrMode &getAddrMode() const { | |||
3468 | return AddrModes[0]; | |||
3469 | } | |||
3470 | ||||
3471 | /// Add a new AddrMode if it's compatible with the AddrModes we already | |||
3472 | /// have. | |||
3473 | /// \return True iff we succeeded in doing so. | |||
3474 | bool addNewAddrMode(ExtAddrMode &NewAddrMode) { | |||
3475 |     // Take note of whether we have any non-trivial AddrModes: we need to
3476 |     // detect when all AddrModes are trivial, as then we would introduce a phi
3477 |     // or select which just duplicates what's already there.
3478 | AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial(); | |||
3479 | ||||
3480 | // If this is the first addrmode then everything is fine. | |||
3481 | if (AddrModes.empty()) { | |||
3482 | AddrModes.emplace_back(NewAddrMode); | |||
3483 | return true; | |||
3484 | } | |||
3485 | ||||
3486 | // Figure out how different this is from the other address modes, which we | |||
3487 | // can do just by comparing against the first one given that we only care | |||
3488 | // about the cumulative difference. | |||
3489 | ExtAddrMode::FieldName ThisDifferentField = | |||
3490 | AddrModes[0].compare(NewAddrMode); | |||
3491 | if (DifferentField == ExtAddrMode::NoField) | |||
3492 | DifferentField = ThisDifferentField; | |||
3493 | else if (DifferentField != ThisDifferentField) | |||
3494 | DifferentField = ExtAddrMode::MultipleFields; | |||
3495 | ||||
3496 | // If NewAddrMode differs in more than one dimension we cannot handle it. | |||
3497 | bool CanHandle = DifferentField != ExtAddrMode::MultipleFields; | |||
3498 | ||||
3499 |     // If the Scale field is different then we reject.
3500 |     CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;
3501 |
3502 |     // We also must reject the case when the base offset is different and the
3503 |     // scale reg is not null; we cannot handle this case, because the merge of
3504 |     // the different offsets would be used as the ScaleReg.
3505 |     CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
3506 |                               !NewAddrMode.ScaledReg);
3507 |
3508 |     // We also must reject the case when the GV is different and a BaseReg is
3509 |     // installed, because we want to use the base reg as the merge of the GVs.
3510 |     CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
3511 |                               !NewAddrMode.HasBaseReg);
3512 |
3513 |     // Even if NewAddrMode is the same we still need to collect it, because the
3514 |     // original value is different; later we will need all the original values
3515 |     // as anchors when finding the common Phi node.
3516 | if (CanHandle) | |||
3517 | AddrModes.emplace_back(NewAddrMode); | |||
3518 | else | |||
3519 | AddrModes.clear(); | |||
3520 | ||||
3521 | return CanHandle; | |||
3522 | } | |||
3523 | ||||
3524 | /// Combine the addressing modes we've collected into a single | |||
3525 | /// addressing mode. | |||
3526 | /// \return True iff we successfully combined them or we only had one so | |||
3527 | /// didn't need to combine them anyway. | |||
3528 | bool combineAddrModes() { | |||
3529 | // If we have no AddrModes then they can't be combined. | |||
3530 | if (AddrModes.size() == 0) | |||
3531 | return false; | |||
3532 | ||||
3533 | // A single AddrMode can trivially be combined. | |||
3534 | if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField) | |||
3535 | return true; | |||
3536 | ||||
3537 | // If the AddrModes we collected are all just equal to the value they are | |||
3538 | // derived from then combining them wouldn't do anything useful. | |||
3539 | if (AllAddrModesTrivial) | |||
3540 | return false; | |||
3541 | ||||
3542 | if (!addrModeCombiningAllowed()) | |||
3543 | return false; | |||
3544 | ||||
3545 |     // Build a map from <original value, basic block where we saw it> to the
3546 |     // value of the base register.
3547 | // Bail out if there is no common type. | |||
3548 | FoldAddrToValueMapping Map; | |||
3549 | if (!initializeMap(Map)) | |||
3550 | return false; | |||
3551 | ||||
3552 | Value *CommonValue = findCommon(Map); | |||
3553 | if (CommonValue) | |||
3554 | AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes); | |||
3555 | return CommonValue != nullptr; | |||
3556 | } | |||
3557 | ||||
3558 | private: | |||
3559 |   /// Initialize Map with anchor values. For each address seen,
3560 |   /// we record the value of the differing field in that address.
3561 |   /// At the same time we find a common type for the differing fields, which
3562 |   /// we will use to create new Phi/Select nodes; keep it in CommonType.
3563 |   /// Return false if no common type is found.
3564 | bool initializeMap(FoldAddrToValueMapping &Map) { | |||
3565 | // Keep track of keys where the value is null. We will need to replace it | |||
3566 | // with constant null when we know the common type. | |||
3567 | SmallVector<Value *, 2> NullValue; | |||
3568 | Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType()); | |||
3569 | for (auto &AM : AddrModes) { | |||
3570 | Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy); | |||
3571 | if (DV) { | |||
3572 | auto *Type = DV->getType(); | |||
3573 | if (CommonType && CommonType != Type) | |||
3574 | return false; | |||
3575 | CommonType = Type; | |||
3576 | Map[AM.OriginalValue] = DV; | |||
3577 | } else { | |||
3578 | NullValue.push_back(AM.OriginalValue); | |||
3579 | } | |||
3580 | } | |||
3581 |     assert(CommonType && "At least one non-null value must be!");
3582 | for (auto *V : NullValue) | |||
3583 | Map[V] = Constant::getNullValue(CommonType); | |||
3584 | return true; | |||
3585 | } | |||
3586 | ||||
3587 |   /// We have a mapping between value A and other value B, where B was a field
3588 |   /// in the addressing mode represented by A. We also have an original value C
3589 |   /// representing the address we start from. Traversing from C through phis and
3590 |   /// selects, we ended up with the A's in the map. This utility function tries
3591 |   /// to find a value V which is a field in addressing mode C such that, by
3592 |   /// traversing through phi nodes and selects, we end up at the corresponding
3593 |   /// values B in the map. The utility creates new Phis/Selects if needed.
3594 | // The simple example looks as follows: | |||
3595 | // BB1: | |||
3596 | // p1 = b1 + 40 | |||
3597 | // br cond BB2, BB3 | |||
3598 | // BB2: | |||
3599 | // p2 = b2 + 40 | |||
3600 | // br BB3 | |||
3601 | // BB3: | |||
3602 | // p = phi [p1, BB1], [p2, BB2] | |||
3603 | // v = load p | |||
3604 | // Map is | |||
3605 | // p1 -> b1 | |||
3606 | // p2 -> b2 | |||
3607 | // Request is | |||
3608 | // p -> ? | |||
3609 | // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3. | |||
3610 | Value *findCommon(FoldAddrToValueMapping &Map) { | |||
3611 | // Tracks the simplification of newly created phi nodes. We use this | |||
3612 | // mapping because we will add newly created Phi nodes to AddrToBase. | |||
3613 | // Simplification of Phi nodes is recursive, so a Phi node may be | |||
3614 | // simplified after we add it to AddrToBase. In practice this | |||
3615 | // simplification is possible only if the original phis/selects were not | |||
3616 | // simplified yet. | |||
3617 | // Using this mapping we can find the current value in AddrToBase. | |||
3618 | SimplificationTracker ST(SQ); | |||
3619 | ||||
3620 | // First step, DFS to create PHI nodes for all intermediate blocks. | |||
3621 | // Also fill traverse order for the second step. | |||
3622 | SmallVector<Value *, 32> TraverseOrder; | |||
3623 | InsertPlaceholders(Map, TraverseOrder, ST); | |||
3624 | ||||
3625 | // Second step, fill the new nodes with merged values and simplify if possible. | |||
3626 | FillPlaceholders(Map, TraverseOrder, ST); | |||
3627 | ||||
3628 | if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) { | |||
3629 | ST.destroyNewNodes(CommonType); | |||
3630 | return nullptr; | |||
3631 | } | |||
3632 | ||||
3633 | // Now we'd like to match the new Phi nodes to existing ones. | |||
3634 | unsigned PhiNotMatchedCount = 0; | |||
3635 | if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) { | |||
3636 | ST.destroyNewNodes(CommonType); | |||
3637 | return nullptr; | |||
3638 | } | |||
3639 | ||||
3640 | auto *Result = ST.Get(Map.find(Original)->second); | |||
3641 | if (Result) { | |||
3642 | NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount; | |||
3643 | NumMemoryInstsSelectCreated += ST.countNewSelectNodes(); | |||
3644 | } | |||
3645 | return Result; | |||
3646 | } | |||
3647 | ||||
3648 | /// Try to match PHI node to Candidate. | |||
3649 | /// Matcher tracks the matched Phi nodes. | |||
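// Sketch of a successful match (hypothetical IR): a newly created
//   %sunk_phi = phi [%b1, BB1], [%b2, BB2]
// matches an existing
//   %old = phi [%b1, BB1], [%b2, BB2]
// in the same block; incoming values that are themselves Phis from
// PhiNodesToMatch are queued on the worklist and must match recursively.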
3650 | bool MatchPhiNode(PHINode *PHI, PHINode *Candidate, | |||
3651 | SmallSetVector<PHIPair, 8> &Matcher, | |||
3652 | PhiNodeSet &PhiNodesToMatch) { | |||
3653 | SmallVector<PHIPair, 8> WorkList; | |||
3654 | Matcher.insert({ PHI, Candidate }); | |||
3655 | SmallSet<PHINode *, 8> MatchedPHIs; | |||
3656 | MatchedPHIs.insert(PHI); | |||
3657 | WorkList.push_back({ PHI, Candidate }); | |||
3658 | SmallSet<PHIPair, 8> Visited; | |||
3659 | while (!WorkList.empty()) { | |||
3660 | auto Item = WorkList.pop_back_val(); | |||
3661 | if (!Visited.insert(Item).second) | |||
3662 | continue; | |||
3663 | // We iterate over all incoming values of the Phi to compare them. | |||
3664 | // If the values differ, both of them are Phis, the first one is a Phi | |||
3665 | // we added (subject to match), and both are in the same basic block, | |||
3666 | // then we can match our pair if the values match. So we state that | |||
3667 | // these values match and add the pair to the worklist to verify that. | |||
3668 | for (auto B : Item.first->blocks()) { | |||
3669 | Value *FirstValue = Item.first->getIncomingValueForBlock(B); | |||
3670 | Value *SecondValue = Item.second->getIncomingValueForBlock(B); | |||
3671 | if (FirstValue == SecondValue) | |||
3672 | continue; | |||
3673 | ||||
3674 | PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue); | |||
3675 | PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue); | |||
3676 | ||||
3677 | // If one of them is not a Phi, or | |||
3678 | // the first one is not a Phi node from the set we'd like to match, or | |||
3679 | // the Phi nodes are from different basic blocks, then | |||
3680 | // we will not be able to match. | |||
3681 | if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) || | |||
3682 | FirstPhi->getParent() != SecondPhi->getParent()) | |||
3683 | return false; | |||
3684 | ||||
3685 | // If we already matched them then continue. | |||
3686 | if (Matcher.count({ FirstPhi, SecondPhi })) | |||
3687 | continue; | |||
3688 | // So the values are different and do not match. So we need them to | |||
3689 | // match. (But we register no more than one match per PHI node, so that | |||
3690 | // we won't later try to replace them twice.) | |||
3691 | if (MatchedPHIs.insert(FirstPhi).second) | |||
3692 | Matcher.insert({ FirstPhi, SecondPhi }); | |||
3693 | // But we must check it. | |||
3694 | WorkList.push_back({ FirstPhi, SecondPhi }); | |||
3695 | } | |||
3696 | } | |||
3697 | return true; | |||
3698 | } | |||
3699 | ||||
3700 | /// For the given set of PHI nodes (in the SimplificationTracker) try | |||
3701 | /// to find their equivalents. | |||
3702 | /// Returns false if this matching fails and creation of new Phi is disabled. | |||
3703 | bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes, | |||
3704 | unsigned &PhiNotMatchedCount) { | |||
3705 | // Matched and PhiNodesToMatch iterate their elements in a deterministic | |||
3706 | // order, so the replacements (ReplacePhi) are also done in a deterministic | |||
3707 | // order. | |||
3708 | SmallSetVector<PHIPair, 8> Matched; | |||
3709 | SmallPtrSet<PHINode *, 8> WillNotMatch; | |||
3710 | PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes(); | |||
3711 | while (PhiNodesToMatch.size()) { | |||
3712 | PHINode *PHI = *PhiNodesToMatch.begin(); | |||
3713 | ||||
3714 | // Add ourselves; if there are no other Phi nodes in the basic block, we do not match. | |||
3715 | WillNotMatch.clear(); | |||
3716 | WillNotMatch.insert(PHI); | |||
3717 | ||||
3718 | // Traverse all Phis until we find an equivalent or fail to do so. | |||
3719 | bool IsMatched = false; | |||
3720 | for (auto &P : PHI->getParent()->phis()) { | |||
3721 | if (&P == PHI) | |||
3722 | continue; | |||
3723 | if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch))) | |||
3724 | break; | |||
3725 | // If it does not match, collect all Phi nodes from the matcher; | |||
3726 | // if we end up with no match, then all these Phi nodes will not match | |||
3727 | // later. | |||
3728 | for (auto M : Matched) | |||
3729 | WillNotMatch.insert(M.first); | |||
3730 | Matched.clear(); | |||
3731 | } | |||
3732 | if (IsMatched) { | |||
3733 | // Replace all matched values and erase them. | |||
3734 | for (auto MV : Matched) | |||
3735 | ST.ReplacePhi(MV.first, MV.second); | |||
3736 | Matched.clear(); | |||
3737 | continue; | |||
3738 | } | |||
3739 | // If we are not allowed to create new nodes then bail out. | |||
3740 | if (!AllowNewPhiNodes) | |||
3741 | return false; | |||
3742 | // Just remove all seen values in matcher. They will not match anything. | |||
3743 | PhiNotMatchedCount += WillNotMatch.size(); | |||
3744 | for (auto *P : WillNotMatch) | |||
3745 | PhiNodesToMatch.erase(P); | |||
3746 | } | |||
3747 | return true; | |||
3748 | } | |||
3749 | /// Fill the placeholders with values from predecessors and simplify them. | |||
3750 | void FillPlaceholders(FoldAddrToValueMapping &Map, | |||
3751 | SmallVectorImpl<Value *> &TraverseOrder, | |||
3752 | SimplificationTracker &ST) { | |||
3753 | while (!TraverseOrder.empty()) { | |||
3754 | Value *Current = TraverseOrder.pop_back_val(); | |||
3755 | assert(Map.find(Current) != Map.end() && "No node to fill!!!")((void)0); | |||
3756 | Value *V = Map[Current]; | |||
3757 | ||||
3758 | if (SelectInst *Select = dyn_cast<SelectInst>(V)) { | |||
3759 | // CurrentValue also must be Select. | |||
3760 | auto *CurrentSelect = cast<SelectInst>(Current); | |||
3761 | auto *TrueValue = CurrentSelect->getTrueValue(); | |||
3762 | assert(Map.find(TrueValue) != Map.end() && "No True Value!")((void)0); | |||
3763 | Select->setTrueValue(ST.Get(Map[TrueValue])); | |||
3764 | auto *FalseValue = CurrentSelect->getFalseValue(); | |||
3765 | assert(Map.find(FalseValue) != Map.end() && "No False Value!")((void)0); | |||
3766 | Select->setFalseValue(ST.Get(Map[FalseValue])); | |||
3767 | } else { | |||
3768 | // Must be a Phi node then. | |||
3769 | auto *PHI = cast<PHINode>(V); | |||
3770 | // Fill the Phi node with values from predecessors. | |||
3771 | for (auto *B : predecessors(PHI->getParent())) { | |||
3772 | Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B); | |||
3773 | assert(Map.find(PV) != Map.end() && "No predecessor Value!")((void)0); | |||
3774 | PHI->addIncoming(ST.Get(Map[PV]), B); | |||
3775 | } | |||
3776 | } | |||
3777 | Map[Current] = ST.Simplify(V); | |||
3778 | } | |||
3779 | } | |||
3780 | ||||
3781 | /// Starting from the original value, recursively iterates over the def-use | |||
3782 | /// chain up to known ending values represented in a map. For each traversed | |||
3783 | /// phi/select, inserts a placeholder Phi or Select. | |||
3784 | /// Reports all newly created Phi/Select nodes by adding them to a set. | |||
3785 | /// Also reports the order in which the values have been traversed. | |||
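// E.g. (hypothetical values): for p = phi [p1, BB1], [p2, BB2] with anchors
// { p1 -> b1, p2 -> b2 }, this pass creates an empty placeholder
//   %sunk_phi = phi <CommonType> ...
// in p's block (selects get undef operands), and FillPlaceholders later
// fills in the incoming values [b1, BB1] and [b2, BB2].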
3786 | void InsertPlaceholders(FoldAddrToValueMapping &Map, | |||
3787 | SmallVectorImpl<Value *> &TraverseOrder, | |||
3788 | SimplificationTracker &ST) { | |||
3789 | SmallVector<Value *, 32> Worklist; | |||
3790 | assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&((void)0) | |||
3791 | "Address must be a Phi or Select node")((void)0); | |||
3792 | auto *Dummy = UndefValue::get(CommonType); | |||
3793 | Worklist.push_back(Original); | |||
3794 | while (!Worklist.empty()) { | |||
3795 | Value *Current = Worklist.pop_back_val(); | |||
3796 | // If it is already visited or is an ending value, then skip it. | |||
3797 | if (Map.find(Current) != Map.end()) | |||
3798 | continue; | |||
3799 | TraverseOrder.push_back(Current); | |||
3800 | ||||
3801 | // CurrentValue must be a Phi node or select. All others must be covered | |||
3802 | // by anchors. | |||
3803 | if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) { | |||
3804 | // Is it OK to get metadata from OrigSelect?! | |||
3805 | // Create a Select placeholder with dummy value. | |||
3806 | SelectInst *Select = SelectInst::Create( | |||
3807 | CurrentSelect->getCondition(), Dummy, Dummy, | |||
3808 | CurrentSelect->getName(), CurrentSelect, CurrentSelect); | |||
3809 | Map[Current] = Select; | |||
3810 | ST.insertNewSelect(Select); | |||
3811 | // We are interested in True and False values. | |||
3812 | Worklist.push_back(CurrentSelect->getTrueValue()); | |||
3813 | Worklist.push_back(CurrentSelect->getFalseValue()); | |||
3814 | } else { | |||
3815 | // It must be a Phi node then. | |||
3816 | PHINode *CurrentPhi = cast<PHINode>(Current); | |||
3817 | unsigned PredCount = CurrentPhi->getNumIncomingValues(); | |||
3818 | PHINode *PHI = | |||
3819 | PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi); | |||
3820 | Map[Current] = PHI; | |||
3821 | ST.insertNewPhi(PHI); | |||
3822 | append_range(Worklist, CurrentPhi->incoming_values()); | |||
3823 | } | |||
3824 | } | |||
3825 | } | |||
3826 | ||||
3827 | bool addrModeCombiningAllowed() { | |||
3828 | if (DisableComplexAddrModes) | |||
3829 | return false; | |||
3830 | switch (DifferentField) { | |||
3831 | default: | |||
3832 | return false; | |||
3833 | case ExtAddrMode::BaseRegField: | |||
3834 | return AddrSinkCombineBaseReg; | |||
3835 | case ExtAddrMode::BaseGVField: | |||
3836 | return AddrSinkCombineBaseGV; | |||
3837 | case ExtAddrMode::BaseOffsField: | |||
3838 | return AddrSinkCombineBaseOffs; | |||
3839 | case ExtAddrMode::ScaledRegField: | |||
3840 | return AddrSinkCombineScaledReg; | |||
3841 | } | |||
3842 | } | |||
3843 | }; | |||
3844 | } // end anonymous namespace | |||
3845 | ||||
3846 | /// Try adding ScaleReg*Scale to the current addressing mode. | |||
3847 | /// Return true and update AddrMode if this addr mode is legal for the target, | |||
3848 | /// false if not. | |||
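// For example (hypothetical target): matching %x with Scale 4 when AddrMode
// already holds ScaledReg == %x with Scale 3 yields the candidate
// [Base + %x*7], which is committed only if TLI.isLegalAddressingMode()
// accepts it for the access type.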
3849 | bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, | |||
3850 | unsigned Depth) { | |||
3851 | // If Scale is 1, then this is the same as adding ScaleReg to the addressing | |||
3852 | // mode. Just process that directly. | |||
3853 | if (Scale == 1) | |||
3854 | return matchAddr(ScaleReg, Depth); | |||
3855 | ||||
3856 | // If the scale is 0, it takes nothing to add this. | |||
3857 | if (Scale == 0) | |||
3858 | return true; | |||
3859 | ||||
3860 | // If we already have a scale of this value, we can add to it, otherwise, we | |||
3861 | // need an available scale field. | |||
3862 | if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) | |||
3863 | return false; | |||
3864 | ||||
3865 | ExtAddrMode TestAddrMode = AddrMode; | |||
3866 | ||||
3867 | // Add scale to turn X*4+X*3 -> X*7. This could also do things like | |||
3868 | // [A+B + A*7] -> [B+A*8]. | |||
3869 | TestAddrMode.Scale += Scale; | |||
3870 | TestAddrMode.ScaledReg = ScaleReg; | |||
3871 | ||||
3872 | // If the new address isn't legal, bail out. | |||
3873 | if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) | |||
3874 | return false; | |||
3875 | ||||
3876 | // It was legal, so commit it. | |||
3877 | AddrMode = TestAddrMode; | |||
3878 | ||||
3879 | // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now | |||
3880 | // to see if ScaleReg is actually X+C. If so, we can turn this into adding | |||
3881 | // X*Scale + C*Scale to the addr mode. If we found an available IV increment, | |||
3882 | // do not go any further: we can reuse it and cannot eliminate it. | |||
3883 | ConstantInt *CI = nullptr; Value *AddLHS = nullptr; | |||
3884 | if (isa<Instruction>(ScaleReg) && // not a constant expr. | |||
3885 | match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) && | |||
3886 | !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) { | |||
3887 | TestAddrMode.InBounds = false; | |||
3888 | TestAddrMode.ScaledReg = AddLHS; | |||
3889 | TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale; | |||
3890 | ||||
3891 | // If this addressing mode is legal, commit it and remember that we folded | |||
3892 | // this instruction. | |||
3893 | if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) { | |||
3894 | AddrModeInsts.push_back(cast<Instruction>(ScaleReg)); | |||
3895 | AddrMode = TestAddrMode; | |||
3896 | return true; | |||
3897 | } | |||
3898 | // Restore status quo. | |||
3899 | TestAddrMode = AddrMode; | |||
3900 | } | |||
3901 | ||||
3902 | // If this is an add recurrence with a constant step, return the increment | |||
3903 | // instruction and the canonicalized step. | |||
3904 | auto GetConstantStep = [this](const Value * V) | |||
3905 | ->Optional<std::pair<Instruction *, APInt> > { | |||
3906 | auto *PN = dyn_cast<PHINode>(V); | |||
3907 | if (!PN) | |||
3908 | return None; | |||
3909 | auto IVInc = getIVIncrement(PN, &LI); | |||
3910 | if (!IVInc) | |||
3911 | return None; | |||
3912 | // TODO: The result of the intrinsics above is two's complement. However, | |||
3913 | // when the IV inc is expressed as add or sub, iv.next is potentially poison. | |||
3914 | // If it has nuw or nsw flags, we need to make sure that these flags are | |||
3915 | // inferrable at the point of the memory instruction. Otherwise we would be | |||
3916 | // replacing a well-defined two's-complement computation with poison. To avoid | |||
3917 | // the potentially complex analysis needed to prove this, we reject such cases. | |||
3918 | if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first)) | |||
3919 | if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap()) | |||
3920 | return None; | |||
3921 | if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second)) | |||
3922 | return std::make_pair(IVInc->first, ConstantStep->getValue()); | |||
3923 | return None; | |||
3924 | }; | |||
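// E.g. (hypothetical IR): for
//   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %loop ]
//   %iv.next = add i64 %iv, 8   ; no nuw/nsw flags
// GetConstantStep(%iv) returns the pair {%iv.next, 8}.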
3925 | ||||
3926 | // Try to account for the following special case: | |||
3927 | // 1. ScaleReg is an induction variable; | |||
3928 | // 2. We use it with non-zero offset; | |||
3929 | // 3. IV's increment is available at the point of memory instruction. | |||
3930 | // | |||
3931 | // In this case, we may reuse the IV increment instead of the IV Phi to | |||
3932 | // achieve the following advantages: | |||
3933 | // 1. If the IV step matches the offset, we will have no need for the offset; | |||
3934 | // 2. Even if they don't match, we will reduce the overlap of the live IV | |||
3935 | // and the IV increment, which will potentially lead to better register | |||
3936 | // assignment. | |||
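// E.g., continuing the sketch above: with Scale == 1 and BaseOffs == 8,
// [%iv*1 + 8] can be rewritten as [%iv.next*1 + 0], eliminating the offset
// because the step matches it.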
3937 | if (AddrMode.BaseOffs) { | |||
3938 | if (auto IVStep = GetConstantStep(ScaleReg)) { | |||
3939 | Instruction *IVInc = IVStep->first; | |||
3940 | // The following assert is important to ensure a lack of infinite loops. | |||
3941 | // This transform is (intentionally) the inverse of the one just above. | |||
3942 | // If they don't agree on the definition of an increment, we'd alternate | |||
3943 | // back and forth indefinitely. | |||
3944 | assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep")((void)0); | |||
3945 | APInt Step = IVStep->second; | |||
3946 | APInt Offset = Step * AddrMode.Scale; | |||
3947 | if (Offset.isSignedIntN(64)) { | |||
3948 | TestAddrMode.InBounds = false; | |||
3949 | TestAddrMode.ScaledReg = IVInc; | |||
3950 | TestAddrMode.BaseOffs -= Offset.getLimitedValue(); | |||
3951 | // If this addressing mode is legal, commit it. | |||
3952 | // (Note that we defer the (expensive) domtree-based legality check | |||
3953 | // to the very last possible point.) | |||
3954 | if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) && | |||
3955 | getDTFn().dominates(IVInc, MemoryInst)) { | |||
3956 | AddrModeInsts.push_back(cast<Instruction>(IVInc)); | |||
3957 | AddrMode = TestAddrMode; | |||
3958 | return true; | |||
3959 | } | |||
3960 | // Restore status quo. | |||
3961 | TestAddrMode = AddrMode; | |||
3962 | } | |||
3963 | } | |||
3964 | } | |||
3965 | ||||
3966 | // Otherwise, just return what we have. | |||
3967 | return true; | |||
3968 | } | |||
3969 | ||||
3970 | /// This is a little filter, which returns true if an addressing computation | |||
3971 | /// involving I might be folded into a load/store accessing it. | |||
3972 | /// This doesn't need to be perfect, but needs to accept at least | |||
3973 | /// the set of instructions that MatchOperationAddr can. | |||
3974 | static bool MightBeFoldableInst(Instruction *I) { | |||
3975 | switch (I->getOpcode()) { | |||
3976 | case Instruction::BitCast: | |||
3977 | case Instruction::AddrSpaceCast: | |||
3978 | // Don't touch identity bitcasts. | |||
3979 | if (I->getType() == I->getOperand(0)->getType()) | |||
3980 | return false; | |||
3981 | return I->getType()->isIntOrPtrTy(); | |||
3982 | case Instruction::PtrToInt: | |||
3983 | // PtrToInt is always a noop, as we know that the int type is pointer sized. | |||
3984 | return true; | |||
3985 | case Instruction::IntToPtr: | |||
3986 | // We know the input is intptr_t, so this is foldable. | |||
3987 | return true; | |||
3988 | case Instruction::Add: | |||
3989 | return true; | |||
3990 | case Instruction::Mul: | |||
3991 | case Instruction::Shl: | |||
3992 | // Can only handle X*C and X << C. | |||
3993 | return isa<ConstantInt>(I->getOperand(1)); | |||
3994 | case Instruction::GetElementPtr: | |||
3995 | return true; | |||
3996 | default: | |||
3997 | return false; | |||
3998 | } | |||
3999 | } | |||
4000 | ||||
4001 | /// Check whether or not \p Val is a legal instruction for \p TLI. | |||
4002 | /// \note \p Val is assumed to be the product of some type promotion. | |||
4003 | /// Therefore if \p Val has an undefined state in \p TLI, this is assumed | |||
4004 | /// to be legal, as the non-promoted value would have had the same state. | |||
4005 | static bool isPromotedInstructionLegal(const TargetLowering &TLI, | |||
4006 | const DataLayout &DL, Value *Val) { | |||
4007 | Instruction *PromotedInst = dyn_cast<Instruction>(Val); | |||
4008 | if (!PromotedInst) | |||
4009 | return false; | |||
4010 | int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode()); | |||
4011 | // If the ISDOpcode is undefined, it was undefined before the promotion. | |||
4012 | if (!ISDOpcode) | |||
4013 | return true; | |||
4014 | // Otherwise, check if the promoted instruction is legal or not. | |||
4015 | return TLI.isOperationLegalOrCustom( | |||
4016 | ISDOpcode, TLI.getValueType(DL, PromotedInst->getType())); | |||
4017 | } | |||
4018 | ||||
4019 | namespace { | |||
4020 | ||||
4021 | /// Helper class to perform type promotion. | |||
4022 | class TypePromotionHelper { | |||
4023 | /// Utility function to add a promoted instruction \p ExtOpnd to | |||
4024 | /// \p PromotedInsts and record the type of extension we have seen. | |||
4025 | static void addPromotedInst(InstrToOrigTy &PromotedInsts, | |||
4026 | Instruction *ExtOpnd, | |||
4027 | bool IsSExt) { | |||
4028 | ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; | |||
4029 | InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd); | |||
4030 | if (It != PromotedInsts.end()) { | |||
4031 | // If the new extension is the same as the original, the information in | |||
4032 | // PromotedInsts[ExtOpnd] is still correct. | |||
4033 | if (It->second.getInt() == ExtTy) | |||
4034 | return; | |||
4035 | ||||
4036 | // Now that the new extension differs from the old one, we make | |||
4037 | // the type information invalid by setting extension type to | |||
4038 | // BothExtension. | |||
4039 | ExtTy = BothExtension; | |||
4040 | } | |||
4041 | PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy); | |||
4042 | } | |||
4043 | ||||
4044 | /// Utility function to query the original type of instruction \p Opnd | |||
4045 | /// with a matched extension type. If the extension doesn't match, we | |||
4046 | /// cannot use the information we had on the original type. | |||
4047 | /// BothExtension doesn't match any extension type. | |||
4048 | static const Type *getOrigType(const InstrToOrigTy &PromotedInsts, | |||
4049 | Instruction *Opnd, | |||
4050 | bool IsSExt) { | |||
4051 | ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; | |||
4052 | InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); | |||
4053 | if (It != PromotedInsts.end() && It->second.getInt() == ExtTy) | |||
4054 | return It->second.getPointer(); | |||
4055 | return nullptr; | |||
4056 | } | |||
4057 | ||||
4058 | /// Utility function to check whether or not a sign or zero extension | |||
4059 | /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by | |||
4060 | /// either using the operands of \p Inst or promoting \p Inst. | |||
4061 | /// The type of the extension is defined by \p IsSExt. | |||
4062 | /// In other words, check if: | |||
4063 | /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. | |||
4064 | /// #1 Promotion applies: | |||
4065 | /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). | |||
4066 | /// #2 Operand reuses: | |||
4067 | /// ext opnd1 to ConsideredExtType. | |||
4068 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
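// Schematically (hypothetical operands): with a nsw add,
//   sext (add nsw %a, %b) to i32
// can be rewritten as
//   add nsw (sext %a to i32), (sext %b to i32)   ; promotion, case #1
// while for sext (sext %x ...) the inner extension is reused, case #2.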
4069 | static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, | |||
4070 | const InstrToOrigTy &PromotedInsts, bool IsSExt); | |||
4071 | ||||
4072 | /// Utility function to determine if \p OpIdx should be promoted when | |||
4073 | /// promoting \p Inst. | |||
4074 | static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { | |||
4075 | return !(isa<SelectInst>(Inst) && OpIdx == 0); | |||
4076 | } | |||
4077 | ||||
4078 | /// Utility function to promote the operand of \p Ext when this | |||
4079 | /// operand is a promotable trunc or sext or zext. | |||
4080 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
4081 | /// \p CreatedInstsCost[out] contains the cost of all instructions | |||
4082 | /// created to promote the operand of Ext. | |||
4083 | /// Newly added extensions are inserted in \p Exts. | |||
4084 | /// Newly added truncates are inserted in \p Truncs. | |||
4085 | /// Should never be called directly. | |||
4086 | /// \return The promoted value which is used instead of Ext. | |||
4087 | static Value *promoteOperandForTruncAndAnyExt( | |||
4088 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
4089 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
4090 | SmallVectorImpl<Instruction *> *Exts, | |||
4091 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); | |||
4092 | ||||
4093 | /// Utility function to promote the operand of \p Ext when this | |||
4094 | /// operand is promotable and is not a supported trunc or sext. | |||
4095 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
4096 | /// \p CreatedInstsCost[out] contains the cost of all the instructions | |||
4097 | /// created to promote the operand of Ext. | |||
4098 | /// Newly added extensions are inserted in \p Exts. | |||
4099 | /// Newly added truncates are inserted in \p Truncs. | |||
4100 | /// Should never be called directly. | |||
4101 | /// \return The promoted value which is used instead of Ext. | |||
4102 | static Value *promoteOperandForOther(Instruction *Ext, | |||
4103 | TypePromotionTransaction &TPT, | |||
4104 | InstrToOrigTy &PromotedInsts, | |||
4105 | unsigned &CreatedInstsCost, | |||
4106 | SmallVectorImpl<Instruction *> *Exts, | |||
4107 | SmallVectorImpl<Instruction *> *Truncs, | |||
4108 | const TargetLowering &TLI, bool IsSExt); | |||
4109 | ||||
4110 | /// \see promoteOperandForOther. | |||
4111 | static Value *signExtendOperandForOther( | |||
4112 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
4113 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
4114 | SmallVectorImpl<Instruction *> *Exts, | |||
4115 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | |||
4116 | return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, | |||
4117 | Exts, Truncs, TLI, true); | |||
4118 | } | |||
4119 | ||||
4120 | /// \see promoteOperandForOther. | |||
4121 | static Value *zeroExtendOperandForOther( | |||
4122 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
4123 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
4124 | SmallVectorImpl<Instruction *> *Exts, | |||
4125 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | |||
4126 | return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, | |||
4127 | Exts, Truncs, TLI, false); | |||
4128 | } | |||
4129 | ||||
4130 | public: | |||
4131 | /// Type for the utility function that promotes the operand of Ext. | |||
4132 | using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT, | |||
4133 | InstrToOrigTy &PromotedInsts, | |||
4134 | unsigned &CreatedInstsCost, | |||
4135 | SmallVectorImpl<Instruction *> *Exts, | |||
4136 | SmallVectorImpl<Instruction *> *Truncs, | |||
4137 | const TargetLowering &TLI); | |||
4138 | ||||
4139 | /// Given a sign/zero extend instruction \p Ext, return the appropriate | |||
4140 | /// action to promote the operand of \p Ext instead of using Ext. | |||
4141 | /// \return NULL if no promotable action is possible with the current | |||
4142 | /// sign extension. | |||
4143 | /// \p InsertedInsts keeps track of all the instructions inserted by the | |||
4144 | /// other CodeGenPrepare optimizations. This information is important | |||
4145 | /// because we do not want to promote these instructions as CodeGenPrepare | |||
4146 | /// will reinsert them later. Thus creating an infinite loop: create/remove. | |||
4147 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
4148 | static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts, | |||
4149 | const TargetLowering &TLI, | |||
4150 | const InstrToOrigTy &PromotedInsts); | |||
4151 | }; | |||
4152 | ||||
4153 | } // end anonymous namespace | |||
4154 | ||||
4155 | bool TypePromotionHelper::canGetThrough(const Instruction *Inst, | |||
4156 | Type *ConsideredExtType, | |||
4157 | const InstrToOrigTy &PromotedInsts, | |||
4158 | bool IsSExt) { | |||
4159 | // The promotion helper does not know how to deal with vector types yet. | |||
4160 | // To be able to fix that, we would need to fix the places where we | |||
4161 | // statically extend, e.g., constants and such. | |||
4162 | if (Inst->getType()->isVectorTy()) | |||
4163 | return false; | |||
4164 | ||||
4165 | // We can always get through zext. | |||
4166 | if (isa<ZExtInst>(Inst)) | |||
4167 | return true; | |||
4168 | ||||
4169 | // sext(sext) is ok too. | |||
4170 | if (IsSExt && isa<SExtInst>(Inst)) | |||
4171 | return true; | |||
4172 | ||||
4173 | // We can get through binary operator, if it is legal. In other words, the | |||
4174 | // binary operator must have a nuw or nsw flag. | |||
4175 | const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst); | |||
4176 | if (isa_and_nonnull<OverflowingBinaryOperator>(BinOp) && | |||
4177 | ((!IsSExt && BinOp->hasNoUnsignedWrap()) || | |||
4178 | (IsSExt && BinOp->hasNoSignedWrap()))) | |||
4179 | return true; | |||
4180 | ||||
4181 | // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst)) | |||
4182 | if ((Inst->getOpcode() == Instruction::And || | |||
4183 | Inst->getOpcode() == Instruction::Or)) | |||
4184 | return true; | |||
4185 | ||||
4186 | // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst)) | |||
4187 | if (Inst->getOpcode() == Instruction::Xor) { | |||
4188 | const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)); | |||
4189 | // Make sure it is not a NOT. | |||
4190 | if (Cst && !Cst->getValue().isAllOnesValue()) | |||
4191 | return true; | |||
4192 | } | |||
4193 | ||||
4194 | // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst)) | |||
4195 | // It may change a poisoned value into a regular value, like | |||
4196 | // zext i32 (shrl i8 %val, 12) --> shrl i32 (zext i8 %val), 12 | |||
4197 | // poisoned value regular value | |||
4198 | // It should be OK since undef covers valid value. | |||
4199 | if (Inst->getOpcode() == Instruction::LShr && !IsSExt) | |||
4200 | return true; | |||
4201 | ||||
4202 | // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst) | |||
4203 | // It may change a poisoned value into a regular value, like | |||
4204 | // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12 | |||
4205 | // poisoned value regular value | |||
4206 | // It should be OK since undef covers valid value. | |||
4207 | if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) { | |||
4208 | const auto *ExtInst = cast<const Instruction>(*Inst->user_begin()); | |||
4209 | if (ExtInst->hasOneUse()) { | |||
4210 | const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin()); | |||
4211 | if (AndInst && AndInst->getOpcode() == Instruction::And) { | |||
4212 | const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1)); | |||
4213 | if (Cst && | |||
4214 | Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth())) | |||
4215 | return true; | |||
4216 | } | |||
4217 | } | |||
4218 | } | |||
4219 | ||||
4220 | // Check if we can do the following simplification. | |||
4221 | // ext(trunc(opnd)) --> ext(opnd) | |||
4222 | if (!isa<TruncInst>(Inst)) | |||
4223 | return false; | |||
4224 | ||||
4225 | Value *OpndVal = Inst->getOperand(0); | |||
4226 | // Check if we can use this operand in the extension. | |||
4227 | // If the type is larger than the result type of the extension, we cannot. | |||
4228 | if (!OpndVal->getType()->isIntegerTy() || | |||
4229 | OpndVal->getType()->getIntegerBitWidth() > | |||
4230 | ConsideredExtType->getIntegerBitWidth()) | |||
4231 | return false; | |||
4232 | ||||
4233 | // If the operand of the truncate is not an instruction, we will not have | |||
4234 | // any information on the dropped bits. | |||
4235 | // (Actually we could for constants, but it is not worth the extra logic). | |||
4236 | Instruction *Opnd = dyn_cast<Instruction>(OpndVal); | |||
4237 | if (!Opnd) | |||
4238 | return false; | |||
4239 | ||||
4240 | // Check if the source of the type is narrow enough. | |||
4241 | // I.e., check that trunc just drops extended bits of the same kind of | |||
4242 | // the extension. | |||
4243 | // #1 get the type of the operand and check the kind of the extended bits. | |||
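// E.g. (schematically): in ext (trunc (sext i8 %v to i32) to i16), the
// trunc only drops bits that the inner sext created, so for a sign
// extension we can look through it and use %v's extended form directly.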
4244 | const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt); | |||
4245 | if (OpndType) | |||
4246 | ; | |||
4247 | else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd))) | |||
4248 | OpndType = Opnd->getOperand(0)->getType(); | |||
4249 | else | |||
4250 | return false; | |||
4251 | ||||
4252 | // #2 check that the truncate just drops extended bits. | |||
4253 | return Inst->getType()->getIntegerBitWidth() >= | |||
4254 | OpndType->getIntegerBitWidth(); | |||
4255 | } | |||
4256 | ||||
4257 | TypePromotionHelper::Action TypePromotionHelper::getAction( | |||
4258 | Instruction *Ext, const SetOfInstrs &InsertedInsts, | |||
4259 | const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { | |||
4260 | assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&((void)0) | |||
4261 | "Unexpected instruction type")((void)0); | |||
4262 | Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0)); | |||
4263 | Type *ExtTy = Ext->getType(); | |||
4264 | bool IsSExt = isa<SExtInst>(Ext); | |||
4265 | // If the operand of the extension is not an instruction, we cannot | |||
4266 | // get through. | |||
4267 | // If it is, check that we can get through. | |||
4268 | if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt)) | |||
4269 | return nullptr; | |||
4270 | ||||
4271 | // Do not promote if the operand has been added by codegenprepare. | |||
4272 | // Otherwise, it means we are undoing an optimization that is likely to be | |||
4273 | // redone, thus causing a potential infinite loop. | |||
4274 | if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd)) | |||
4275 | return nullptr; | |||
4276 | ||||
4277 | // SExt or Trunc instructions. | |||
4278 | // Return the related handler. | |||
4279 | if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) || | |||
4280 | isa<ZExtInst>(ExtOpnd)) | |||
4281 | return promoteOperandForTruncAndAnyExt; | |||
4282 | ||||
4283 | // Regular instruction. | |||
4284 | // Abort early if we will have to insert non-free instructions. | |||
4285 | if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType())) | |||
4286 | return nullptr; | |||
4287 | return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther; | |||
4288 | } | |||
4289 | ||||
4290 | Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( | |||
4291 | Instruction *SExt, TypePromotionTransaction &TPT, | |||
4292 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
4293 | SmallVectorImpl<Instruction *> *Exts, | |||
4294 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | |||
4295 | // By construction, the operand of SExt is an instruction. Otherwise we cannot | |||
4296 | // get through it and this method should not be called. | |||
4297 | Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); | |||
4298 | Value *ExtVal = SExt; | |||
4299 | bool HasMergedNonFreeExt = false; | |||
4300 | if (isa<ZExtInst>(SExtOpnd)) { | |||
4301 | // Replace s|zext(zext(opnd)) | |||
4302 | // => zext(opnd). | |||
4303 | HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd); | |||
4304 | Value *ZExt = | |||
4305 | TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType()); | |||
4306 | TPT.replaceAllUsesWith(SExt, ZExt); | |||
4307 | TPT.eraseInstruction(SExt); | |||
4308 | ExtVal = ZExt; | |||
4309 | } else { | |||
4310 | // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) | |||
4311 | // => z|sext(opnd). | |||
4312 | TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); | |||
4313 | } | |||
4314 | CreatedInstsCost = 0; | |||
4315 | ||||
4316 | // Remove dead code. | |||
4317 | if (SExtOpnd->use_empty()) | |||
4318 | TPT.eraseInstruction(SExtOpnd); | |||
4319 | ||||
4320 | // Check if the extension is still needed. | |||
4321 | Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); | |||
4322 | if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { | |||
4323 | if (ExtInst) { | |||
4324 | if (Exts) | |||
4325 | Exts->push_back(ExtInst); | |||
4326 | CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; | |||
4327 | } | |||
4328 | return ExtVal; | |||
4329 | } | |||
4330 | ||||
4331 | // At this point we have: ext ty opnd to ty. | |||
4332 | // Reassign the uses of ExtInst to the opnd and remove ExtInst. | |||
4333 | Value *NextVal = ExtInst->getOperand(0); | |||
4334 | TPT.eraseInstruction(ExtInst, NextVal); | |||
4335 | return NextVal; | |||
4336 | } | |||
4337 | ||||
4338 | Value *TypePromotionHelper::promoteOperandForOther( | |||
4339 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
4340 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
4341 | SmallVectorImpl<Instruction *> *Exts, | |||
4342 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, | |||
4343 | bool IsSExt) { | |||
4344 | // By construction, the operand of Ext is an instruction. Otherwise we cannot | |||
4345 | // get through it and this method should not be called. | |||
4346 | Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); | |||
4347 | CreatedInstsCost = 0; | |||
4348 | if (!ExtOpnd->hasOneUse()) { | |||
4349 | // ExtOpnd will be promoted. | |||
4350 | // All its uses, except Ext, will need to use a truncated value of the | |||
4351 | // promoted version. | |||
4352 | // Create the truncate now. | |||
4353 | Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); | |||
4354 | if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { | |||
4355 | // Insert it just after the definition. | |||
4356 | ITrunc->moveAfter(ExtOpnd); | |||
4357 | if (Truncs) | |||
4358 | Truncs->push_back(ITrunc); | |||
4359 | } | |||
4360 | ||||
4361 | TPT.replaceAllUsesWith(ExtOpnd, Trunc); | |||
4362 | // Restore the operand of Ext (which has been replaced by the previous call | |||
4363 | // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. | |||
4364 | TPT.setOperand(Ext, 0, ExtOpnd); | |||
4365 | } | |||
4366 | ||||
4367 | // Get through the Instruction: | |||
4368 | // 1. Update its type. | |||
4369 | // 2. Replace the uses of Ext by Inst. | |||
4370 | // 3. Extend each operand that needs to be extended. | |||
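// Schematically (hypothetical IR): for Ext = sext i32 %add to i64 with
//   %add = add nsw i32 %x, 1
// %add is retyped to i64, uses of Ext are rewritten to use %add, %x gets a
// new sext to i64, and the constant 1 is extended statically.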
4371 | ||||
4372 | // Remember the original type of the instruction before promotion. | |||
4373 | // This is useful to know that the high bits are sign extended bits. | |||
4374 | addPromotedInst(PromotedInsts, ExtOpnd, IsSExt); | |||
4375 | // Step #1. | |||
4376 | TPT.mutateType(ExtOpnd, Ext->getType()); | |||
4377 | // Step #2. | |||
4378 | TPT.replaceAllUsesWith(Ext, ExtOpnd); | |||
4379 | // Step #3. | |||
4380 | Instruction *ExtForOpnd = Ext; | |||
4381 | ||||
4382 | LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n")do { } while (false); | |||
4383 | for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; | |||
4384 | ++OpIdx) { | |||
4385 | LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n')do { } while (false); | |||
4386 | if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || | |||
4387 | !shouldExtOperand(ExtOpnd, OpIdx)) { | |||
4388 | LLVM_DEBUG(dbgs() << "No need to propagate\n")do { } while (false); | |||
4389 | continue; | |||
4390 | } | |||
4391 | // Check if we can statically extend the operand. | |||
4392 | Value *Opnd = ExtOpnd->getOperand(OpIdx); | |||
4393 | if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { | |||
4394 | LLVM_DEBUG(dbgs() << "Statically extend\n")do { } while (false); | |||
4395 | unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); | |||
4396 | APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) | |||
4397 | : Cst->getValue().zext(BitWidth); | |||
4398 | TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); | |||
4399 | continue; | |||
4400 | } | |||
4401 | // UndefValues are typed, so we have to statically sign extend them. | |||
4402 | if (isa<UndefValue>(Opnd)) { | |||
4403 | LLVM_DEBUG(dbgs() << "Statically extend\n")do { } while (false); | |||
4404 | TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType())); | |||
4405 | continue; | |||
4406 | } | |||
4407 | ||||
4408 | // Otherwise we have to explicitly sign extend the operand. | |||
4409 | // Check if Ext was reused to extend an operand. | |||
4410 | if (!ExtForOpnd) { | |||
4411 | // If it was, create a new one. | |||
4412 | LLVM_DEBUG(dbgs() << "More operands to ext\n")do { } while (false); | |||
4413 | Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType()) | |||
4414 | : TPT.createZExt(Ext, Opnd, Ext->getType()); | |||
4415 | if (!isa<Instruction>(ValForExtOpnd)) { | |||
4416 | TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd); | |||
4417 | continue; | |||
4418 | } | |||
4419 | ExtForOpnd = cast<Instruction>(ValForExtOpnd); | |||
4420 | } | |||
4421 | if (Exts) | |||
4422 | Exts->push_back(ExtForOpnd); | |||
4423 | TPT.setOperand(ExtForOpnd, 0, Opnd); | |||
4424 | ||||
4425 | // Move the sign extension before the insertion point. | |||
4426 | TPT.moveBefore(ExtForOpnd, ExtOpnd); | |||
4427 | TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd); | |||
4428 | CreatedInstsCost += !TLI.isExtFree(ExtForOpnd); | |||
4429 | // If more sexts are required, new instructions will have to be created. | |||
4430 | ExtForOpnd = nullptr; | |||
4431 | } | |||
4432 | if (ExtForOpnd == Ext) { | |||
4433 | LLVM_DEBUG(dbgs() << "Extension is useless now\n")do { } while (false); | |||
4434 | TPT.eraseInstruction(Ext); | |||
4435 | } | |||
4436 | return ExtOpnd; | |||
4437 | } | |||
4438 | ||||
4439 | /// Check whether or not promoting an instruction to a wider type is profitable. | |||
4440 | /// \p NewCost gives the cost of extension instructions created by the | |||
4441 | /// promotion. | |||
4442 | /// \p OldCost gives the cost of extension instructions before the promotion | |||
4443 | /// plus the number of instructions that have been | |||
4444 | /// matched in the addressing mode by the promotion. | |||
4445 | /// \p PromotedOperand is the value that has been promoted. | |||
4446 | /// \return True if the promotion is profitable, false otherwise. | |||
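// Worked example (hypothetical costs): if promotion creates one non-free
// extension (NewCost = 1) but lets us fold two instructions past one old
// extension (OldCost = 1 + 2), it is profitable; on a tie we additionally
// require the promoted instruction to be legal for the target.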
4447 | bool AddressingModeMatcher::isPromotionProfitable( | |||
4448 | unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const { | |||
4449 | LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCostdo { } while (false) | |||
4450 | << '\n')do { } while (false); | |||
4451 | // The cost of the new extensions is greater than the cost of the | |||
4452 | // old extension plus what we folded. | |||
4453 | // This is not profitable. | |||
4454 | if (NewCost > OldCost) | |||
4455 | return false; | |||
4456 | if (NewCost < OldCost) | |||
4457 | return true; | |||
4458 | // The promotion is neutral but it may help folding the sign extension in | |||
4459 | // loads for instance. | |||
4460 | // Check that we did not create an illegal instruction. | |||
4461 | return isPromotedInstructionLegal(TLI, DL, PromotedOperand); | |||
4462 | } | |||
4463 | ||||
4464 | /// Given an instruction or constant expr, see if we can fold the operation | |||
4465 | /// into the addressing mode. If so, update the addressing mode and return | |||
4466 | /// true, otherwise return false without modifying AddrMode. | |||
4467 | /// If \p MovedAway is not NULL, it contains the information of whether or | |||
4468 | /// not AddrInst has to be folded into the addressing mode on success. | |||
4469 | /// If \p MovedAway == true, \p AddrInst will not be part of the addressing | |||
4470 | /// mode, because it has been moved away. | |||
4471 | /// Thus AddrInst must not be added in the matched instructions. | |||
4472 | /// This state can happen when AddrInst is a sext, since it may be moved away. | |||
4473 | /// Therefore, AddrInst may not be valid when MovedAway is true and it must | |||
4474 | /// not be referenced anymore. | |||
4475 | bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode, | |||
4476 | unsigned Depth, | |||
4477 | bool *MovedAway) { | |||
4478 | // Avoid exponential behavior on extremely deep expression trees. | |||
4479 | if (Depth >= 5) return false; | |||
4480 | ||||
4481 | // By default, all matched instructions stay in place. | |||
4482 | if (MovedAway) | |||
4483 | *MovedAway = false; | |||
4484 | ||||
4485 | switch (Opcode) { | |||
4486 | case Instruction::PtrToInt: | |||
4487 | // PtrToInt is always a noop, as we know that the int type is pointer sized. | |||
4488 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
4489 | case Instruction::IntToPtr: { | |||
4490 | auto AS = AddrInst->getType()->getPointerAddressSpace(); | |||
4491 | auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); | |||
4492 | // This inttoptr is a no-op if the integer type is pointer sized. | |||
4493 | if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy) | |||
4494 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
4495 | return false; | |||
4496 | } | |||
4497 | case Instruction::BitCast: | |||
4498 | // BitCast is always a noop, and we can handle it as long as it is | |||
4499 | // int->int or pointer->pointer (we don't want int<->fp or something). | |||
4500 | if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() && | |||
4501 | // Don't touch identity bitcasts. These were probably put here by LSR, | |||
4502 | // and we don't want to mess around with them. Assume it knows what it | |||
4503 | // is doing. | |||
4504 | AddrInst->getOperand(0)->getType() != AddrInst->getType()) | |||
4505 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
4506 | return false; | |||
4507 | case Instruction::AddrSpaceCast: { | |||
4508 | unsigned SrcAS | |||
4509 | = AddrInst->getOperand(0)->getType()->getPointerAddressSpace(); | |||
4510 | unsigned DestAS = AddrInst->getType()->getPointerAddressSpace(); | |||
4511 | if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS)) | |||
4512 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
4513 | return false; | |||
4514 | } | |||
4515 | case Instruction::Add: { | |||
4516 | // Check to see if we can merge in the RHS then the LHS. If so, we win. | |||
4517 | ExtAddrMode BackupAddrMode = AddrMode; | |||
4518 | unsigned OldSize = AddrModeInsts.size(); | |||
4519 | // Start a transaction at this point. | |||
4520 | // The LHS may match but not the RHS. | |||
4521 | // Therefore, we need a higher level restoration point to undo partially | |||
4522 | // matched operation. | |||
4523 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4524 | TPT.getRestorationPoint(); | |||
4525 | ||||
4526 | AddrMode.InBounds = false; | |||
4527 | if (matchAddr(AddrInst->getOperand(1), Depth+1) && | |||
4528 | matchAddr(AddrInst->getOperand(0), Depth+1)) | |||
4529 | return true; | |||
4530 | ||||
4531 | // Restore the old addr mode info. | |||
4532 | AddrMode = BackupAddrMode; | |||
4533 | AddrModeInsts.resize(OldSize); | |||
4534 | TPT.rollback(LastKnownGood); | |||
4535 | ||||
4536 | // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. | |||
4537 | if (matchAddr(AddrInst->getOperand(0), Depth+1) && | |||
4538 | matchAddr(AddrInst->getOperand(1), Depth+1)) | |||
4539 | return true; | |||
4540 | ||||
4541 | // Otherwise we definitely can't merge the ADD in. | |||
4542 | AddrMode = BackupAddrMode; | |||
4543 | AddrModeInsts.resize(OldSize); | |||
4544 | TPT.rollback(LastKnownGood); | |||
4545 | break; | |||
4546 | } | |||
4547 | //case Instruction::Or: | |||
4548 | // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. | |||
4549 | //break; | |||
4550 | case Instruction::Mul: | |||
4551 | case Instruction::Shl: { | |||
4552 | // Can only handle X*C and X << C. | |||
4553 | AddrMode.InBounds = false; | |||
4554 | ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); | |||
4555 | if (!RHS || RHS->getBitWidth() > 64) | |||
4556 | return false; | |||
4557 | int64_t Scale = RHS->getSExtValue(); | |||
4558 | if (Opcode == Instruction::Shl) | |||
4559 | Scale = 1LL << Scale; | |||
4560 | ||||
4561 | return matchScaledValue(AddrInst->getOperand(0), Scale, Depth); | |||
4562 | } | |||
4563 | case Instruction::GetElementPtr: { | |||
4564 | // Scan the GEP. We check whether it contains constant offsets and at most | |||
4565 | // one variable offset. | |||
4566 | int VariableOperand = -1; | |||
4567 | unsigned VariableScale = 0; | |||
4568 | ||||
4569 | int64_t ConstantOffset = 0; | |||
4570 | gep_type_iterator GTI = gep_type_begin(AddrInst); | |||
4571 | for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { | |||
4572 | if (StructType *STy = GTI.getStructTypeOrNull()) { | |||
4573 | const StructLayout *SL = DL.getStructLayout(STy); | |||
4574 | unsigned Idx = | |||
4575 | cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); | |||
4576 | ConstantOffset += SL->getElementOffset(Idx); | |||
4577 | } else { | |||
4578 | TypeSize TS = DL.getTypeAllocSize(GTI.getIndexedType()); | |||
4579 | if (TS.isNonZero()) { | |||
4580 | // The optimisations below currently only work for fixed offsets. | |||
4581 | if (TS.isScalable()) | |||
4582 | return false; | |||
4583 | int64_t TypeSize = TS.getFixedSize(); | |||
4584 | if (ConstantInt *CI = | |||
4585 | dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { | |||
4586 | const APInt &CVal = CI->getValue(); | |||
4587 | if (CVal.getMinSignedBits() <= 64) { | |||
4588 | ConstantOffset += CVal.getSExtValue() * TypeSize; | |||
4589 | continue; | |||
4590 | } | |||
4591 | } | |||
4592 | // We only allow one variable index at the moment. | |||
4593 | if (VariableOperand != -1) | |||
4594 | return false; | |||
4595 | ||||
4596 | // Remember the variable index. | |||
4597 | VariableOperand = i; | |||
4598 | VariableScale = TypeSize; | |||
4599 | } | |||
4600 | } | |||
4601 | } | |||
4602 | ||||
4603 | // A common case is for the GEP to only do a constant offset. In this case, | |||
4604 | // just add it to the disp field and check validity. | |||
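// E.g. (schematically): gep %base, <indices> where every index is constant,
// with the element offsets summing to 8, simply adds 8 to AddrMode.BaseOffs
// and then tries to match %base itself.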
4605 | if (VariableOperand == -1) { | |||
4606 | AddrMode.BaseOffs += ConstantOffset; | |||
4607 | if (ConstantOffset == 0 || | |||
4608 | TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) { | |||
4609 | // Check to see if we can fold the base pointer in too. | |||
4610 | if (matchAddr(AddrInst->getOperand(0), Depth+1)) { | |||
4611 | if (!cast<GEPOperator>(AddrInst)->isInBounds()) | |||
4612 | AddrMode.InBounds = false; | |||
4613 | return true; | |||
4614 | } | |||
4615 | } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) && | |||
4616 | TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 && | |||
4617 | ConstantOffset > 0) { | |||
4618 | // Record GEPs with non-zero offsets as candidates for splitting in the | |||
4619 | // event that the offset cannot fit into the r+i addressing mode. | |||
4620 | // Simple and common case that only one GEP is used in calculating the | |||
4621 | // address for the memory access. | |||
4622 | Value *Base = AddrInst->getOperand(0); | |||
4623 | auto *BaseI = dyn_cast<Instruction>(Base); | |||
4624 | auto *GEP = cast<GetElementPtrInst>(AddrInst); | |||
4625 | if (isa<Argument>(Base) || isa<GlobalValue>(Base) || | |||
4626 | (BaseI && !isa<CastInst>(BaseI) && | |||
4627 | !isa<GetElementPtrInst>(BaseI))) { | |||
4628 | // Make sure the parent block allows inserting non-PHI instructions | |||
4629 | // before the terminator. | |||
4630 | BasicBlock *Parent = | |||
4631 | BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock(); | |||
4632 | if (!Parent->getTerminator()->isEHPad()) | |||
4633 | LargeOffsetGEP = std::make_pair(GEP, ConstantOffset); | |||
4634 | } | |||
4635 | } | |||
4636 | AddrMode.BaseOffs -= ConstantOffset; | |||
4637 | return false; | |||
4638 | } | |||
4639 | ||||
4640 | // Save the valid addressing mode in case we can't match. | |||
4641 | ExtAddrMode BackupAddrMode = AddrMode; | |||
4642 | unsigned OldSize = AddrModeInsts.size(); | |||
4643 | ||||
4644 | // See if the scale and offset amount is valid for this target. | |||
4645 | AddrMode.BaseOffs += ConstantOffset; | |||
4646 | if (!cast<GEPOperator>(AddrInst)->isInBounds()) | |||
4647 | AddrMode.InBounds = false; | |||
4648 | ||||
4649 | // Match the base operand of the GEP. | |||
4650 | if (!matchAddr(AddrInst->getOperand(0), Depth+1)) { | |||
4651 | // If it couldn't be matched, just stuff the value in a register. | |||
4652 | if (AddrMode.HasBaseReg) { | |||
4653 | AddrMode = BackupAddrMode; | |||
4654 | AddrModeInsts.resize(OldSize); | |||
4655 | return false; | |||
4656 | } | |||
4657 | AddrMode.HasBaseReg = true; | |||
4658 | AddrMode.BaseReg = AddrInst->getOperand(0); | |||
4659 | } | |||
4660 | ||||
4661 | // Match the remaining variable portion of the GEP. | |||
4662 | if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, | |||
4663 | Depth)) { | |||
4664 | // If it couldn't be matched, try stuffing the base into a register | |||
4665 | // instead of matching it, and retrying the match of the scale. | |||
4666 | AddrMode = BackupAddrMode; | |||
4667 | AddrModeInsts.resize(OldSize); | |||
4668 | if (AddrMode.HasBaseReg) | |||
4669 | return false; | |||
4670 | AddrMode.HasBaseReg = true; | |||
4671 | AddrMode.BaseReg = AddrInst->getOperand(0); | |||
4672 | AddrMode.BaseOffs += ConstantOffset; | |||
4673 | if (!matchScaledValue(AddrInst->getOperand(VariableOperand), | |||
4674 | VariableScale, Depth)) { | |||
4675 | // If even that didn't work, bail. | |||
4676 | AddrMode = BackupAddrMode; | |||
4677 | AddrModeInsts.resize(OldSize); | |||
4678 | return false; | |||
4679 | } | |||
4680 | } | |||
4681 | ||||
4682 | return true; | |||
4683 | } | |||
4684 | case Instruction::SExt: | |||
4685 | case Instruction::ZExt: { | |||
4686 | Instruction *Ext = dyn_cast<Instruction>(AddrInst); | |||
4687 | if (!Ext) | |||
4688 | return false; | |||
4689 | ||||
4690 | // Try to move this ext out of the way of the addressing mode. | |||
4691 | // Ask for a method for doing so. | |||
4692 | TypePromotionHelper::Action TPH = | |||
4693 | TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); | |||
4694 | if (!TPH) | |||
4695 | return false; | |||
4696 | ||||
4697 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4698 | TPT.getRestorationPoint(); | |||
4699 | unsigned CreatedInstsCost = 0; | |||
4700 | unsigned ExtCost = !TLI.isExtFree(Ext); | |||
4701 | Value *PromotedOperand = | |||
4702 | TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); | |||
4703 | // SExt has been moved away. | |||
4704 | // Thus either it will be rematched later in the recursive calls or it is | |||
4705 | // gone. Anyway, we must not fold it into the addressing mode at this point. | |||
4706 | // E.g., | |||
4707 | // op = add opnd, 1 | |||
4708 | // idx = ext op | |||
4709 | // addr = gep base, idx | |||
4710 | // is now: | |||
4711 | // promotedOpnd = ext opnd <- no match here | |||
4712 | // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) | |||
4713 | // addr = gep base, op <- match | |||
4714 | if (MovedAway) | |||
4715 | *MovedAway = true; | |||
4716 | ||||
4717 | assert(PromotedOperand &&((void)0) | |||
4718 | "TypePromotionHelper should have filtered out those cases")((void)0); | |||
4719 | ||||
4720 | ExtAddrMode BackupAddrMode = AddrMode; | |||
4721 | unsigned OldSize = AddrModeInsts.size(); | |||
4722 | ||||
4723 | if (!matchAddr(PromotedOperand, Depth) || | |||
4724 | // The total of the new cost is equal to the cost of the created | |||
4725 | // instructions. | |||
4726 | // The total of the old cost is equal to the cost of the extension plus | |||
4727 | // what we have saved in the addressing mode. | |||
4728 | !isPromotionProfitable(CreatedInstsCost, | |||
4729 | ExtCost + (AddrModeInsts.size() - OldSize), | |||
4730 | PromotedOperand)) { | |||
4731 | AddrMode = BackupAddrMode; | |||
4732 | AddrModeInsts.resize(OldSize); | |||
4733 | LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n")do { } while (false); | |||
4734 | TPT.rollback(LastKnownGood); | |||
4735 | return false; | |||
4736 | } | |||
4737 | return true; | |||
4738 | } | |||
4739 | } | |||
4740 | return false; | |||
4741 | } | |||
4742 | ||||
4743 | /// If we can, try to add the value of 'Addr' into the current addressing mode. | |||
4744 | /// If Addr can't be added to AddrMode this returns false and leaves AddrMode | |||
4745 | /// unmodified. This assumes that Addr is either a pointer type or intptr_t | |||
4746 | /// for the target. | |||
4747 | /// | |||
4748 | bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { | |||
4749 | // Start a transaction at this point that we will rollback if the matching | |||
4750 | // fails. | |||
4751 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4752 | TPT.getRestorationPoint(); | |||
4753 | if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { | |||
4754 | if (CI->getValue().isSignedIntN(64)) { | |||
4755 | // Fold in immediates if legal for the target. | |||
4756 | AddrMode.BaseOffs += CI->getSExtValue(); | |||
4757 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4758 | return true; | |||
4759 | AddrMode.BaseOffs -= CI->getSExtValue(); | |||
4760 | } | |||
4761 | } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { | |||
4762 | // If this is a global variable, try to fold it into the addressing mode. | |||
4763 | if (!AddrMode.BaseGV) { | |||
4764 | AddrMode.BaseGV = GV; | |||
4765 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4766 | return true; | |||
4767 | AddrMode.BaseGV = nullptr; | |||
4768 | } | |||
4769 | } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { | |||
4770 | ExtAddrMode BackupAddrMode = AddrMode; | |||
4771 | unsigned OldSize = AddrModeInsts.size(); | |||
4772 | ||||
4773 | // Check to see if it is possible to fold this operation. | |||
4774 | bool MovedAway = false; | |||
4775 | if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { | |||
4776 | // This instruction may have been moved away. If so, there is nothing | |||
4777 | // to check here. | |||
4778 | if (MovedAway) | |||
4779 | return true; | |||
4780 | // Okay, it's possible to fold this. Check to see if it is actually | |||
4781 | // *profitable* to do so. We use a simple cost model to avoid increasing | |||
4782 | // register pressure too much. | |||
4783 | if (I->hasOneUse() || | |||
4784 | isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { | |||
4785 | AddrModeInsts.push_back(I); | |||
4786 | return true; | |||
4787 | } | |||
4788 | ||||
4789 | // It isn't profitable to do this, roll back. | |||
4790 | //cerr << "NOT FOLDING: " << *I; | |||
4791 | AddrMode = BackupAddrMode; | |||
4792 | AddrModeInsts.resize(OldSize); | |||
4793 | TPT.rollback(LastKnownGood); | |||
4794 | } | |||
4795 | } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { | |||
4796 | if (matchOperationAddr(CE, CE->getOpcode(), Depth)) | |||
4797 | return true; | |||
4798 | TPT.rollback(LastKnownGood); | |||
4799 | } else if (isa<ConstantPointerNull>(Addr)) { | |||
4800 | // Null pointer gets folded without affecting the addressing mode. | |||
4801 | return true; | |||
4802 | } | |||
4803 | ||||
4804 | // Worst case, the target should support [reg] addressing modes. :) | |||
4805 | if (!AddrMode.HasBaseReg) { | |||
4806 | AddrMode.HasBaseReg = true; | |||
4807 | AddrMode.BaseReg = Addr; | |||
4808 | // Still check for legality in case the target supports [imm] but not [i+r]. | |||
4809 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4810 | return true; | |||
4811 | AddrMode.HasBaseReg = false; | |||
4812 | AddrMode.BaseReg = nullptr; | |||
4813 | } | |||
4814 | ||||
4815 | // If the base register is already taken, see if we can do [r+r]. | |||
4816 | if (AddrMode.Scale == 0) { | |||
4817 | AddrMode.Scale = 1; | |||
4818 | AddrMode.ScaledReg = Addr; | |||
4819 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4820 | return true; | |||
4821 | AddrMode.Scale = 0; | |||
4822 | AddrMode.ScaledReg = nullptr; | |||
4823 | } | |||
4824 | // Couldn't match. | |||
4825 | TPT.rollback(LastKnownGood); | |||
4826 | return false; | |||
4827 | } | |||
4828 | ||||
4829 | /// Check to see if all uses of OpVal by the specified inline asm call are due | |||
4830 | /// to memory operands. If so, return true, otherwise return false. | |||
4831 | static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, | |||
4832 | const TargetLowering &TLI, | |||
4833 | const TargetRegisterInfo &TRI) { | |||
4834 | const Function *F = CI->getFunction(); | |||
4835 | TargetLowering::AsmOperandInfoVector TargetConstraints = | |||
4836 | TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, *CI); | |||
4837 | ||||
4838 | for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { | |||
4839 | TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; | |||
4840 | ||||
4841 | // Compute the constraint code and ConstraintType to use. | |||
4842 | TLI.ComputeConstraintToUse(OpInfo, SDValue()); | |||
4843 | ||||
4844 | // If this asm operand is our Value*, and if it isn't an indirect memory | |||
4845 | // operand, we can't fold it! | |||
4846 | if (OpInfo.CallOperandVal == OpVal && | |||
4847 | (OpInfo.ConstraintType != TargetLowering::C_Memory || | |||
4848 | !OpInfo.isIndirect)) | |||
4849 | return false; | |||
4850 | } | |||
4851 | ||||
4852 | return true; | |||
4853 | } | |||
4854 | ||||
4855 | // Max number of memory uses to look at before aborting the search to conserve | |||
4856 | // compile time. | |||
4857 | static constexpr int MaxMemoryUsesToScan = 20; | |||
4858 | ||||
4859 | /// Recursively walk all the uses of I until we find a memory use. | |||
4860 | /// If we find an obviously non-foldable instruction, return true. | |||
4861 | /// Add the ultimately found memory instructions to MemoryUses. | |||
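/// For illustration, a hypothetical chain (not from this file):
///   %a = add i64 %x, 8
///   %p = inttoptr i64 %a to i32*
///   %v = load i32, i32* %p
/// walking the uses of %a records (load %v, operand 0) in MemoryUses, whereas
/// a non-memory user such as "ret i64 %a" makes this return true.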
4862 | static bool FindAllMemoryUses( | |||
4863 | Instruction *I, | |||
4864 | SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses, | |||
4865 | SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI, | |||
4866 | const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI, | |||
4867 | BlockFrequencyInfo *BFI, int SeenInsts = 0) { | |||
4868 | // If we already considered this instruction, we're done. | |||
4869 | if (!ConsideredInsts.insert(I).second) | |||
4870 | return false; | |||
4871 | ||||
4872 | // If this is an obviously unfoldable instruction, bail out. | |||
4873 | if (!MightBeFoldableInst(I)) | |||
4874 | return true; | |||
4875 | ||||
4876 | // Loop over all the uses, recursively processing them. | |||
4877 | for (Use &U : I->uses()) { | |||
4878 | // Conservatively return true if we're seeing a large number or a deep chain | |||
4879 | // of users. This avoids excessive compilation times in pathological cases. | |||
4880 | if (SeenInsts++ >= MaxMemoryUsesToScan) | |||
4881 | return true; | |||
4882 | ||||
4883 | Instruction *UserI = cast<Instruction>(U.getUser()); | |||
4884 | if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { | |||
4885 | MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); | |||
4886 | continue; | |||
4887 | } | |||
4888 | ||||
4889 | if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { | |||
4890 | unsigned opNo = U.getOperandNo(); | |||
4891 | if (opNo != StoreInst::getPointerOperandIndex()) | |||
4892 | return true; // Storing addr, not into addr. | |||
4893 | MemoryUses.push_back(std::make_pair(SI, opNo)); | |||
4894 | continue; | |||
4895 | } | |||
4896 | ||||
4897 | if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) { | |||
4898 | unsigned opNo = U.getOperandNo(); | |||
4899 | if (opNo != AtomicRMWInst::getPointerOperandIndex()) | |||
4900 | return true; // Storing addr, not into addr. | |||
4901 | MemoryUses.push_back(std::make_pair(RMW, opNo)); | |||
4902 | continue; | |||
4903 | } | |||
4904 | ||||
4905 | if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) { | |||
4906 | unsigned opNo = U.getOperandNo(); | |||
4907 | if (opNo != AtomicCmpXchgInst::getPointerOperandIndex()) | |||
4908 | return true; // Storing addr, not into addr. | |||
4909 | MemoryUses.push_back(std::make_pair(CmpX, opNo)); | |||
4910 | continue; | |||
4911 | } | |||
4912 | ||||
4913 | if (CallInst *CI = dyn_cast<CallInst>(UserI)) { | |||
4914 | if (CI->hasFnAttr(Attribute::Cold)) { | |||
4915 | // If this is a cold call, we can sink the addressing calculation into | |||
4916 | // the cold path. See optimizeCallInst. | |||
4917 | bool OptForSize = OptSize || | |||
4918 | llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI); | |||
4919 | if (!OptForSize) | |||
4920 | continue; | |||
4921 | } | |||
4922 | ||||
4923 | InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand()); | |||
4924 | if (!IA) return true; | |||
4925 | ||||
4926 | // If this is a memory operand, we're cool, otherwise bail out. | |||
4927 | if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI)) | |||
4928 | return true; | |||
4929 | continue; | |||
4930 | } | |||
4931 | ||||
4932 | if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize, | |||
4933 | PSI, BFI, SeenInsts)) | |||
4934 | return true; | |||
4935 | } | |||
4936 | ||||
4937 | return false; | |||
4938 | } | |||
4939 | ||||
4940 | /// Return true if Val is already known to be live at the use site that we're | |||
4941 | /// folding it into. If so, there is no cost to include it in the addressing | |||
4942 | /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the | |||
4943 | /// instruction already. | |||
4944 | bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, | |||
4945 | Value *KnownLive2) { | |||
4946 | // If Val is either of the known-live values, we know it is live! | |||
4947 | if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) | |||
4948 | return true; | |||
4949 | ||||
4950 | // All values other than instructions and arguments (e.g. constants) are live. | |||
4951 | if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; | |||
4952 | ||||
4953 | // If Val is a constant sized alloca in the entry block, it is live, this is | |||
4954 | // true because it is just a reference to the stack/frame pointer, which is | |||
4955 | // live for the whole function. | |||
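// For example (hypothetical): "%buf = alloca [64 x i8]" in the entry block is
// just a fixed offset from the stack/frame pointer, so it is live everywhere.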
4956 | if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) | |||
4957 | if (AI->isStaticAlloca()) | |||
4958 | return true; | |||
4959 | ||||
4960 | // Check to see if this value is already used in the memory instruction's | |||
4961 | // block. If so, it's already live into the block at the very least, so we | |||
4962 | // can reasonably fold it. | |||
4963 | return Val->isUsedInBasicBlock(MemoryInst->getParent()); | |||
4964 | } | |||
4965 | ||||
4966 | /// It is possible for the addressing mode of the machine to fold the specified | |||
4967 | /// instruction into a load or store that ultimately uses it. | |||
4968 | /// However, the specified instruction has multiple uses. | |||
4969 | /// Given this, it may actually increase register pressure to fold it | |||
4970 | /// into the load. For example, consider this code: | |||
4971 | /// | |||
4972 | /// X = ... | |||
4973 | /// Y = X+1 | |||
4974 | /// use(Y) -> nonload/store | |||
4975 | /// Z = Y+1 | |||
4976 | /// load Z | |||
4977 | /// | |||
4978 | /// In this case, Y has multiple uses, and can be folded into the load of Z | |||
4979 | /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to | |||
4980 | /// be live at the use(Y) line. If we don't fold Y into load Z, we use one | |||
4981 | /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the | |||
4982 | /// number of computations either. | |||
4983 | /// | |||
4984 | /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If | |||
4985 | /// X was live across 'load Z' for other reasons, we actually *would* want to | |||
4986 | /// fold the addressing mode in the Z case. This would make Y die earlier. | |||
4987 | bool AddressingModeMatcher:: | |||
4988 | isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, | |||
4989 | ExtAddrMode &AMAfter) { | |||
4990 | if (IgnoreProfitability) return true; | |||
4991 | ||||
4992 | // AMBefore is the addressing mode before this instruction was folded into it, | |||
4993 | // and AMAfter is the addressing mode after the instruction was folded. Get | |||
4994 | // the set of registers referenced by AMAfter and subtract out those | |||
4995 | // referenced by AMBefore: this is the set of values which folding in this | |||
4996 | // address extends the lifetime of. | |||
4997 | // | |||
4998 | // Note that there are only two potential values being referenced here, | |||
4999 | // BaseReg and ScaleReg (global addresses are always available, as are any | |||
5000 | // folded immediates). | |||
5001 | Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; | |||
5002 | ||||
5003 | // If the BaseReg or ScaledReg was referenced by the previous addrmode, their | |||
5004 | // lifetime wasn't extended by adding this instruction. | |||
5005 | if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) | |||
5006 | BaseReg = nullptr; | |||
5007 | if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) | |||
5008 | ScaledReg = nullptr; | |||
5009 | ||||
5010 | // If folding this instruction (and its subexprs) didn't extend any live | |||
5011 | // ranges, we're ok with it. | |||
5012 | if (!BaseReg && !ScaledReg) | |||
5013 | return true; | |||
5014 | ||||
5015 | // If all uses of this instruction can have the address mode sunk into them, | |||
5016 | // we can remove the addressing mode and effectively trade one live register | |||
5017 | // for another (at worst.) In this context, folding an addressing mode into | |||
5018 | // the use is just a particularly nice way of sinking it. | |||
5019 | SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses; | |||
5020 | SmallPtrSet<Instruction*, 16> ConsideredInsts; | |||
5021 | if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize, | |||
5022 | PSI, BFI)) | |||
5023 | return false; // Has a non-memory, non-foldable use! | |||
5024 | ||||
5025 | // Now that we know that all uses of this instruction are part of a chain of | |||
5026 | // computation involving only operations that could theoretically be folded | |||
5027 | // into a memory use, loop over each of these memory operation uses and see | |||
5028 | // if they could *actually* fold the instruction. The assumption is that | |||
5029 | // addressing modes are cheap and that duplicating the computation involved | |||
5030 | // many times is worthwhile, even on a fastpath. For sinking candidates | |||
5031 | // (i.e. cold call sites), this serves as a way to prevent excessive code | |||
5032 | // growth since most architectures have some reasonably small and fast way to | |||
5033 | // compute an effective address. (e.g., LEA on x86) | |||
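// Illustrative sketch (assumed target behavior, not verified here): on x86
// an address like [%base + 4*%idx + 16] can be re-materialized at each use
// with a single LEA, so duplicating the computation is usually cheaper than
// keeping an intermediate value live across the fast path.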
5034 | SmallVector<Instruction*, 32> MatchedAddrModeInsts; | |||
5035 | for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { | |||
5036 | Instruction *User = MemoryUses[i].first; | |||
5037 | unsigned OpNo = MemoryUses[i].second; | |||
5038 | ||||
5039 | // Get the access type of this use. If the use isn't a pointer, we don't | |||
5040 | // know what it accesses. | |||
5041 | Value *Address = User->getOperand(OpNo); | |||
5042 | PointerType *AddrTy = dyn_cast<PointerType>(Address->getType()); | |||
5043 | if (!AddrTy) | |||
5044 | return false; | |||
5045 | Type *AddressAccessTy = AddrTy->getElementType(); | |||
5046 | unsigned AS = AddrTy->getAddressSpace(); | |||
5047 | ||||
5048 | // Do a match against the root of this address, ignoring profitability. This | |||
5049 | // will tell us if the addressing mode for the memory operation will | |||
5050 | // *actually* cover the shared instruction. | |||
5051 | ExtAddrMode Result; | |||
5052 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, | |||
5053 | 0); | |||
5054 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
5055 | TPT.getRestorationPoint(); | |||
5056 | AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn, | |||
5057 | AddressAccessTy, AS, MemoryInst, Result, | |||
5058 | InsertedInsts, PromotedInsts, TPT, | |||
5059 | LargeOffsetGEP, OptSize, PSI, BFI); | |||
5060 | Matcher.IgnoreProfitability = true; | |||
5061 | bool Success = Matcher.matchAddr(Address, 0); | |||
5062 | (void)Success; assert(Success && "Couldn't select *anything*?"); | |||
5063 | ||||
5064 | // The match was to check the profitability, the changes made are not | |||
5065 | // part of the original matcher. Therefore, they should be dropped | |||
5066 | // otherwise the original matcher will not present the right state. | |||
5067 | TPT.rollback(LastKnownGood); | |||
5068 | ||||
5069 | // If the match didn't cover I, then it won't be shared by it. | |||
5070 | if (!is_contained(MatchedAddrModeInsts, I)) | |||
5071 | return false; | |||
5072 | ||||
5073 | MatchedAddrModeInsts.clear(); | |||
5074 | } | |||
5075 | ||||
5076 | return true; | |||
5077 | } | |||
5078 | ||||
5079 | /// Return true if the specified values are defined in a | |||
5080 | /// different basic block than BB. | |||
5081 | static bool IsNonLocalValue(Value *V, BasicBlock *BB) { | |||
5082 | if (Instruction *I = dyn_cast<Instruction>(V)) | |||
5083 | return I->getParent() != BB; | |||
5084 | return false; | |||
5085 | } | |||
5086 | ||||
5087 | /// Sink addressing mode computation immediately before MemoryInst if doing so | |||
5088 | /// can be done without increasing register pressure. The need for the | |||
5089 | /// register pressure constraint means this can end up being an all or nothing | |||
5090 | /// decision for all uses of the same addressing computation. | |||
5091 | /// | |||
5092 | /// Load and Store Instructions often have addressing modes that can do | |||
5093 | /// significant amounts of computation. As such, instruction selection will try | |||
5094 | /// to get the load or store to do as much computation as possible for the | |||
5095 | /// program. The problem is that isel can only see within a single block. As | |||
5096 | /// such, we sink as much legal addressing mode work into the block as possible. | |||
5097 | /// | |||
5098 | /// This method is used to optimize both load/store and inline asms with memory | |||
5099 | /// operands. It's also used to sink addressing computations feeding into cold | |||
5100 | /// call sites into their (cold) basic block. | |||
5101 | /// | |||
5102 | /// The motivation for handling sinking into cold blocks is that doing so can | |||
5103 | /// both enable other address mode sinking (by satisfying the register pressure | |||
5104 | /// constraint above), and reduce register pressure globally (by removing the | |||
5105 | /// addressing mode computation from the fast path entirely). | |||
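/// A minimal before/after sketch (hypothetical IR, not from this file):
///   BB0:  %a = add i64 %base, %off
///         %p = inttoptr i64 %a to i32*
///   BB1:  %v = load i32, i32* %p
/// becomes, after sinking the computation next to its use:
///   BB1:  %sunkaddr = ...the add/inttoptr rebuilt before the load...
///         %v = load i32, i32* %sunkaddr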
5106 | bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, | |||
5107 | Type *AccessTy, unsigned AddrSpace) { | |||
5108 | Value *Repl = Addr; | |||
5109 | ||||
5110 | // Try to collapse single-value PHI nodes. This is necessary to undo | |||
5111 | // unprofitable PRE transformations. | |||
5112 | SmallVector<Value*, 8> worklist; | |||
5113 | SmallPtrSet<Value*, 16> Visited; | |||
5114 | worklist.push_back(Addr); | |||
5115 | ||||
5116 | // Use a worklist to iteratively look through PHI and select nodes, and | |||
5117 | // ensure that the addressing modes obtained from the non-PHI/select roots of | |||
5118 | // the graph are compatible. | |||
5119 | bool PhiOrSelectSeen = false; | |||
5120 | SmallVector<Instruction*, 16> AddrModeInsts; | |||
5121 | const SimplifyQuery SQ(*DL, TLInfo); | |||
5122 | AddressingModeCombiner AddrModes(SQ, Addr); | |||
5123 | TypePromotionTransaction TPT(RemovedInsts); | |||
5124 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
5125 | TPT.getRestorationPoint(); | |||
5126 | while (!worklist.empty()) { | |||
5127 | Value *V = worklist.back(); | |||
5128 | worklist.pop_back(); | |||
5129 | ||||
5130 | // We allow traversing cyclic Phi nodes. | |||
5131 | // On success, after this loop every path through the Phi nodes computes | |||
5132 | // an address of the form | |||
5133 | // BaseGV + Base + Scale * Index + Offset | |||
5134 | // where Scale and Offset are constants and BaseGV, Base and Index | |||
5135 | // are exactly the same Values in all cases. | |||
5136 | // This means that BaseGV, Scale and Offset dominate our memory instruction | |||
5137 | // and have the same values as in the address computation represented as a | |||
5138 | // Phi, so we can safely sink the address computation to the memory instruction. | |||
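// An illustrative sketch (hypothetical IR): for
//   %p = phi i32* [ %g1, %bb1 ], [ %g2, %bb2 ]
// with %g1 = gep %base, %i and %g2 = gep %base, %i, both incoming values
// reduce to the same Base + Scale * Index shape, so the combined addressing
// mode can still be sunk next to the memory instruction.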
5139 | if (!Visited.insert(V).second) | |||
5140 | continue; | |||
5141 | ||||
5142 | // For a PHI node, push all of its incoming values. | |||
5143 | if (PHINode *P = dyn_cast<PHINode>(V)) { | |||
5144 | append_range(worklist, P->incoming_values()); | |||
5145 | PhiOrSelectSeen = true; | |||
5146 | continue; | |||
5147 | } | |||
5148 | // Similar for select. | |||
5149 | if (SelectInst *SI = dyn_cast<SelectInst>(V)) { | |||
5150 | worklist.push_back(SI->getFalseValue()); | |||
5151 | worklist.push_back(SI->getTrueValue()); | |||
5152 | PhiOrSelectSeen = true; | |||
5153 | continue; | |||
5154 | } | |||
5155 | ||||
5156 | // For non-PHIs, determine the addressing mode being computed. Note that | |||
5157 | // the result may differ depending on what other uses our candidate | |||
5158 | // addressing instructions might have. | |||
5159 | AddrModeInsts.clear(); | |||
5160 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, | |||
5161 | 0); | |||
5162 | // Defer the query for (and possible computation of) the dom tree to the | |||
5163 | // point of actual use. It's expected that most address matches don't | |||
5164 | // actually need the domtree. | |||
5165 | auto getDTFn = [MemoryInst, this]() -> const DominatorTree & { | |||
5166 | Function *F = MemoryInst->getParent()->getParent(); | |||
5167 | return this->getDT(*F); | |||
5168 | }; | |||
5169 | ExtAddrMode NewAddrMode = AddressingModeMatcher::Match( | |||
5170 | V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn, | |||
5171 | *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI, | |||
5172 | BFI.get()); | |||
5173 | ||||
5174 | GetElementPtrInst *GEP = LargeOffsetGEP.first; | |||
5175 | if (GEP && !NewGEPBases.count(GEP)) { | |||
5176 | // If splitting the underlying data structure can reduce the offset of a | |||
5177 | // GEP, collect the GEP. Skip the GEPs that are the new bases of | |||
5178 | // previously split data structures. | |||
5179 | LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP); | |||
5180 | if (LargeOffsetGEPID.find(GEP) == LargeOffsetGEPID.end()) | |||
5181 | LargeOffsetGEPID[GEP] = LargeOffsetGEPID.size(); | |||
5182 | } | |||
5183 | ||||
5184 | NewAddrMode.OriginalValue = V; | |||
5185 | if (!AddrModes.addNewAddrMode(NewAddrMode)) | |||
5186 | break; | |||
5187 | } | |||
5188 | ||||
5189 | // Try to combine the AddrModes we've collected. If we couldn't collect any, | |||
5190 | // or we have multiple but either couldn't combine them or combining them | |||
5191 | // wouldn't do anything useful, bail out now. | |||
5192 | if (!AddrModes.combineAddrModes()) { | |||
5193 | TPT.rollback(LastKnownGood); | |||
5194 | return false; | |||
5195 | } | |||
5196 | bool Modified = TPT.commit(); | |||
5197 | ||||
5198 | // Get the combined AddrMode (or the only AddrMode, if we only had one). | |||
5199 | ExtAddrMode AddrMode = AddrModes.getAddrMode(); | |||
5200 | ||||
5201 | // If all the instructions matched are already in this BB, don't do anything. | |||
5202 | // If we saw a Phi node then it is definitely not local, and if we saw a select | |||
5203 | // then we want to push the address calculation past it even if it's already | |||
5204 | // in this BB. | |||
5205 | if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) { | |||
5206 | return IsNonLocalValue(V, MemoryInst->getParent()); | |||
5207 | })) { | |||
5208 | LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode | |||
5209 | << "\n"); | |||
5210 | return Modified; | |||
5211 | } | |||
5212 | ||||
5213 | // Insert this computation immediately before this user. Since our caller is | |||
5214 | // scanning from the top of the BB to the bottom, reuses of the expr are | |||
5215 | // guaranteed to happen later. | |||
5216 | IRBuilder<> Builder(MemoryInst); | |||
5217 | ||||
5218 | // Now that we have determined the addressing expression we want to use and | |||
5219 | // know that we have to sink it into this block, check to see if we have already | |||
5220 | // done this for some other load/store instr in this block. If so, reuse | |||
5221 | // the computation. Before attempting reuse, check if the address is valid | |||
5222 | // as it may have been erased. | |||
5223 | ||||
5224 | WeakTrackingVH SunkAddrVH = SunkAddrs[Addr]; | |||
5225 | ||||
5226 | Value * SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr; | |||
5227 | if (SunkAddr) { | |||
5228 | LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode | |||
5229 | << " for " << *MemoryInst << "\n"); | |||
5230 | if (SunkAddr->getType() != Addr->getType()) | |||
5231 | SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); | |||
5232 | } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() && | |||
5233 | SubtargetInfo->addrSinkUsingGEPs())) { | |||
5234 | // By default, we use the GEP-based method when AA is used later. This | |||
5235 | // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. | |||
5236 | LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode | |||
5237 | << " for " << *MemoryInst << "\n"); | |||
5238 | Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); | |||
5239 | Value *ResultPtr = nullptr, *ResultIndex = nullptr; | |||
5240 | ||||
5241 | // First, find the pointer. | |||
5242 | if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { | |||
5243 | ResultPtr = AddrMode.BaseReg; | |||
5244 | AddrMode.BaseReg = nullptr; | |||
5245 | } | |||
5246 | ||||
5247 | if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { | |||
5248 | // We can't add more than one pointer together, nor can we scale a | |||
5249 | // pointer (both of which seem meaningless). | |||
5250 | if (ResultPtr || AddrMode.Scale != 1) | |||
5251 | return Modified; | |||
5252 | ||||
5253 | ResultPtr = AddrMode.ScaledReg; | |||
5254 | AddrMode.Scale = 0; | |||
5255 | } | |||
5256 | ||||
5257 | // It is only safe to sign extend the BaseReg if we know that the math | |||
5258 | // required to create it did not overflow before we extend it. Since | |||
5259 | // the original IR value was tossed in favor of a constant back when | |||
5260 | // the AddrMode was created we need to bail out gracefully if widths | |||
5261 | // do not match instead of extending it. | |||
5262 | // | |||
5263 | // (See below for code to add the scale.) | |||
5264 | if (AddrMode.Scale) { | |||
5265 | Type *ScaledRegTy = AddrMode.ScaledReg->getType(); | |||
5266 | if (cast<IntegerType>(IntPtrTy)->getBitWidth() > | |||
5267 | cast<IntegerType>(ScaledRegTy)->getBitWidth()) | |||
5268 | return Modified; | |||
5269 | } | |||
5270 | ||||
5271 | if (AddrMode.BaseGV) { | |||
5272 | if (ResultPtr) | |||
5273 | return Modified; | |||
5274 | ||||
5275 | ResultPtr = AddrMode.BaseGV; | |||
5276 | } | |||
5277 | ||||
5278 | // If the real base value actually came from an inttoptr, then the matcher | |||
5279 | // will look through it and provide only the integer value. In that case, | |||
5280 | // use it here. | |||
5281 | if (!DL->isNonIntegralPointerType(Addr->getType())) { | |||
5282 | if (!ResultPtr && AddrMode.BaseReg) { | |||
5283 | ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), | |||
5284 | "sunkaddr"); | |||
5285 | AddrMode.BaseReg = nullptr; | |||
5286 | } else if (!ResultPtr && AddrMode.Scale == 1) { | |||
5287 | ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), | |||
5288 | "sunkaddr"); | |||
5289 | AddrMode.Scale = 0; | |||
5290 | } | |||
5291 | } | |||
5292 | ||||
5293 | if (!ResultPtr && | |||
5294 | !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { | |||
5295 | SunkAddr = Constant::getNullValue(Addr->getType()); | |||
5296 | } else if (!ResultPtr) { | |||
5297 | return Modified; | |||
5298 | } else { | |||
5299 | Type *I8PtrTy = | |||
5300 | Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); | |||
5301 | Type *I8Ty = Builder.getInt8Ty(); | |||
5302 | ||||
5303 | // Start with the base register. Do this first so that subsequent address | |||
5304 | // matching finds it last, which will prevent it from trying to match it | |||
5305 | // as the scaled value in case it happens to be a mul. That would be | |||
5306 | // problematic if we've sunk a different mul for the scale, because then | |||
5307 | // we'd end up sinking both muls. | |||
5308 | if (AddrMode.BaseReg) { | |||
5309 | Value *V = AddrMode.BaseReg; | |||
5310 | if (V->getType() != IntPtrTy) | |||
5311 | V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); | |||
5312 | ||||
5313 | ResultIndex = V; | |||
5314 | } | |||
5315 | ||||
5316 | // Add the scale value. | |||
5317 | if (AddrMode.Scale) { | |||
5318 | Value *V = AddrMode.ScaledReg; | |||
5319 | if (V->getType() == IntPtrTy) { | |||
5320 | // done. | |||
5321 | } else { | |||
5322 | assert(cast<IntegerType>(IntPtrTy)->getBitWidth() < | |||
5323 | cast<IntegerType>(V->getType())->getBitWidth() && | |||
5324 | "We can't transform if ScaledReg is too narrow"); | |||
5325 | V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); | |||
5326 | } | |||
5327 | ||||
5328 | if (AddrMode.Scale != 1) | |||
5329 | V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), | |||
5330 | "sunkaddr"); | |||
5331 | if (ResultIndex) | |||
5332 | ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); | |||
5333 | else | |||
5334 | ResultIndex = V; | |||
5335 | } | |||
5336 | ||||
5337 | // Add in the Base Offset if present. | |||
5338 | if (AddrMode.BaseOffs) { | |||
5339 | Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); | |||
5340 | if (ResultIndex) { | |||
5341 | // We need to add this separately from the scale above to help with | |||
5342 | // SDAG consecutive load/store merging. | |||
5343 | if (ResultPtr->getType() != I8PtrTy) | |||
5344 | ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); | |||
5345 | ResultPtr = | |||
5346 | AddrMode.InBounds | |||
5347 | ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex, | |||
5348 | "sunkaddr") | |||
5349 | : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); | |||
5350 | } | |||
5351 | ||||
5352 | ResultIndex = V; | |||
5353 | } | |||
5354 | ||||
5355 | if (!ResultIndex) { | |||
5356 | SunkAddr = ResultPtr; | |||
5357 | } else { | |||
5358 | if (ResultPtr->getType() != I8PtrTy) | |||
5359 | ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); | |||
5360 | SunkAddr = | |||
5361 | AddrMode.InBounds | |||
5362 | ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex, | |||
5363 | "sunkaddr") | |||
5364 | : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); | |||
5365 | } | |||
5366 | ||||
5367 | if (SunkAddr->getType() != Addr->getType()) | |||
5368 | SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); | |||
5369 | } | |||
5370 | } else { | |||
5371 | // We'd require a ptrtoint/inttoptr down the line, which we can't do for | |||
5372 | // non-integral pointers, so in that case bail out now. | |||
5373 | Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr; | |||
5374 | Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr; | |||
5375 | PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy); | |||
5376 | PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy); | |||
5377 | if (DL->isNonIntegralPointerType(Addr->getType()) || | |||
5378 | (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) || | |||
5379 | (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) || | |||
5380 | (AddrMode.BaseGV && | |||
5381 | DL->isNonIntegralPointerType(AddrMode.BaseGV->getType()))) | |||
5382 | return Modified; | |||
5383 | ||||
5384 | LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode | |||
5385 | << " for " << *MemoryInst << "\n"); | |||
5386 | Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); | |||
5387 | Value *Result = nullptr; | |||
5388 | ||||
5389 | // Start with the base register. Do this first so that subsequent address | |||
5390 | // matching finds it last, which will prevent it from trying to match it | |||
5391 | // as the scaled value in case it happens to be a mul. That would be | |||
5392 | // problematic if we've sunk a different mul for the scale, because then | |||
5393 | // we'd end up sinking both muls. | |||
5394 | if (AddrMode.BaseReg) { | |||
5395 | Value *V = AddrMode.BaseReg; | |||
5396 | if (V->getType()->isPointerTy()) | |||
5397 | V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); | |||
5398 | if (V->getType() != IntPtrTy) | |||
5399 | V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); | |||
5400 | Result = V; | |||
5401 | } | |||
5402 | ||||
5403 | // Add the scale value. | |||
5404 | if (AddrMode.Scale) { | |||
5405 | Value *V = AddrMode.ScaledReg; | |||
5406 | if (V->getType() == IntPtrTy) { | |||
5407 | // done. | |||
5408 | } else if (V->getType()->isPointerTy()) { | |||
5409 | V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); | |||
5410 | } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < | |||
5411 | cast<IntegerType>(V->getType())->getBitWidth()) { | |||
5412 | V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); | |||
5413 | } else { | |||
5414 | // It is only safe to sign extend the BaseReg if we know that the math | |||
5415 | // required to create it did not overflow before we extend it. Since | |||
5416 | // the original IR value was tossed in favor of a constant back when | |||
5417 | // the AddrMode was created we need to bail out gracefully if widths | |||
5418 | // do not match instead of extending it. | |||
5419 | Instruction *I = dyn_cast_or_null<Instruction>(Result); | |||
5420 | if (I && (Result != AddrMode.BaseReg)) | |||
5421 | I->eraseFromParent(); | |||
5422 | return Modified; | |||
5423 | } | |||
5424 | if (AddrMode.Scale != 1) | |||
5425 | V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), | |||
5426 | "sunkaddr"); | |||
5427 | if (Result) | |||
5428 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
5429 | else | |||
5430 | Result = V; | |||
5431 | } | |||
5432 | ||||
5433 | // Add in the BaseGV if present. | |||
5434 | if (AddrMode.BaseGV) { | |||
5435 | Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); | |||
5436 | if (Result) | |||
5437 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
5438 | else | |||
5439 | Result = V; | |||
5440 | } | |||
5441 | ||||
5442 | // Add in the Base Offset if present. | |||
5443 | if (AddrMode.BaseOffs) { | |||
5444 | Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); | |||
5445 | if (Result) | |||
5446 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
5447 | else | |||
5448 | Result = V; | |||
5449 | } | |||
5450 | ||||
5451 | if (!Result) | |||
5452 | SunkAddr = Constant::getNullValue(Addr->getType()); | |||
5453 | else | |||
5454 | SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); | |||
5455 | } | |||
5456 | ||||
5457 | MemoryInst->replaceUsesOfWith(Repl, SunkAddr); | |||
5458 | // Store the newly computed address into the cache. In the case we reused a | |||
5459 | // value, this should be idempotent. | |||
5460 | SunkAddrs[Addr] = WeakTrackingVH(SunkAddr); | |||
5461 | ||||
5462 | // If we have no uses, recursively delete the value and all dead instructions | |||
5463 | // using it. | |||
5464 | if (Repl->use_empty()) { | |||
5465 | resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() { | |||
5466 | RecursivelyDeleteTriviallyDeadInstructions( | |||
5467 | Repl, TLInfo, nullptr, | |||
5468 | [&](Value *V) { removeAllAssertingVHReferences(V); }); | |||
5469 | }); | |||
5470 | } | |||
5471 | ++NumMemoryInsts; | |||
5472 | return true; | |||
5473 | } | |||
5474 | ||||
5475 | /// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find | |||
5476 | /// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can | |||
5477 | /// only handle a 2 operand GEP in the same basic block or a splat constant | |||
5478 | /// vector. The 2 operands to the GEP must have a scalar pointer and a vector | |||
5479 | /// index. | |||
5480 | /// | |||
5481 | /// If the existing GEP has a vector base pointer that is splat, we can look | |||
5482 | /// through the splat to find the scalar pointer. If we can't find a scalar | |||
5483 | /// pointer there's nothing we can do. | |||
5484 | /// | |||
5485 | /// If we have a GEP with more than 2 indices where the middle indices are all | |||
5486 | /// zeroes, we can replace it with 2 GEPs where the second has 2 operands. | |||
5487 | /// | |||
5488 | /// If the final index isn't a vector or is a splat, we can emit a scalar GEP | |||
5489 | /// followed by a GEP with an all zeroes vector index. This will enable | |||
5490 | /// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a | |||
5491 | /// zero index. | |||
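/// A sketch of the multi-index case (hypothetical IR, not from this file):
///   %g = getelementptr [16 x i32], [16 x i32]* %p, i64 0, <4 x i64> %idx
/// is rewritten into a scalar GEP feeding a two-operand vector GEP:
///   %b = getelementptr [16 x i32], [16 x i32]* %p, i64 0, i64 0
///   %g = getelementptr i32, i32* %b, <4 x i64> %idx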
5492 | bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst, | |||
5493 | Value *Ptr) { | |||
5494 | Value *NewAddr; | |||
5495 | ||||
5496 | if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) { | |||
5497 | // Don't optimize GEPs that don't have indices. | |||
5498 | if (!GEP->hasIndices()) | |||
5499 | return false; | |||
5500 | ||||
5501 | // If the GEP and the gather/scatter aren't in the same BB, don't optimize. | |||
5502 | // FIXME: We should support this by sinking the GEP. | |||
5503 | if (MemoryInst->getParent() != GEP->getParent()) | |||
5504 | return false; | |||
5505 | ||||
5506 | SmallVector<Value *, 2> Ops(GEP->operands()); | |||
5507 | ||||
5508 | bool RewriteGEP = false; | |||
5509 | ||||
5510 | if (Ops[0]->getType()->isVectorTy()) { | |||
5511 | Ops[0] = getSplatValue(Ops[0]); | |||
5512 | if (!Ops[0]) | |||
5513 | return false; | |||
5514 | RewriteGEP = true; | |||
5515 | } | |||
5516 | ||||
5517 | unsigned FinalIndex = Ops.size() - 1; | |||
5518 | ||||
5519 | // Ensure all but the last index are 0. | |||
5520 | // FIXME: This isn't strictly required. All that's required is that they are | |||
5521 | // all scalars or splats. | |||
5522 | for (unsigned i = 1; i < FinalIndex; ++i) { | |||
5523 | auto *C = dyn_cast<Constant>(Ops[i]); | |||
5524 | if (!C) | |||
5525 | return false; | |||
5526 | if (isa<VectorType>(C->getType())) | |||
5527 | C = C->getSplatValue(); | |||
5528 | auto *CI = dyn_cast_or_null<ConstantInt>(C); | |||
5529 | if (!CI || !CI->isZero()) | |||
5530 | return false; | |||
5531 | // Scalarize the index if needed. | |||
5532 | Ops[i] = CI; | |||
5533 | } | |||
5534 | ||||
5535 | // Try to scalarize the final index. | |||
5536 | if (Ops[FinalIndex]->getType()->isVectorTy()) { | |||
5537 | if (Value *V = getSplatValue(Ops[FinalIndex])) { | |||
5538 | auto *C = dyn_cast<ConstantInt>(V); | |||
5539 | // Don't scalarize all zeros vector. | |||
5540 | if (!C || !C->isZero()) { | |||
5541 | Ops[FinalIndex] = V; | |||
5542 | RewriteGEP = true; | |||
5543 | } | |||
5544 | } | |||
5545 | } | |||
5546 | ||||
5547 | // If we made any changes or we have extra operands, we need to generate | |||
5548 | // new instructions. | |||
5549 | if (!RewriteGEP && Ops.size() == 2) | |||
5550 | return false; | |||
5551 | ||||
5552 | auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount(); | |||
5553 | ||||
5554 | IRBuilder<> Builder(MemoryInst); | |||
5555 | ||||
5556 | Type *SourceTy = GEP->getSourceElementType(); | |||
5557 | Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType()); | |||
5558 | ||||
5559 | // If the final index isn't a vector, emit a scalar GEP containing all ops | |||
5560 | // and a vector GEP with all zeroes final index. | |||
5561 | if (!Ops[FinalIndex]->getType()->isVectorTy()) { | |||
5562 | NewAddr = Builder.CreateGEP(SourceTy, Ops[0], | |||
5563 | makeArrayRef(Ops).drop_front()); | |||
5564 | auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts); | |||
5565 | auto *SecondTy = GetElementPtrInst::getIndexedType( | |||
5566 | SourceTy, makeArrayRef(Ops).drop_front()); | |||
5567 | NewAddr = | |||
5568 | Builder.CreateGEP(SecondTy, NewAddr, Constant::getNullValue(IndexTy)); | |||
5569 | } else { | |||
5570 | Value *Base = Ops[0]; | |||
5571 | Value *Index = Ops[FinalIndex]; | |||
5572 | ||||
5573 | // Create a scalar GEP if there are more than 2 operands. | |||
5574 | if (Ops.size() != 2) { | |||
5575 | // Replace the last index with 0. | |||
5576 | Ops[FinalIndex] = Constant::getNullValue(ScalarIndexTy); | |||
5577 | Base = Builder.CreateGEP(SourceTy, Base, | |||
5578 | makeArrayRef(Ops).drop_front()); | |||
5579 | SourceTy = GetElementPtrInst::getIndexedType( | |||
5580 | SourceTy, makeArrayRef(Ops).drop_front()); | |||
5581 | } | |||
5582 | ||||
5583 | // Now create the GEP with scalar pointer and vector index. | |||
5584 | NewAddr = Builder.CreateGEP(SourceTy, Base, Index); | |||
5585 | } | |||
5586 | } else if (!isa<Constant>(Ptr)) { | |||
5587 | // Not a GEP, maybe it's a splat and we can create a GEP to enable | |||
5588 | // SelectionDAGBuilder to use it as a uniform base. | |||
5589 | Value *V = getSplatValue(Ptr); | |||
5590 | if (!V) | |||
5591 | return false; | |||
5592 | ||||
5593 | auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount(); | |||
5594 | ||||
5595 | IRBuilder<> Builder(MemoryInst); | |||
5596 | ||||
5597 | // Emit a vector GEP with a scalar pointer and all 0s vector index. | |||
5598 | Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType()); | |||
5599 | auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts); | |||
5600 | Type *ScalarTy; | |||
5601 | if (cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() == | |||
5602 | Intrinsic::masked_gather) { | |||
5603 | ScalarTy = MemoryInst->getType()->getScalarType(); | |||
5604 | } else { | |||
5605 | assert(cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() == | |||
5606 | Intrinsic::masked_scatter); | |||
5607 | ScalarTy = MemoryInst->getOperand(0)->getType()->getScalarType(); | |||
5608 | } | |||
5609 | NewAddr = Builder.CreateGEP(ScalarTy, V, Constant::getNullValue(IndexTy)); | |||
5610 | } else { | |||
5611 | // Constant, SelectionDAGBuilder knows to check if it's a splat. | |||
5612 | return false; | |||
5613 | } | |||
5614 | ||||
5615 | MemoryInst->replaceUsesOfWith(Ptr, NewAddr); | |||
5616 | ||||
5617 | // If we have no uses, recursively delete the value and all dead instructions | |||
5618 | // using it. | |||
5619 | if (Ptr->use_empty()) | |||
5620 | RecursivelyDeleteTriviallyDeadInstructions( | |||
5621 | Ptr, TLInfo, nullptr, | |||
5622 | [&](Value *V) { removeAllAssertingVHReferences(V); }); | |||
5623 | ||||
5624 | return true; | |||
5625 | } | |||
5626 | ||||
5627 | /// If there are any memory operands, use OptimizeMemoryInst to sink their | |||
5628 | /// address computing into the block when possible / profitable. | |||
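/// For example (hypothetical IR): in
///   call void asm "movl $1, $0", "=*m,r"(i32* %addr, i32 %val)
/// the indirect "=*m" operand %addr is an address we may be able to sink.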
5629 | bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { | |||
5630 | bool MadeChange = false; | |||
5631 | ||||
5632 | const TargetRegisterInfo *TRI = | |||
5633 | TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo(); | |||
5634 | TargetLowering::AsmOperandInfoVector TargetConstraints = | |||
5635 | TLI->ParseConstraints(*DL, TRI, *CS); | |||
5636 | unsigned ArgNo = 0; | |||
5637 | for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { | |||
5638 | TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; | |||
5639 | ||||
5640 | // Compute the constraint code and ConstraintType to use. | |||
5641 | TLI->ComputeConstraintToUse(OpInfo, SDValue()); | |||
5642 | ||||
5643 | if (OpInfo.ConstraintType == TargetLowering::C_Memory && | |||
5644 | OpInfo.isIndirect) { | |||
5645 | Value *OpVal = CS->getArgOperand(ArgNo++); | |||
5646 | MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u); | |||
5647 | } else if (OpInfo.Type == InlineAsm::isInput) | |||
5648 | ArgNo++; | |||
5649 | } | |||
5650 | ||||
5651 | return MadeChange; | |||
5652 | } | |||
5653 | ||||
5654 | /// Check if all the uses of \p Val are equivalent (or free) zero or | |||
5655 | /// sign extensions. | |||
5656 | static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { | |||
5657 | assert(!Val->use_empty() && "Input must have at least one use"); | |||
5658 | const Instruction *FirstUser = cast<Instruction>(*Val->user_begin()); | |||
5659 | bool IsSExt = isa<SExtInst>(FirstUser); | |||
5660 | Type *ExtTy = FirstUser->getType(); | |||
5661 | for (const User *U : Val->users()) { | |||
5662 | const Instruction *UI = cast<Instruction>(U); | |||
5663 | if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) | |||
5664 | return false; | |||
5665 | Type *CurTy = UI->getType(); | |||
5666 | // Same input and output types: Same instruction after CSE. | |||
5667 | if (CurTy == ExtTy) | |||
5668 | continue; | |||
5669 | ||||
5670 | // If IsSExt is true, we are in this situation: | |||
5671 | // a = Val | |||
5672 | // b = sext ty1 a to ty2 | |||
5673 | // c = sext ty1 a to ty3 | |||
5674 | // Assuming ty2 is shorter than ty3, this could be turned into: | |||
5675 | // a = Val | |||
5676 | // b = sext ty1 a to ty2 | |||
5677 | // c = sext ty2 b to ty3 | |||
5678 | // However, the last sext is not free. | |||
5679 | if (IsSExt) | |||
5680 | return false; | |||
5681 | ||||
5682 | // This is a ZExt, maybe this is free to extend from one type to another. | |||
5683 | // In that case, we would not account for a different use. | |||
5684 | Type *NarrowTy; | |||
5685 | Type *LargeTy; | |||
5686 | if (ExtTy->getScalarType()->getIntegerBitWidth() > | |||
5687 | CurTy->getScalarType()->getIntegerBitWidth()) { | |||
5688 | NarrowTy = CurTy; | |||
5689 | LargeTy = ExtTy; | |||
5690 | } else { | |||
5691 | NarrowTy = ExtTy; | |||
5692 | LargeTy = CurTy; | |||
5693 | } | |||
5694 | ||||
5695 | if (!TLI.isZExtFree(NarrowTy, LargeTy)) | |||
5696 | return false; | |||
5697 | } | |||
5698 | // All uses are the same or can be derived from one another for free. | |||
5699 | return true; | |||
5700 | } | |||
5701 | ||||
5702 | /// Try to speculatively promote extensions in \p Exts and continue | |||
5703 | /// promoting through newly promoted operands recursively as far as doing so is | |||
5704 | /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts. | |||
5705 | /// When some promotion happened, \p TPT contains the proper state to revert | |||
5706 | /// them. | |||
5707 | /// | |||
5708 | /// \return true if some promotion happened, false otherwise. | |||
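/// A sketch of one profitable step (hypothetical IR, not from this file):
///   %l = load i16, i16* %p
///   %a = add nsw i16 %l, 1
///   %e = sext i16 %a to i32
/// can be rewritten so the extension reaches the load:
///   %l = load i16, i16* %p
///   %e2 = sext i16 %l to i32   ; ext(load) is often free after isel folding
///   %a2 = add nsw i32 %e2, 1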
5709 | bool CodeGenPrepare::tryToPromoteExts( | |||
5710 | TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts, | |||
5711 | SmallVectorImpl<Instruction *> &ProfitablyMovedExts, | |||
5712 | unsigned CreatedInstsCost) { | |||
5713 | bool Promoted = false; | |||
5714 | ||||
5715 | // Iterate over all the extensions to try to promote them. | |||
5716 | for (auto *I : Exts) { | |||
5717 | // Early check if we directly have ext(load). | |||
5718 | if (isa<LoadInst>(I->getOperand(0))) { | |||
5719 | ProfitablyMovedExts.push_back(I); | |||
5720 | continue; | |||
5721 | } | |||
5722 | ||||
5723 | // Check whether or not we want to do any promotion. The reason we have | |||
5724 | // this check inside the for loop is to catch the case where an extension | |||
5725 | // is directly fed by a load, because in that case the extension can be moved | |||
5726 | // up without any promotion on its operands. | |||
5727 | if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion) | |||
5728 | return false; | |||
5729 | ||||
5730 | // Get the action to perform the promotion. | |||
5731 | TypePromotionHelper::Action TPH = | |||
5732 | TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts); | |||
5733 | // Check if we can promote. | |||
5734 | if (!TPH) { | |||
5735 | // Save the current extension as we cannot move up through its operand. | |||
5736 | ProfitablyMovedExts.push_back(I); | |||
5737 | continue; | |||
5738 | } | |||
5739 | ||||
5740 | // Save the current state. | |||
5741 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
5742 | TPT.getRestorationPoint(); | |||
5743 | SmallVector<Instruction *, 4> NewExts; | |||
5744 | unsigned NewCreatedInstsCost = 0; | |||
5745 | unsigned ExtCost = !TLI->isExtFree(I); | |||
5746 | // Promote. | |||
5747 | Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost, | |||
5748 | &NewExts, nullptr, *TLI); | |||
5749 | assert(PromotedVal && | |||
5750 | "TypePromotionHelper should have filtered out those cases"); | |||
5751 | ||||
5752 | // We can merge only one extension into a load. | |||
5753 | // Therefore, if we have more than 1 new extension we heuristically | |||
5754 | // cut this search path, because it means we degrade the code quality. | |||
5755 | // With exactly 2, the transformation is neutral, because we will merge | |||
5756 | // one extension but leave one. However, we optimistically keep going, | |||
5757 | // because the new extension may be removed too. | |||
5758 | long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost; | |||
5759 | // FIXME: It would be possible to propagate a negative value instead of | |||
5760 | // conservatively ceiling it to 0. | |||
5761 | TotalCreatedInstsCost = | |||
5762 | std::max((long long)0, (TotalCreatedInstsCost - ExtCost)); | |||
5763 | if (!StressExtLdPromotion && | |||
5764 | (TotalCreatedInstsCost > 1 || | |||
5765 | !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) { | |||
5766 | // This promotion is not profitable, rollback to the previous state, and | |||
5767 | // save the current extension in ProfitablyMovedExts as the latest | |||
5768 | // speculative promotion turned out to be unprofitable. | |||
5769 | TPT.rollback(LastKnownGood); | |||
5770 | ProfitablyMovedExts.push_back(I); | |||
5771 | continue; | |||
5772 | } | |||
5773 | // Continue promoting NewExts as far as doing so is profitable. | |||
5774 | SmallVector<Instruction *, 2> NewlyMovedExts; | |||
5775 | (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost); | |||
5776 | bool NewPromoted = false; | |||
5777 | for (auto *ExtInst : NewlyMovedExts) { | |||
5778 | Instruction *MovedExt = cast<Instruction>(ExtInst); | |||
5779 | Value *ExtOperand = MovedExt->getOperand(0); | |||
5780 | // If we have reached a load, we need this extra profitability check | |||
5781 | // as it could potentially be merged into an ext(load). | |||
5782 | if (isa<LoadInst>(ExtOperand) && | |||
5783 | !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost || | |||
5784 | (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI)))) | |||
5785 | continue; | |||
5786 | ||||
5787 | ProfitablyMovedExts.push_back(MovedExt); | |||
5788 | NewPromoted = true; | |||
5789 | } | |||
5790 | ||||
5791 | // If none of speculative promotions for NewExts is profitable, rollback | |||
5792 | // and save the current extension (I) as the last profitable extension. | |||
5793 | if (!NewPromoted) { | |||
5794 | TPT.rollback(LastKnownGood); | |||
5795 | ProfitablyMovedExts.push_back(I); | |||
5796 | continue; | |||
5797 | } | |||
5798 | // The promotion is profitable. | |||
5799 | Promoted = true; | |||
5800 | } | |||
5801 | return Promoted; | |||
5802 | } | |||
5803 | ||||
5804 | /// Merge redundant sexts when one dominates the other. | |||
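/// For example (hypothetical IR): if "%s1 = sext i32 %v to i64" dominates
/// "%s2 = sext i32 %v to i64", all uses of %s2 are redirected to %s1 and %s2
/// is removed.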
5805 | bool CodeGenPrepare::mergeSExts(Function &F) { | |||
5806 | bool Changed = false; | |||
5807 | for (auto &Entry : ValToSExtendedUses) { | |||
5808 | SExts &Insts = Entry.second; | |||
5809 | SExts CurPts; | |||
5810 | for (Instruction *Inst : Insts) { | |||
5811 | if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) || | |||
5812 | Inst->getOperand(0) != Entry.first) | |||
5813 | continue; | |||
5814 | bool inserted = false; | |||
5815 | for (auto &Pt : CurPts) { | |||
5816 | if (getDT(F).dominates(Inst, Pt)) { | |||
5817 | Pt->replaceAllUsesWith(Inst); | |||
5818 | RemovedInsts.insert(Pt); | |||
5819 | Pt->removeFromParent(); | |||
5820 | Pt = Inst; | |||
5821 | inserted = true; | |||
5822 | Changed = true; | |||
5823 | break; | |||
5824 | } | |||
5825 | if (!getDT(F).dominates(Pt, Inst)) | |||
5826 | // Give up if we need to merge in a common dominator as the | |||
5827 | // experiments show it is not profitable. | |||
5828 | continue; | |||
5829 | Inst->replaceAllUsesWith(Pt); | |||
5830 | RemovedInsts.insert(Inst); | |||
5831 | Inst->removeFromParent(); | |||
5832 | inserted = true; | |||
5833 | Changed = true; | |||
5834 | break; | |||
5835 | } | |||
5836 | if (!inserted) | |||
5837 | CurPts.push_back(Inst); | |||
5838 | } | |||
5839 | } | |||
5840 | return Changed; | |||
5841 | } | |||
5842 | ||||
5843 | // Split large data structures so that the GEPs accessing them can have | |||
5844 | // smaller offsets, letting them be sunk to the same blocks as their users. | |||
5845 | // For example, a large struct starting from %base is split into two parts | |||
5846 | // where the second part starts from %new_base. | |||
5847 | // | |||
5848 | // Before: | |||
5849 | // BB0: | |||
5850 | // %base = | |||
5851 | // | |||
5852 | // BB1: | |||
5853 | // %gep0 = gep %base, off0 | |||
5854 | // %gep1 = gep %base, off1 | |||
5855 | // %gep2 = gep %base, off2 | |||
5856 | // | |||
5857 | // BB2: | |||
5858 | // %load1 = load %gep0 | |||
5859 | // %load2 = load %gep1 | |||
5860 | // %load3 = load %gep2 | |||
5861 | // | |||
5862 | // After: | |||
5863 | // BB0: | |||
5864 | // %base = | |||
5865 | // %new_base = gep %base, off0 | |||
5866 | // | |||
5867 | // BB1: | |||
5868 | // %new_gep0 = %new_base | |||
5869 | // %new_gep1 = gep %new_base, off1 - off0 | |||
5870 | // %new_gep2 = gep %new_base, off2 - off0 | |||
5871 | // | |||
5872 | // BB2: | |||
5873 | // %load1 = load i32, i32* %new_gep0 | |||
5874 | // %load2 = load i32, i32* %new_gep1 | |||
5875 | // %load3 = load i32, i32* %new_gep2 | |||
5876 | // | |||
5877 | // %new_gep1 and %new_gep2 can now be sunk to BB2 after the splitting because | |||
5878 | // their offsets are small enough to fit into the addressing mode. | |||
5879 | bool CodeGenPrepare::splitLargeGEPOffsets() { | |||
5880 | bool Changed = false; | |||
5881 | for (auto &Entry : LargeOffsetGEPMap) { | |||
5882 | Value *OldBase = Entry.first; | |||
5883 | SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>> | |||
5884 | &LargeOffsetGEPs = Entry.second; | |||
5885 | auto compareGEPOffset = | |||
5886 | [&](const std::pair<GetElementPtrInst *, int64_t> &LHS, | |||
5887 | const std::pair<GetElementPtrInst *, int64_t> &RHS) { | |||
5888 | if (LHS.first == RHS.first) | |||
5889 | return false; | |||
5890 | if (LHS.second != RHS.second) | |||
5891 | return LHS.second < RHS.second; | |||
5892 | return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first]; | |||
5893 | }; | |||
5894 | // Sort all the GEPs of the same data structure based on the offsets. | |||
5895 | llvm::sort(LargeOffsetGEPs, compareGEPOffset); | |||