File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/BranchFolding.cpp
Warning: line 1243, column 39: Called C++ object pointer is null
//===- BranchFolding.cpp - Fold machine code branch instructions ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass forwards branches to unconditional branches to make them branch
// directly to the target block. This pass often results in dead MBB's, which
// it then removes.
//
// Note that this pass must be run after register allocation; it cannot handle
// SSA form. It also must handle virtual registers for targets that emit
// virtual ISA (e.g. NVPTX).
//
//===----------------------------------------------------------------------===//

#include "BranchFolding.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/MBFIWrapper.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <numeric>

using namespace llvm;

#define DEBUG_TYPE "branch-folder"

STATISTIC(NumDeadBlocks, "Number of dead blocks removed");
STATISTIC(NumBranchOpts, "Number of branches optimized");
STATISTIC(NumTailMerge , "Number of block tails merged");
STATISTIC(NumHoist     , "Number of times common instructions are hoisted");
STATISTIC(NumTailCalls,  "Number of tail calls optimized");

static cl::opt<cl::boolOrDefault> FlagEnableTailMerge("enable-tail-merge",
                              cl::init(cl::BOU_UNSET), cl::Hidden);

// Throttle for huge numbers of predecessors (compile speed problems)
static cl::opt<unsigned>
TailMergeThreshold("tail-merge-threshold",
          cl::desc("Max number of predecessors to consider tail merging"),
          cl::init(150), cl::Hidden);

// Heuristic for tail merging (and, inversely, tail duplication).
// TODO: This should be replaced with a target query.
static cl::opt<unsigned>
TailMergeSize("tail-merge-size",
              cl::desc("Min number of instructions to consider tail merging"),
              cl::init(3), cl::Hidden);

namespace {

  /// BranchFolderPass - Wrap branch folder in a machine function pass.
  class BranchFolderPass : public MachineFunctionPass {
  public:
    static char ID;

    explicit BranchFolderPass(): MachineFunctionPass(ID) {}

    bool runOnMachineFunction(MachineFunction &MF) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<MachineBlockFrequencyInfo>();
      AU.addRequired<MachineBranchProbabilityInfo>();
      AU.addRequired<ProfileSummaryInfoWrapperPass>();
      AU.addRequired<TargetPassConfig>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

} // end anonymous namespace

char BranchFolderPass::ID = 0;

char &llvm::BranchFolderPassID = BranchFolderPass::ID;

INITIALIZE_PASS(BranchFolderPass, DEBUG_TYPE,
                "Control Flow Optimizer", false, false)

bool BranchFolderPass::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
  // TailMerge can create jumps into 'if' branches that make the CFG
  // irreducible for HW that requires a structurized CFG.
  bool EnableTailMerge = !MF.getTarget().requiresStructuredCFG() &&
                         PassConfig->getEnableTailMerge();
  MBFIWrapper MBBFreqInfo(
      getAnalysis<MachineBlockFrequencyInfo>());
  BranchFolder Folder(EnableTailMerge, /*CommonHoist=*/true, MBBFreqInfo,
                      getAnalysis<MachineBranchProbabilityInfo>(),
                      &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI());
  return Folder.OptimizeFunction(MF, MF.getSubtarget().getInstrInfo(),
                                 MF.getSubtarget().getRegisterInfo());
}

BranchFolder::BranchFolder(bool DefaultEnableTailMerge, bool CommonHoist,
                           MBFIWrapper &FreqInfo,
                           const MachineBranchProbabilityInfo &ProbInfo,
                           ProfileSummaryInfo *PSI, unsigned MinTailLength)
    : EnableHoistCommonCode(CommonHoist), MinCommonTailLength(MinTailLength),
      MBBFreqInfo(FreqInfo), MBPI(ProbInfo), PSI(PSI) {
  if (MinCommonTailLength == 0)
    MinCommonTailLength = TailMergeSize;
  switch (FlagEnableTailMerge) {
  case cl::BOU_UNSET:
    EnableTailMerge = DefaultEnableTailMerge;
    break;
  case cl::BOU_TRUE: EnableTailMerge = true; break;
  case cl::BOU_FALSE: EnableTailMerge = false; break;
  }
}

void BranchFolder::RemoveDeadBlock(MachineBasicBlock *MBB) {
  assert(MBB->pred_empty() && "MBB must be dead!");
  LLVM_DEBUG(dbgs() << "\nRemoving MBB: " << *MBB);

  MachineFunction *MF = MBB->getParent();
  // Drop all successors.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_end()-1);

  // Avoid matching if this pointer gets reused.
  TriedMerging.erase(MBB);

  // Update call site info.
  for (const MachineInstr &MI : *MBB)
    if (MI.shouldUpdateCallSiteInfo())
      MF->eraseCallSiteInfo(&MI);

  // Remove the block.
  MF->erase(MBB);
  EHScopeMembership.erase(MBB);
  if (MLI)
    MLI->removeBlock(MBB);
}

bool BranchFolder::OptimizeFunction(MachineFunction &MF,
                                    const TargetInstrInfo *tii,
                                    const TargetRegisterInfo *tri,
                                    MachineLoopInfo *mli, bool AfterPlacement) {
  if (!tii) return false;

  TriedMerging.clear();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  AfterBlockPlacement = AfterPlacement;
  TII = tii;
  TRI = tri;
  MLI = mli;
  this->MRI = &MRI;

  UpdateLiveIns = MRI.tracksLiveness() && TRI->trackLivenessAfterRegAlloc(MF);
  if (!UpdateLiveIns)
    MRI.invalidateLiveness();

  bool MadeChange = false;

  // Recalculate EH scope membership.
  EHScopeMembership = getEHScopeMembership(MF);

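  // Iterate to a fixed point: tail merging, branch optimization, and common
  // code hoisting can each expose new opportunities for the others.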
  bool MadeChangeThisIteration = true;
  while (MadeChangeThisIteration) {
    MadeChangeThisIteration = TailMergeBlocks(MF);
    // No need to clean up if tail merging does not change anything after the
    // block placement.
    if (!AfterBlockPlacement || MadeChangeThisIteration)
      MadeChangeThisIteration |= OptimizeBranches(MF);
    if (EnableHoistCommonCode)
      MadeChangeThisIteration |= HoistCommonCode(MF);
    MadeChange |= MadeChangeThisIteration;
  }

  // See if any jump tables have become dead as the code generator
  // did its thing.
  MachineJumpTableInfo *JTI = MF.getJumpTableInfo();
  if (!JTI)
    return MadeChange;

  // Walk the function to find jump tables that are live.
  BitVector JTIsLive(JTI->getJumpTables().size());
  for (const MachineBasicBlock &BB : MF) {
    for (const MachineInstr &I : BB)
      for (const MachineOperand &Op : I.operands()) {
        if (!Op.isJTI()) continue;

        // Remember that this JT is live.
        JTIsLive.set(Op.getIndex());
      }
  }

  // Finally, remove dead jump tables.  This happens when the
  // indirect jump was unreachable (and thus deleted).
  for (unsigned i = 0, e = JTIsLive.size(); i != e; ++i)
    if (!JTIsLive.test(i)) {
      JTI->RemoveJumpTable(i);
      MadeChange = true;
    }

  return MadeChange;
}

//===----------------------------------------------------------------------===//
//  Tail Merging of Blocks
//===----------------------------------------------------------------------===//

/// HashMachineInstr - Compute a hash value for MI and its operands.
static unsigned HashMachineInstr(const MachineInstr &MI) {
  unsigned Hash = MI.getOpcode();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &Op = MI.getOperand(i);

    // Merge in bits from the operand if easy. We can't use MachineOperand's
    // hash_code here because it's not deterministic and we sort by hash value
    // later.
    unsigned OperandHash = 0;
    switch (Op.getType()) {
    case MachineOperand::MO_Register:
      OperandHash = Op.getReg();
      break;
    case MachineOperand::MO_Immediate:
      OperandHash = Op.getImm();
      break;
    case MachineOperand::MO_MachineBasicBlock:
      OperandHash = Op.getMBB()->getNumber();
      break;
    case MachineOperand::MO_FrameIndex:
    case MachineOperand::MO_ConstantPoolIndex:
    case MachineOperand::MO_JumpTableIndex:
      OperandHash = Op.getIndex();
      break;
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      // Global address / external symbol are too hard, don't bother, but do
      // pull in the offset.
      OperandHash = Op.getOffset();
      break;
    default:
      break;
    }

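    // Fold the operand hash and the operand type into the running hash,
    // shifting by the operand index (mod 32) so that operand order matters.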
    Hash += ((OperandHash << 3) | Op.getType()) << (i & 31);
  }
  return Hash;
}

/// HashEndOfMBB - Hash the last instruction in the MBB.
static unsigned HashEndOfMBB(const MachineBasicBlock &MBB) {
  MachineBasicBlock::const_iterator I = MBB.getLastNonDebugInstr(false);
  if (I == MBB.end())
    return 0;

  return HashMachineInstr(*I);
}

/// Whether MI should be counted as an instruction when calculating common tail.
static bool countsAsInstruction(const MachineInstr &MI) {
  return !(MI.isDebugInstr() || MI.isCFIInstruction());
}

/// Iterate backwards from the given iterator \p I, towards the beginning of
/// the block. If an MI satisfying 'countsAsInstruction' is found, return an
/// iterator pointing to that MI. If no such MI is found, return the end
/// iterator.
static MachineBasicBlock::iterator
skipBackwardPastNonInstructions(MachineBasicBlock::iterator I,
                                MachineBasicBlock *MBB) {
  while (I != MBB->begin()) {
    --I;
    if (countsAsInstruction(*I))
      return I;
  }
  return MBB->end();
}

/// Given two machine basic blocks, return the number of instructions they
/// actually have in common at their end. If a common tail is found (at least
/// one instruction long), iterators to the first shared instruction in each
/// block are returned as well.
///
/// Non-instructions according to countsAsInstruction are ignored.
static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1,
                                        MachineBasicBlock *MBB2,
                                        MachineBasicBlock::iterator &I1,
                                        MachineBasicBlock::iterator &I2) {
  MachineBasicBlock::iterator MBBI1 = MBB1->end();
  MachineBasicBlock::iterator MBBI2 = MBB2->end();

  unsigned TailLen = 0;
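  // Walk both blocks backwards in lockstep, skipping debug and CFI
  // instructions, and count how many trailing instructions are pairwise
  // identical; I1/I2 track the earliest matched instruction in each block.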
  while (true) {
    MBBI1 = skipBackwardPastNonInstructions(MBBI1, MBB1);
    MBBI2 = skipBackwardPastNonInstructions(MBBI2, MBB2);
    if (MBBI1 == MBB1->end() || MBBI2 == MBB2->end())
      break;
    if (!MBBI1->isIdenticalTo(*MBBI2) ||
        // FIXME: This check is dubious. It's used to get around a problem where
        // people incorrectly expect inline asm directives to remain in the same
        // relative order. This is untenable because normal compiler
        // optimizations (like this one) may reorder and/or merge these
        // directives.
        MBBI1->isInlineAsm()) {
      break;
    }
    if (MBBI1->getFlag(MachineInstr::NoMerge) ||
        MBBI2->getFlag(MachineInstr::NoMerge))
      break;
    ++TailLen;
    I1 = MBBI1;
    I2 = MBBI2;
  }

  return TailLen;
}

void BranchFolder::replaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
                                           MachineBasicBlock &NewDest) {
  if (UpdateLiveIns) {
    // OldInst should always point to an instruction.
    MachineBasicBlock &OldMBB = *OldInst->getParent();
    LiveRegs.clear();
    LiveRegs.addLiveOuts(OldMBB);
    // Move backward to the place where we will insert the jump.
    MachineBasicBlock::iterator I = OldMBB.end();
    do {
      --I;
      LiveRegs.stepBackward(*I);
    } while (I != OldInst);

    // Merging the tails may have switched some undef operands to non-undef
    // ones. Add IMPLICIT_DEFs into OldMBB as necessary to have a definition of
    // the register.
    for (MachineBasicBlock::RegisterMaskPair P : NewDest.liveins()) {
      // We computed the liveins with computeLiveIn earlier and should only see
      // full registers:
      assert(P.LaneMask == LaneBitmask::getAll() &&
             "Can only handle full register.");
      MCPhysReg Reg = P.PhysReg;
      if (!LiveRegs.available(*MRI, Reg))
        continue;
      DebugLoc DL;
      BuildMI(OldMBB, OldInst, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Reg);
    }
  }

  TII->ReplaceTailWithBranchTo(OldInst, &NewDest);
  ++NumTailMerge;
}

MachineBasicBlock *BranchFolder::SplitMBBAt(MachineBasicBlock &CurMBB,
                                            MachineBasicBlock::iterator BBI1,
                                            const BasicBlock *BB) {
  if (!TII->isLegalToSplitMBBAt(CurMBB, BBI1))
    return nullptr;

  MachineFunction &MF = *CurMBB.getParent();

  // Create the fall-through block.
  MachineFunction::iterator MBBI = CurMBB.getIterator();
  MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(BB);
  CurMBB.getParent()->insert(++MBBI, NewMBB);

  // Move all the successors of this block to the specified block.
  NewMBB->transferSuccessors(&CurMBB);

  // Add an edge from CurMBB to NewMBB for the fall-through.
  CurMBB.addSuccessor(NewMBB);

  // Splice the code over.
  NewMBB->splice(NewMBB->end(), &CurMBB, BBI1, CurMBB.end());

  // NewMBB belongs to the same loop as CurMBB.
  if (MLI)
    if (MachineLoop *ML = MLI->getLoopFor(&CurMBB))
      ML->addBasicBlockToLoop(NewMBB, MLI->getBase());

  // NewMBB inherits CurMBB's block frequency.
  MBBFreqInfo.setBlockFreq(NewMBB, MBBFreqInfo.getBlockFreq(&CurMBB));

  if (UpdateLiveIns)
    computeAndAddLiveIns(LiveRegs, *NewMBB);

  // Add the new block to the EH scope.
  const auto &EHScopeI = EHScopeMembership.find(&CurMBB);
  if (EHScopeI != EHScopeMembership.end()) {
    auto n = EHScopeI->second;
    EHScopeMembership[NewMBB] = n;
  }

  return NewMBB;
}

/// EstimateRuntime - Make a rough estimate for how long it will take to run
/// the specified code.
static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
                                MachineBasicBlock::iterator E) {
  unsigned Time = 0;
  for (; I != E; ++I) {
    if (!countsAsInstruction(*I))
      continue;
    if (I->isCall())
      Time += 10;
    else if (I->mayLoadOrStore())
      Time += 2;
    else
      ++Time;
  }
  return Time;
}

// CurMBB needs to add an unconditional branch to SuccMBB (we removed these
// branches temporarily for tail merging).  In the case where CurMBB ends
// with a conditional branch to the next block, optimize by reversing the
// test and conditionally branching to SuccMBB instead.
static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
                    const TargetInstrInfo *TII) {
  MachineFunction *MF = CurMBB->getParent();
  MachineFunction::iterator I = std::next(MachineFunction::iterator(CurMBB));
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  DebugLoc dl = CurMBB->findBranchDebugLoc();
  if (I != MF->end() && !TII->analyzeBranch(*CurMBB, TBB, FBB, Cond, true)) {
    MachineBasicBlock *NextBB = &*I;
    if (TBB == NextBB && !Cond.empty() && !FBB) {
      if (!TII->reverseBranchCondition(Cond)) {
        TII->removeBranch(*CurMBB);
        TII->insertBranch(*CurMBB, SuccBB, nullptr, Cond, dl);
        return;
      }
    }
  }
  TII->insertBranch(*CurMBB, SuccBB, nullptr,
                    SmallVector<MachineOperand, 0>(), dl);
}

bool
BranchFolder::MergePotentialsElt::operator<(const MergePotentialsElt &o) const {
  if (getHash() < o.getHash())
    return true;
  if (getHash() > o.getHash())
    return false;
  if (getBlock()->getNumber() < o.getBlock()->getNumber())
    return true;
  if (getBlock()->getNumber() > o.getBlock()->getNumber())
    return false;
  // _GLIBCXX_DEBUG checks strict weak ordering, which involves comparing
  // an object with itself.
#ifndef _GLIBCXX_DEBUG
  llvm_unreachable("Predecessor appears twice");
#else
  return false;
#endif
}

/// CountTerminators - Count the number of terminators in the given
/// block and set I to the position of the first non-terminator, if there
/// is one, or MBB->end() otherwise.
static unsigned CountTerminators(MachineBasicBlock *MBB,
                                 MachineBasicBlock::iterator &I) {
  I = MBB->end();
  unsigned NumTerms = 0;
  while (true) {
    if (I == MBB->begin()) {
      I = MBB->end();
      break;
    }
    --I;
    if (!I->isTerminator()) break;
    ++NumTerms;
  }
  return NumTerms;
}

/// A no successor, non-return block probably ends in unreachable and is cold.
/// Also consider a block that ends in an indirect branch to be a return block,
/// since many targets use plain indirect branches to return.
static bool blockEndsInUnreachable(const MachineBasicBlock *MBB) {
  if (!MBB->succ_empty())
    return false;
  if (MBB->empty())
    return true;
  return !(MBB->back().isReturn() || MBB->back().isIndirectBranch());
}

/// ProfitableToMerge - Check if two machine basic blocks have a common tail
/// and decide if it would be profitable to merge those tails.  Return the
/// length of the common tail and iterators to the first common instruction
/// in each block.
/// MBB1, MBB2      The blocks to check
/// MinCommonTailLength  Minimum size of tail block to be merged.
/// CommonTailLen   Out parameter to record the size of the shared tail between
///                 MBB1 and MBB2
/// I1, I2          Iterator references that will be changed to point to the
///                 first instruction in the common tail shared by MBB1,MBB2
/// SuccBB          A common successor of MBB1, MBB2 which are in a canonical
///                 form relative to SuccBB
/// PredBB          The layout predecessor of SuccBB, if any.
/// EHScopeMembership  map from block to EH scope #.
/// AfterPlacement  True if we are merging blocks after layout. Stricter
///                 thresholds apply to prevent undoing tail-duplication.
static bool
ProfitableToMerge(MachineBasicBlock *MBB1, MachineBasicBlock *MBB2,
                  unsigned MinCommonTailLength, unsigned &CommonTailLen,
                  MachineBasicBlock::iterator &I1,
                  MachineBasicBlock::iterator &I2, MachineBasicBlock *SuccBB,
                  MachineBasicBlock *PredBB,
                  DenseMap<const MachineBasicBlock *, int> &EHScopeMembership,
                  bool AfterPlacement,
                  MBFIWrapper &MBBFreqInfo,
                  ProfileSummaryInfo *PSI) {
  // It is never profitable to tail-merge blocks from two different EH scopes.
  if (!EHScopeMembership.empty()) {
    auto EHScope1 = EHScopeMembership.find(MBB1);
    assert(EHScope1 != EHScopeMembership.end());
    auto EHScope2 = EHScopeMembership.find(MBB2);
    assert(EHScope2 != EHScopeMembership.end());
    if (EHScope1->second != EHScope2->second)
      return false;
  }

  CommonTailLen = ComputeCommonTailLength(MBB1, MBB2, I1, I2);
  if (CommonTailLen == 0)
    return false;
  LLVM_DEBUG(dbgs() << "Common tail length of " << printMBBReference(*MBB1)
                    << " and " << printMBBReference(*MBB2) << " is "
                    << CommonTailLen << '\n');

  // Move the iterators to the beginning of the MBB if only debug instructions
  // precede the tail. This avoids splitting a block merely because of debug
  // instructions, so the result is invariant under -g.
  if (skipDebugInstructionsForward(MBB1->begin(), MBB1->end(), false) == I1)
    I1 = MBB1->begin();
  if (skipDebugInstructionsForward(MBB2->begin(), MBB2->end(), false) == I2)
    I2 = MBB2->begin();

  bool FullBlockTail1 = I1 == MBB1->begin();
  bool FullBlockTail2 = I2 == MBB2->begin();

  // It's almost always profitable to merge any number of non-terminator
  // instructions with the block that falls through into the common successor.
  // This is true only for a single successor. For multiple successors, we are
  // trading a conditional branch for an unconditional one.
  // TODO: Re-visit successor size for non-layout tail merging.
  if ((MBB1 == PredBB || MBB2 == PredBB) &&
      (!AfterPlacement || MBB1->succ_size() == 1)) {
    MachineBasicBlock::iterator I;
    unsigned NumTerms = CountTerminators(MBB1 == PredBB ? MBB2 : MBB1, I);
    if (CommonTailLen > NumTerms)
      return true;
  }

  // If these are identical non-return blocks with no successors, merge them.
  // Such blocks are typically cold calls to noreturn functions like abort, and
  // are unlikely to become a fallthrough target after machine block placement.
  // Tail merging these blocks is unlikely to create additional unconditional
  // branches, and will reduce the size of this cold code.
  if (FullBlockTail1 && FullBlockTail2 &&
      blockEndsInUnreachable(MBB1) && blockEndsInUnreachable(MBB2))
    return true;

  // If one of the blocks can be completely merged and happens to be in
  // a position where the other could fall through into it, merge any number
  // of instructions, because it can be done without a branch.
  // TODO: If the blocks are not adjacent, move one of them so that they are?
  if (MBB1->isLayoutSuccessor(MBB2) && FullBlockTail2)
    return true;
  if (MBB2->isLayoutSuccessor(MBB1) && FullBlockTail1)
    return true;

  // If both blocks are identical and end in a branch, merge them unless they
  // both have a fallthrough predecessor and successor.
  // We can only do this after block placement because it depends on whether
  // there are fallthroughs, and we don't know until after layout.
  if (AfterPlacement && FullBlockTail1 && FullBlockTail2) {
    auto BothFallThrough = [](MachineBasicBlock *MBB) {
      if (MBB->succ_size() != 0 && !MBB->canFallThrough())
        return false;
      MachineFunction::iterator I(MBB);
      MachineFunction *MF = MBB->getParent();
      return (MBB != &*MF->begin()) && std::prev(I)->canFallThrough();
    };
    if (!BothFallThrough(MBB1) || !BothFallThrough(MBB2))
      return true;
  }

  // If both blocks have an unconditional branch temporarily stripped out,
  // count that as an additional common instruction for the following
  // heuristics. This heuristic is only accurate for single-successor blocks,
  // so to make sure merging and duplicating during layout don't crash, we
  // check for a single successor when merging during layout.
  unsigned EffectiveTailLen = CommonTailLen;
  if (SuccBB && MBB1 != PredBB && MBB2 != PredBB &&
      (MBB1->succ_size() == 1 || !AfterPlacement) &&
      !MBB1->back().isBarrier() &&
      !MBB2->back().isBarrier())
    ++EffectiveTailLen;

  // Check if the common tail is long enough to be worthwhile.
  if (EffectiveTailLen >= MinCommonTailLength)
    return true;

  // If we are optimizing for code size, 2 instructions in common is enough if
  // we don't have to split a block.  At worst we will be introducing 1 new
  // branch instruction, which is likely to be smaller than the 2
  // instructions that would be deleted in the merge.
  MachineFunction *MF = MBB1->getParent();
  bool OptForSize =
      MF->getFunction().hasOptSize() ||
      (llvm::shouldOptimizeForSize(MBB1, PSI, &MBBFreqInfo) &&
       llvm::shouldOptimizeForSize(MBB2, PSI, &MBBFreqInfo));
  return EffectiveTailLen >= 2 && OptForSize &&
         (FullBlockTail1 || FullBlockTail2);
}

unsigned BranchFolder::ComputeSameTails(unsigned CurHash,
                                        unsigned MinCommonTailLength,
                                        MachineBasicBlock *SuccBB,
                                        MachineBasicBlock *PredBB) {
  unsigned maxCommonTailLength = 0U;
  SameTails.clear();
  MachineBasicBlock::iterator TrialBBI1, TrialBBI2;
  MPIterator HighestMPIter = std::prev(MergePotentials.end());
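  // MergePotentials is sorted by hash, so every block whose tail hashes to
  // CurHash sits in a contiguous run at the end of the vector; compare each
  // pair in that run and keep the set sharing the longest common tail.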
  for (MPIterator CurMPIter = std::prev(MergePotentials.end()),
                  B = MergePotentials.begin();
       CurMPIter != B && CurMPIter->getHash() == CurHash; --CurMPIter) {
    for (MPIterator I = std::prev(CurMPIter); I->getHash() == CurHash; --I) {
      unsigned CommonTailLen;
      if (ProfitableToMerge(CurMPIter->getBlock(), I->getBlock(),
                            MinCommonTailLength,
                            CommonTailLen, TrialBBI1, TrialBBI2,
                            SuccBB, PredBB,
                            EHScopeMembership,
                            AfterBlockPlacement, MBBFreqInfo, PSI)) {
        if (CommonTailLen > maxCommonTailLength) {
          SameTails.clear();
          maxCommonTailLength = CommonTailLen;
          HighestMPIter = CurMPIter;
          SameTails.push_back(SameTailElt(CurMPIter, TrialBBI1));
        }
        if (HighestMPIter == CurMPIter &&
            CommonTailLen == maxCommonTailLength)
          SameTails.push_back(SameTailElt(I, TrialBBI2));
      }
      if (I == B)
        break;
    }
  }
  return maxCommonTailLength;
}

void BranchFolder::RemoveBlocksWithHash(unsigned CurHash,
                                        MachineBasicBlock *SuccBB,
                                        MachineBasicBlock *PredBB) {
  MPIterator CurMPIter, B;
  for (CurMPIter = std::prev(MergePotentials.end()),
      B = MergePotentials.begin();
       CurMPIter->getHash() == CurHash; --CurMPIter) {
    // Put the unconditional branch back, if we need one.
    MachineBasicBlock *CurMBB = CurMPIter->getBlock();
    if (SuccBB && CurMBB != PredBB)
      FixTail(CurMBB, SuccBB, TII);
    if (CurMPIter == B)
      break;
  }
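  // The loop may have stepped onto the first element whose hash differs; if
  // so, move forward one so the erase below removes exactly the CurHash group.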
  if (CurMPIter->getHash() != CurHash)
    CurMPIter++;
  MergePotentials.erase(CurMPIter, MergePotentials.end());
}

bool BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
                                             MachineBasicBlock *SuccBB,
                                             unsigned maxCommonTailLength,
                                             unsigned &commonTailIndex) {
  commonTailIndex = 0;
  unsigned TimeEstimate = ~0U;
  for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
    // Use PredBB if possible; that doesn't require a new branch.
    if (SameTails[i].getBlock() == PredBB) {
      commonTailIndex = i;
      break;
    }
    // Otherwise, make a (fairly bogus) choice based on estimate of
    // how long it will take the various blocks to execute.
    unsigned t = EstimateRuntime(SameTails[i].getBlock()->begin(),
                                 SameTails[i].getTailStartPos());
    if (t <= TimeEstimate) {
      TimeEstimate = t;
      commonTailIndex = i;
    }
  }

  MachineBasicBlock::iterator BBI =
      SameTails[commonTailIndex].getTailStartPos();
  MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();

  LLVM_DEBUG(dbgs() << "\nSplitting " << printMBBReference(*MBB) << ", size "
                    << maxCommonTailLength);

  // If the split block unconditionally falls-thru to SuccBB, it will be
  // merged. In control flow terms it should then take SuccBB's name. e.g. If
  // SuccBB is an inner loop, the common tail is still part of the inner loop.
  const BasicBlock *BB = (SuccBB && MBB->succ_size() == 1) ?
    SuccBB->getBasicBlock() : MBB->getBasicBlock();
  MachineBasicBlock *newMBB = SplitMBBAt(*MBB, BBI, BB);
  if (!newMBB) {
    LLVM_DEBUG(dbgs() << "... failed!");
    return false;
  }

  SameTails[commonTailIndex].setBlock(newMBB);
  SameTails[commonTailIndex].setTailStartPos(newMBB->begin());

  // If we split PredBB, newMBB is the new predecessor.
  if (PredBB == MBB)
    PredBB = newMBB;

  return true;
}

static void
mergeOperations(MachineBasicBlock::iterator MBBIStartPos,
                MachineBasicBlock &MBBCommon) {
  MachineBasicBlock *MBB = MBBIStartPos->getParent();
  // Note that CommonTailLen does not necessarily match the size of the common
  // block or its full instruction list, because the blocks may differ in
  // debug instructions.
  unsigned CommonTailLen = 0;
  for (auto E = MBB->end(); MBBIStartPos != E; ++MBBIStartPos)
    ++CommonTailLen;

  MachineBasicBlock::reverse_iterator MBBI = MBB->rbegin();
  MachineBasicBlock::reverse_iterator MBBIE = MBB->rend();
  MachineBasicBlock::reverse_iterator MBBICommon = MBBCommon.rbegin();
  MachineBasicBlock::reverse_iterator MBBIECommon = MBBCommon.rend();

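  // Walk the two tails in parallel from the end, pairing up the instructions
  // that count, and fold memory operands and undef flags from this tail into
  // the corresponding instruction of the common tail.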
  while (CommonTailLen--) {
    assert(MBBI != MBBIE && "Reached BB end within common tail length!");
    (void)MBBIE;

    if (!countsAsInstruction(*MBBI)) {
      ++MBBI;
      continue;
    }

    while ((MBBICommon != MBBIECommon) && !countsAsInstruction(*MBBICommon))
      ++MBBICommon;

    assert(MBBICommon != MBBIECommon &&
           "Reached BB end within common tail length!");
    assert(MBBICommon->isIdenticalTo(*MBBI) && "Expected matching MIIs!");

    // Merge MMOs from memory operations in the common block.
    if (MBBICommon->mayLoadOrStore())
      MBBICommon->cloneMergedMemRefs(*MBB->getParent(), {&*MBBICommon, &*MBBI});
    // Drop undef flags if they aren't present in all merged instructions.
    for (unsigned I = 0, E = MBBICommon->getNumOperands(); I != E; ++I) {
      MachineOperand &MO = MBBICommon->getOperand(I);
      if (MO.isReg() && MO.isUndef()) {
        const MachineOperand &OtherMO = MBBI->getOperand(I);
        if (!OtherMO.isUndef())
          MO.setIsUndef(false);
      }
    }

    ++MBBI;
    ++MBBICommon;
  }
}

void BranchFolder::mergeCommonTails(unsigned commonTailIndex) {
  MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();

  std::vector<MachineBasicBlock::iterator> NextCommonInsts(SameTails.size());
  for (unsigned int i = 0 ; i != SameTails.size() ; ++i) {
    if (i != commonTailIndex) {
      NextCommonInsts[i] = SameTails[i].getTailStartPos();
      mergeOperations(SameTails[i].getTailStartPos(), *MBB);
    } else {
      assert(SameTails[i].getTailStartPos() == MBB->begin() &&
             "MBB is not a common tail only block");
    }
  }

  for (auto &MI : *MBB) {
    if (!countsAsInstruction(MI))
      continue;
    DebugLoc DL = MI.getDebugLoc();
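    // For each other tail, find the instruction corresponding to MI and merge
    // its debug location into DL, so the common tail carries a location that
    // is valid for every block it replaces.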
    for (unsigned int i = 0 ; i < NextCommonInsts.size() ; i++) {
      if (i == commonTailIndex)
        continue;

      auto &Pos = NextCommonInsts[i];
      assert(Pos != SameTails[i].getBlock()->end() &&
             "Reached BB end within common tail");
      while (!countsAsInstruction(*Pos)) {
        ++Pos;
        assert(Pos != SameTails[i].getBlock()->end() &&
               "Reached BB end within common tail");
      }
      assert(MI.isIdenticalTo(*Pos) && "Expected matching MIIs!");
      DL = DILocation::getMergedLocation(DL, Pos->getDebugLoc());
      NextCommonInsts[i] = ++Pos;
    }
    MI.setDebugLoc(DL);
  }

  if (UpdateLiveIns) {
    LivePhysRegs NewLiveIns(*TRI);
    computeLiveIns(NewLiveIns, *MBB);
    LiveRegs.init(*TRI);

    // The flag merging may lead to some register uses no longer using the
    // <undef> flag, add IMPLICIT_DEFs in the predecessors as necessary.
    for (MachineBasicBlock *Pred : MBB->predecessors()) {
      LiveRegs.clear();
      LiveRegs.addLiveOuts(*Pred);
      MachineBasicBlock::iterator InsertBefore = Pred->getFirstTerminator();
      for (Register Reg : NewLiveIns) {
        if (!LiveRegs.available(*MRI, Reg))
          continue;
        DebugLoc DL;
        BuildMI(*Pred, InsertBefore, DL, TII->get(TargetOpcode::IMPLICIT_DEF),
                Reg);
      }
    }

    MBB->clearLiveIns();
    addLiveIns(*MBB, NewLiveIns);
  }
}

// See if any of the blocks in MergePotentials (which all have SuccBB as a
// successor, or all have no successor if it is null) can be tail-merged.
// If there is a successor, any blocks in MergePotentials that are not
// tail-merged and are not immediately before Succ must have an unconditional
// branch to Succ added (but the predecessor/successor lists need no
// adjustment). The lone predecessor of Succ that falls through into Succ,
// if any, is given in PredBB.
// MinCommonTailLength - Except for the special cases below, tail-merge if
// there are at least this many instructions in common.
bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
                                      MachineBasicBlock *PredBB,
                                      unsigned MinCommonTailLength) {
  bool MadeChange = false;

  LLVM_DEBUG(
      dbgs() << "\nTryTailMergeBlocks: ";
      for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i) dbgs()
      << printMBBReference(*MergePotentials[i].getBlock())
      << (i == e - 1 ? "" : ", ");
      dbgs() << "\n";
      if (SuccBB) {
        dbgs() << "  with successor " << printMBBReference(*SuccBB) << '\n';
        if (PredBB)
          dbgs() << "  which has fall-through from "
                 << printMBBReference(*PredBB) << "\n";
      }
      dbgs() << "Looking for common tails of at least "
             << MinCommonTailLength << " instruction"
             << (MinCommonTailLength == 1 ? "" : "s") << '\n';);

  // Sort by hash value so that blocks with identical end sequences sort
  // together.
  array_pod_sort(MergePotentials.begin(), MergePotentials.end());

  // Walk through equivalence sets looking for actual exact matches.
  while (MergePotentials.size() > 1) {
    unsigned CurHash = MergePotentials.back().getHash();

    // Build SameTails, identifying the set of blocks with this hash code
    // and with the maximum number of instructions in common.
    unsigned maxCommonTailLength = ComputeSameTails(CurHash,
                                                    MinCommonTailLength,
                                                    SuccBB, PredBB);

    // If we didn't find any pair that has at least MinCommonTailLength
    // instructions in common, remove all blocks with this hash code and retry.
    if (SameTails.empty()) {
      RemoveBlocksWithHash(CurHash, SuccBB, PredBB);
      continue;
    }

    // If one of the blocks is the entire common tail (and is not the entry
    // block/an EH pad, which we can't jump to), we can treat all blocks with
    // this same tail at once.  Use PredBB if that is one of the possibilities,
    // as that will not introduce any extra branches.
    MachineBasicBlock *EntryBB =
        &MergePotentials.front().getBlock()->getParent()->front();
    unsigned commonTailIndex = SameTails.size();
    // If there are two blocks, check to see if one can be made to fall through
    // into the other.
    if (SameTails.size() == 2 &&
        SameTails[0].getBlock()->isLayoutSuccessor(SameTails[1].getBlock()) &&
        SameTails[1].tailIsWholeBlock() && !SameTails[1].getBlock()->isEHPad())
      commonTailIndex = 1;
    else if (SameTails.size() == 2 &&
             SameTails[1].getBlock()->isLayoutSuccessor(
                 SameTails[0].getBlock()) &&
             SameTails[0].tailIsWholeBlock() &&
             !SameTails[0].getBlock()->isEHPad())
      commonTailIndex = 0;
    else {
      // Otherwise just pick one, favoring the fall-through predecessor if
      // there is one.
      for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
        MachineBasicBlock *MBB = SameTails[i].getBlock();
        if ((MBB == EntryBB || MBB->isEHPad()) &&
            SameTails[i].tailIsWholeBlock())
          continue;
        if (MBB == PredBB) {
          commonTailIndex = i;
          break;
        }
        if (SameTails[i].tailIsWholeBlock())
          commonTailIndex = i;
      }
    }

    if (commonTailIndex == SameTails.size() ||
        (SameTails[commonTailIndex].getBlock() == PredBB &&
         !SameTails[commonTailIndex].tailIsWholeBlock())) {
      // None of the blocks consist entirely of the common tail.
      // Split a block so that one does.
      if (!CreateCommonTailOnlyBlock(PredBB, SuccBB,
                                     maxCommonTailLength, commonTailIndex)) {
        RemoveBlocksWithHash(CurHash, SuccBB, PredBB);
        continue;
      }
    }

    MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();

    // Recompute common tail MBB's edge weights and block frequency.
    setCommonTailEdgeWeights(*MBB);

    // Merge debug locations, MMOs and undef flags across identical
    // instructions for the common tail.
    mergeCommonTails(commonTailIndex);

    // MBB is common tail.  Adjust all other BB's to jump to this one.
    // Traversal must be forwards so erases work.
    LLVM_DEBUG(dbgs() << "\nUsing common tail in " << printMBBReference(*MBB)
                      << " for ");
    for (unsigned int i=0, e = SameTails.size(); i != e; ++i) {
      if (commonTailIndex == i)
        continue;
      LLVM_DEBUG(dbgs() << printMBBReference(*SameTails[i].getBlock())
                        << (i == e - 1 ? "" : ", "));
      // Hack the end off BB i, making it jump to BB commonTailIndex instead.
      replaceTailWithBranchTo(SameTails[i].getTailStartPos(), *MBB);
      // BB i is no longer a predecessor of SuccBB; remove it from the worklist.
      MergePotentials.erase(SameTails[i].getMPIter());
    }
    LLVM_DEBUG(dbgs() << "\n");
    // We leave commonTailIndex in the worklist in case there are other blocks
    // that match it with a smaller number of instructions.
    MadeChange = true;
  }
  return MadeChange;
}

bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
  bool MadeChange = false;
  if (!EnableTailMerge)
    return MadeChange;

  // First find blocks with no successors.
  // Block placement may create new tail merging opportunities for these blocks.
  MergePotentials.clear();
  for (MachineBasicBlock &MBB : MF) {
    if (MergePotentials.size() == TailMergeThreshold)
      break;
    if (!TriedMerging.count(&MBB) && MBB.succ_empty())
      MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(MBB), &MBB));
  }

  // If this is a large problem, avoid visiting the same basic blocks
  // multiple times.
  if (MergePotentials.size() == TailMergeThreshold)
    for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
      TriedMerging.insert(MergePotentials[i].getBlock());

  // See if we can do any tail merging on those.
  if (MergePotentials.size() >= 2)
    MadeChange |= TryTailMergeBlocks(nullptr, nullptr, MinCommonTailLength);

  // Look at blocks (IBB) with multiple predecessors (PBB).
  // We change each predecessor to a canonical form, by
  // (1) temporarily removing any unconditional branch from the predecessor
  // to IBB, and
  // (2) alter conditional branches so they branch to the other block
  // not IBB; this may require adding back an unconditional branch to IBB
  // later, where there wasn't one coming in.  E.g.
  //   Bcc IBB
  //   fallthrough to QBB
  // here becomes
  //   Bncc QBB
  // with a conceptual B to IBB after that, which never actually exists.
  // With those changes, we see whether the predecessors' tails match,
  // and merge them if so.  We change things out of canonical form and
  // back to the way they were later in the process.  (OptimizeBranches
  // would undo some of this, but we can't use it, because we'd get into
  // a compile-time infinite loop repeatedly doing and undoing the same
  // transformations.)

  for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
       I != E; ++I) {
    if (I->pred_size() < 2) continue;
    SmallPtrSet<MachineBasicBlock *, 8> UniquePreds;
    MachineBasicBlock *IBB = &*I;
    MachineBasicBlock *PredBB = &*std::prev(I);
    MergePotentials.clear();
    MachineLoop *ML;

    // Bail if merging after placement and IBB is the loop header because
    // -- If merging predecessors that belong to the same loop as IBB, the
    // common tail of merged predecessors may become the loop top if block
    // placement is called again and the predecessors may branch to this common
    // tail and require more branches. This can be relaxed if
    // MachineBlockPlacement::findBestLoopTop is more flexible.
    // --If merging predecessors that do not belong to the same loop as IBB, the
    // loop info of IBB's loop and the other loops may be affected. Calling the
    // block placement again may make big change to the layout and eliminate the
    // reason to do tail merging here.
    if (AfterBlockPlacement && MLI) {
      ML = MLI->getLoopFor(IBB);
      if (ML && IBB == ML->getHeader())
        continue;
    }

    for (MachineBasicBlock *PBB : I->predecessors()) {
      if (MergePotentials.size() == TailMergeThreshold)
        break;

      if (TriedMerging.count(PBB))
        continue;

1074 | // Skip blocks that loop to themselves; we can't tail merge these. | ||||||||
1075 | if (PBB == IBB) | ||||||||
1076 | continue; | ||||||||
1077 | |||||||||
1078 | // Visit each predecessor only once. | ||||||||
1079 | if (!UniquePreds.insert(PBB).second) | ||||||||
1080 | continue; | ||||||||
1081 | |||||||||
1082 | // Skip blocks which may jump to a landing pad or jump from an asm blob. | ||||||||
1083 | // Can't tail merge these. | ||||||||
1084 | if (PBB->hasEHPadSuccessor() || PBB->mayHaveInlineAsmBr()) | ||||||||
1085 | continue; | ||||||||
1086 | |||||||||
1087 | // After block placement, only consider predecessors that belong to the | ||||||||
1088 | // same loop as IBB. The reason is the same as above when skipping loop | ||||||||
1089 | // header. | ||||||||
1090 | if (AfterBlockPlacement && MLI) | ||||||||
1091 | if (ML != MLI->getLoopFor(PBB)) | ||||||||
1092 | continue; | ||||||||
1093 | |||||||||
1094 | MachineBasicBlock *TBB = nullptr, *FBB = nullptr; | ||||||||
1095 | SmallVector<MachineOperand, 4> Cond; | ||||||||
1096 | if (!TII->analyzeBranch(*PBB, TBB, FBB, Cond, true)) { | ||||||||
1097 | // Failing case: IBB is the target of a cbr, and we cannot reverse the | ||||||||
1098 | // branch. | ||||||||
1099 | SmallVector<MachineOperand, 4> NewCond(Cond); | ||||||||
1100 | if (!Cond.empty() && TBB == IBB) { | ||||||||
1101 | if (TII->reverseBranchCondition(NewCond)) | ||||||||
1102 | continue; | ||||||||
1103 | // This is the QBB case described above | ||||||||
1104 | if (!FBB) { | ||||||||
1105 | auto Next = ++PBB->getIterator(); | ||||||||
1106 | if (Next != MF.end()) | ||||||||
1107 | FBB = &*Next; | ||||||||
1108 | } | ||||||||
1109 | } | ||||||||
1110 | |||||||||
1111 | // Remove the unconditional branch at the end, if any. | ||||||||
1112 | if (TBB && (Cond.empty() || FBB)) { | ||||||||
1113 | DebugLoc dl = PBB->findBranchDebugLoc(); | ||||||||
1114 | TII->removeBranch(*PBB); | ||||||||
1115 | if (!Cond.empty()) | ||||||||
1116 | // reinsert conditional branch only, for now | ||||||||
1117 | TII->insertBranch(*PBB, (TBB == IBB) ? FBB : TBB, nullptr, | ||||||||
1118 | NewCond, dl); | ||||||||
1119 | } | ||||||||
1120 | |||||||||
1121 | MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(*PBB), PBB)); | ||||||||
1122 | } | ||||||||
1123 | } | ||||||||
1124 | |||||||||
1125 | // If this is a large problem, avoid visiting the same basic blocks multiple | ||||||||
1126 | // times. | ||||||||
1127 | if (MergePotentials.size() == TailMergeThreshold) | ||||||||
1128 | for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i) | ||||||||
1129 | TriedMerging.insert(MergePotentials[i].getBlock()); | ||||||||
1130 | |||||||||
1131 | if (MergePotentials.size() >= 2) | ||||||||
1132 | MadeChange |= TryTailMergeBlocks(IBB, PredBB, MinCommonTailLength); | ||||||||
1133 | |||||||||
1134 | // Reinsert an unconditional branch if needed. The 1 below can occur as a | ||||||||
1135 | // result of removing blocks in TryTailMergeBlocks. | ||||||||
1136 | PredBB = &*std::prev(I); // this may have been changed in TryTailMergeBlocks | ||||||||
1137 | if (MergePotentials.size() == 1 && | ||||||||
1138 | MergePotentials.begin()->getBlock() != PredBB) | ||||||||
1139 | FixTail(MergePotentials.begin()->getBlock(), IBB, TII); | ||||||||
1140 | } | ||||||||
1141 | |||||||||
1142 | return MadeChange; | ||||||||
1143 | } | ||||||||
1144 | |||||||||
1145 | void BranchFolder::setCommonTailEdgeWeights(MachineBasicBlock &TailMBB) { | ||||||||
1146 | SmallVector<BlockFrequency, 2> EdgeFreqLs(TailMBB.succ_size()); | ||||||||
1147 | BlockFrequency AccumulatedMBBFreq; | ||||||||
1148 | |||||||||
1149 | // Aggregate edge frequency of successor edge j: | ||||||||
1150 | // edgeFreq(j) = sum (freq(bb) * edgeProb(bb, j)), | ||||||||
1151 | // where bb is a basic block that is in SameTails. | ||||||||
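// A worked example with assumed numbers (not derived from real profile
// data): suppose SameTails holds bb1 and bb2 with freq(bb1)=60 and
// freq(bb2)=40, and TailMBB has successors s0 and s1 with
// edgeProb(bb1,s0)=0.5 and edgeProb(bb2,s0)=0.25. Then
//   edgeFreq(s0) = 60*0.5 + 40*0.25 = 40
//   edgeFreq(s1) = 60*0.5 + 40*0.75 = 60
// and the normalization further below assigns s0 probability 40/100 and
// s1 probability 60/100.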
1152 | for (const auto &Src : SameTails) { | ||||||||
1153 | const MachineBasicBlock *SrcMBB = Src.getBlock(); | ||||||||
1154 | BlockFrequency BlockFreq = MBBFreqInfo.getBlockFreq(SrcMBB); | ||||||||
1155 | AccumulatedMBBFreq += BlockFreq; | ||||||||
1156 | |||||||||
1157 | // It is not necessary to recompute edge weights if TailMBB has fewer than | ||||||||
1158 | // two successors. | ||||||||
1159 | if (TailMBB.succ_size() <= 1) | ||||||||
1160 | continue; | ||||||||
1161 | |||||||||
1162 | auto EdgeFreq = EdgeFreqLs.begin(); | ||||||||
1163 | |||||||||
1164 | for (auto SuccI = TailMBB.succ_begin(), SuccE = TailMBB.succ_end(); | ||||||||
1165 | SuccI != SuccE; ++SuccI, ++EdgeFreq) | ||||||||
1166 | *EdgeFreq += BlockFreq * MBPI.getEdgeProbability(SrcMBB, *SuccI); | ||||||||
1167 | } | ||||||||
1168 | |||||||||
1169 | MBBFreqInfo.setBlockFreq(&TailMBB, AccumulatedMBBFreq); | ||||||||
1170 | |||||||||
1171 | if (TailMBB.succ_size() <= 1) | ||||||||
1172 | return; | ||||||||
1173 | |||||||||
1174 | auto SumEdgeFreq = | ||||||||
1175 | std::accumulate(EdgeFreqLs.begin(), EdgeFreqLs.end(), BlockFrequency(0)) | ||||||||
1176 | .getFrequency(); | ||||||||
1177 | auto EdgeFreq = EdgeFreqLs.begin(); | ||||||||
1178 | |||||||||
1179 | if (SumEdgeFreq > 0) { | ||||||||
1180 | for (auto SuccI = TailMBB.succ_begin(), SuccE = TailMBB.succ_end(); | ||||||||
1181 | SuccI != SuccE; ++SuccI, ++EdgeFreq) { | ||||||||
1182 | auto Prob = BranchProbability::getBranchProbability( | ||||||||
1183 | EdgeFreq->getFrequency(), SumEdgeFreq); | ||||||||
1184 | TailMBB.setSuccProbability(SuccI, Prob); | ||||||||
1185 | } | ||||||||
1186 | } | ||||||||
1187 | } | ||||||||
1188 | |||||||||
1189 | //===----------------------------------------------------------------------===// | ||||||||
1190 | // Branch Optimization | ||||||||
1191 | //===----------------------------------------------------------------------===// | ||||||||
1192 | |||||||||
1193 | bool BranchFolder::OptimizeBranches(MachineFunction &MF) { | ||||||||
1194 | bool MadeChange = false; | ||||||||
1195 | |||||||||
1196 | // Make sure blocks are numbered in order | ||||||||
1197 | MF.RenumberBlocks(); | ||||||||
1198 | // Renumbering blocks alters EH scope membership, recalculate it. | ||||||||
1199 | EHScopeMembership = getEHScopeMembership(MF); | ||||||||
1200 | |||||||||
1201 | for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end(); | ||||||||
1202 | I != E; ) { | ||||||||
1203 | MachineBasicBlock *MBB = &*I++; | ||||||||
1204 | MadeChange |= OptimizeBlock(MBB); | ||||||||
1205 | |||||||||
1206 | // If it is dead, remove it. | ||||||||
1207 | if (MBB->pred_empty()) { | ||||||||
1208 | RemoveDeadBlock(MBB); | ||||||||
1209 | MadeChange = true; | ||||||||
1210 | ++NumDeadBlocks; | ||||||||
1211 | } | ||||||||
1212 | } | ||||||||
1213 | |||||||||
1214 | return MadeChange; | ||||||||
1215 | } | ||||||||
1216 | |||||||||
1217 | // Blocks should be considered empty if they contain only debug info; | ||||||||
1218 | // else the debug info would affect codegen. | ||||||||
1219 | static bool IsEmptyBlock(MachineBasicBlock *MBB) { | ||||||||
1220 | return MBB->getFirstNonDebugInstr(true) == MBB->end(); | ||||||||
1221 | } | ||||||||
1222 | |||||||||
1223 | // Blocks with only debug info and branches should be considered the same | ||||||||
1224 | // as blocks with only branches. | ||||||||
1225 | static bool IsBranchOnlyBlock(MachineBasicBlock *MBB) { | ||||||||
1226 | MachineBasicBlock::iterator I = MBB->getFirstNonDebugInstr(); | ||||||||
1227 | assert(I != MBB->end() && "empty block!"); | ||||||||
1228 | return I->isBranch(); | ||||||||
1229 | } | ||||||||
1230 | |||||||||
1231 | /// IsBetterFallthrough - Return true if it would be clearly better to | ||||||||
1232 | /// fall-through to MBB1 than to fall through into MBB2. This has to return | ||||||||
1233 | /// a strict ordering; returning true for both (MBB1,MBB2) and (MBB2,MBB1) | ||||||||
1234 | /// would result in infinite loops. | ||||||||
1235 | static bool IsBetterFallthrough(MachineBasicBlock *MBB1, | ||||||||
1236 | MachineBasicBlock *MBB2) { | ||||||||
1237 | assert(MBB1 && MBB2 && "Unknown MachineBasicBlock"); | ||||||||
1238 | |||||||||
1239 | // Right now, we use a simple heuristic. If MBB2 ends with a call, and | ||||||||
1240 | // MBB1 doesn't, we prefer to fall through into MBB1. This allows us to | ||||||||
1241 | // optimize branches that branch to either a return block or an assert block | ||||||||
1242 | // into a fallthrough to the return. | ||||||||
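// Illustrative case (assumed CFG): if MBB1 ends in a return and MBB2 ends
// in a call to a no-return function such as abort, the heuristic below
// returns true, preferring a fallthrough into the return block while the
// abort block is reached only by an explicit branch.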
1243 | MachineBasicBlock::iterator MBB1I = MBB1->getLastNonDebugInstr(); | ||||||||
| |||||||||
1244 | MachineBasicBlock::iterator MBB2I = MBB2->getLastNonDebugInstr(); | ||||||||
1245 | if (MBB1I == MBB1->end() || MBB2I == MBB2->end()) | ||||||||
1246 | return false; | ||||||||
1247 | |||||||||
1248 | // If there is a clear successor ordering we make sure that one block | ||||||||
1249 | // will fall through to the next | ||||||||
1250 | if (MBB1->isSuccessor(MBB2)) return true; | ||||||||
1251 | if (MBB2->isSuccessor(MBB1)) return false; | ||||||||
1252 | |||||||||
1253 | return MBB2I->isCall() && !MBB1I->isCall(); | ||||||||
1254 | } | ||||||||
1255 | |||||||||
1256 | /// getBranchDebugLoc - Find and return, if any, the DebugLoc of the branch | ||||||||
1257 | /// instructions on the block. | ||||||||
1258 | static DebugLoc getBranchDebugLoc(MachineBasicBlock &MBB) { | ||||||||
1259 | MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr(); | ||||||||
1260 | if (I != MBB.end() && I->isBranch()) | ||||||||
1261 | return I->getDebugLoc(); | ||||||||
1262 | return DebugLoc(); | ||||||||
1263 | } | ||||||||
1264 | |||||||||
1265 | static void copyDebugInfoToPredecessor(const TargetInstrInfo *TII, | ||||||||
1266 | MachineBasicBlock &MBB, | ||||||||
1267 | MachineBasicBlock &PredMBB) { | ||||||||
1268 | auto InsertBefore = PredMBB.getFirstTerminator(); | ||||||||
1269 | for (MachineInstr &MI : MBB.instrs()) | ||||||||
1270 | if (MI.isDebugInstr()) { | ||||||||
1271 | TII->duplicate(PredMBB, InsertBefore, MI); | ||||||||
1272 | LLVM_DEBUG(dbgs() << "Copied debug entity from empty block to pred: " | ||||||||
1273 |                   << MI); | ||||||||
1274 | } | ||||||||
1275 | } | ||||||||
1276 | |||||||||
1277 | static void copyDebugInfoToSuccessor(const TargetInstrInfo *TII, | ||||||||
1278 | MachineBasicBlock &MBB, | ||||||||
1279 | MachineBasicBlock &SuccMBB) { | ||||||||
1280 | auto InsertBefore = SuccMBB.SkipPHIsAndLabels(SuccMBB.begin()); | ||||||||
1281 | for (MachineInstr &MI : MBB.instrs()) | ||||||||
1282 | if (MI.isDebugInstr()) { | ||||||||
1283 | TII->duplicate(SuccMBB, InsertBefore, MI); | ||||||||
1284 | LLVM_DEBUG(dbgs() << "Copied debug entity from empty block to succ: " | ||||||||
1285 |                   << MI); | ||||||||
1286 | } | ||||||||
1287 | } | ||||||||
1288 | |||||||||
1289 | // Try to salvage DBG_VALUE instructions from an otherwise empty block. If such | ||||||||
1290 | // a basic block is removed we would lose the debug information unless we have | ||||||||
1291 | // copied the information to a predecessor/successor. | ||||||||
1292 | // | ||||||||
1293 | // TODO: This function only handles some simple cases. An alternative would be | ||||||||
1294 | // to run a heavier analysis, such as the LiveDebugValues pass, before we do | ||||||||
1295 | // branch folding. | ||||||||
1296 | static void salvageDebugInfoFromEmptyBlock(const TargetInstrInfo *TII, | ||||||||
1297 | MachineBasicBlock &MBB) { | ||||||||
1298 | assert(IsEmptyBlock(&MBB) && "Expected an empty block (except debug info)."); | ||||||||
1299 | // If this MBB is the only predecessor of a successor it is legal to copy | ||||||||
1300 | // DBG_VALUE instructions to the beginning of the successor. | ||||||||
1301 | for (MachineBasicBlock *SuccBB : MBB.successors()) | ||||||||
1302 | if (SuccBB->pred_size() == 1) | ||||||||
1303 | copyDebugInfoToSuccessor(TII, MBB, *SuccBB); | ||||||||
1304 | // If this MBB is the only successor of a predecessor it is legal to copy the | ||||||||
1305 | // DBG_VALUE instructions to the end of the predecessor (just before the | ||||||||
1306 | // terminators, assuming that the terminator isn't affecting the DBG_VALUE). | ||||||||
1307 | for (MachineBasicBlock *PredBB : MBB.predecessors()) | ||||||||
1308 | if (PredBB->succ_size() == 1) | ||||||||
1309 | copyDebugInfoToPredecessor(TII, MBB, *PredBB); | ||||||||
1310 | } | ||||||||
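// Illustrative sketch (hypothetical MIR): for an otherwise-empty bb.1
// containing a single DBG_VALUE, with predecessor bb.0 whose sole successor
// is bb.1 and successor bb.2 whose sole predecessor is bb.1, the DBG_VALUE
// is duplicated both before bb.0's terminators and after bb.2's
// PHIs/labels, so the variable location survives if bb.1 is later removed.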
1311 | |||||||||
1312 | bool BranchFolder::OptimizeBlock(MachineBasicBlock *MBB) { | ||||||||
1313 | bool MadeChange = false; | ||||||||
1314 | MachineFunction &MF = *MBB->getParent(); | ||||||||
1315 | ReoptimizeBlock: | ||||||||
1316 | |||||||||
1317 | MachineFunction::iterator FallThrough = MBB->getIterator(); | ||||||||
1318 | ++FallThrough; | ||||||||
1319 | |||||||||
1320 | // Make sure MBB and FallThrough belong to the same EH scope. | ||||||||
1321 | bool SameEHScope = true; | ||||||||
1322 | if (!EHScopeMembership.empty() && FallThrough != MF.end()) { | ||||||||
| |||||||||
1323 | auto MBBEHScope = EHScopeMembership.find(MBB); | ||||||||
1324 | assert(MBBEHScope != EHScopeMembership.end())((void)0); | ||||||||
1325 | auto FallThroughEHScope = EHScopeMembership.find(&*FallThrough); | ||||||||
1326 | assert(FallThroughEHScope != EHScopeMembership.end())((void)0); | ||||||||
1327 | SameEHScope = MBBEHScope->second == FallThroughEHScope->second; | ||||||||
1328 | } | ||||||||
1329 | |||||||||
1330 | // Analyze the branch in the current block. As a side-effect, this may cause | ||||||||
1331 | // the block to become empty. | ||||||||
1332 | MachineBasicBlock *CurTBB = nullptr, *CurFBB = nullptr; | ||||||||
1333 | SmallVector<MachineOperand, 4> CurCond; | ||||||||
1334 | bool CurUnAnalyzable = | ||||||||
1335 | TII->analyzeBranch(*MBB, CurTBB, CurFBB, CurCond, true); | ||||||||
1336 | |||||||||
1337 | // If this block is empty, make everyone use its fall-through, not the block | ||||||||
1338 | // explicitly. Landing pads should not do this since the landing-pad table | ||||||||
1339 | // points to this block. Blocks with their addresses taken shouldn't be | ||||||||
1340 | // optimized away. | ||||||||
1341 | if (IsEmptyBlock(MBB) && !MBB->isEHPad() && !MBB->hasAddressTaken() && | ||||||||
1342 | SameEHScope) { | ||||||||
1343 | salvageDebugInfoFromEmptyBlock(TII, *MBB); | ||||||||
1344 | // Dead block? Leave for cleanup later. | ||||||||
1345 | if (MBB->pred_empty()) return MadeChange; | ||||||||
1346 | |||||||||
1347 | if (FallThrough == MF.end()) { | ||||||||
1348 | // TODO: Simplify preds to not branch here if possible! | ||||||||
1349 | } else if (FallThrough->isEHPad()) { | ||||||||
1350 | // Don't rewrite to a landing pad fallthrough. That could lead to the case | ||||||||
1351 | // where a BB jumps to more than one landing pad. | ||||||||
1352 | // TODO: Is it ever worth rewriting predecessors which don't already | ||||||||
1353 | // jump to a landing pad, and so can safely jump to the fallthrough? | ||||||||
1354 | } else if (MBB->isSuccessor(&*FallThrough)) { | ||||||||
1355 | // Rewrite all predecessors of the old block to go to the fallthrough | ||||||||
1356 | // instead. | ||||||||
1357 | while (!MBB->pred_empty()) { | ||||||||
1358 | MachineBasicBlock *Pred = *(MBB->pred_end()-1); | ||||||||
1359 | Pred->ReplaceUsesOfBlockWith(MBB, &*FallThrough); | ||||||||
1360 | } | ||||||||
1361 | // If MBB was the target of a jump table, update jump tables to go to the | ||||||||
1362 | // fallthrough instead. | ||||||||
1363 | if (MachineJumpTableInfo *MJTI = MF.getJumpTableInfo()) | ||||||||
1364 | MJTI->ReplaceMBBInJumpTables(MBB, &*FallThrough); | ||||||||
1365 | MadeChange = true; | ||||||||
1366 | } | ||||||||
1367 | return MadeChange; | ||||||||
1368 | } | ||||||||
1369 | |||||||||
1370 | // Check to see if we can simplify the terminator of the block before this | ||||||||
1371 | // one. | ||||||||
1372 | MachineBasicBlock &PrevBB = *std::prev(MachineFunction::iterator(MBB)); | ||||||||
1373 | |||||||||
1374 | MachineBasicBlock *PriorTBB = nullptr, *PriorFBB = nullptr; | ||||||||
1375 | SmallVector<MachineOperand, 4> PriorCond; | ||||||||
1376 | bool PriorUnAnalyzable = | ||||||||
1377 | TII->analyzeBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, true); | ||||||||
1378 | if (!PriorUnAnalyzable) { | ||||||||
1379 | // If the previous branch is conditional and both conditions go to the same | ||||||||
1380 | // destination, remove the branch, replacing it with an unconditional one or | ||||||||
1381 | // a fall-through. | ||||||||
1382 | if (PriorTBB && PriorTBB == PriorFBB) { | ||||||||
1383 | DebugLoc dl = getBranchDebugLoc(PrevBB); | ||||||||
1384 | TII->removeBranch(PrevBB); | ||||||||
1385 | PriorCond.clear(); | ||||||||
1386 | if (PriorTBB != MBB) | ||||||||
1387 | TII->insertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl); | ||||||||
1388 | MadeChange = true; | ||||||||
1389 | ++NumBranchOpts; | ||||||||
1390 | goto ReoptimizeBlock; | ||||||||
1391 | } | ||||||||
1392 | |||||||||
1393 | // If the previous block unconditionally falls through to this block and | ||||||||
1394 | // this block has no other predecessors, move the contents of this block | ||||||||
1395 | // into the prior block. This doesn't usually happen when SimplifyCFG | ||||||||
1396 | // has been used, but it can happen if tail merging splits a fall-through | ||||||||
1397 | // predecessor of a block. | ||||||||
1398 | // This has to check PrevBB->succ_size() because EH edges are ignored by | ||||||||
1399 | // analyzeBranch. | ||||||||
1400 | if (PriorCond.empty() && !PriorTBB && MBB->pred_size() == 1 && | ||||||||
1401 | PrevBB.succ_size() == 1 && | ||||||||
1402 | !MBB->hasAddressTaken() && !MBB->isEHPad()) { | ||||||||
1403 | LLVM_DEBUG(dbgs() << "\nMerging into block: " << PrevBB | ||||||||
1404 |                   << "From MBB: " << *MBB); | ||||||||
1405 | // Remove redundant DBG_VALUEs first. | ||||||||
1406 | if (!PrevBB.empty()) { | ||||||||
1407 | MachineBasicBlock::iterator PrevBBIter = PrevBB.end(); | ||||||||
1408 | --PrevBBIter; | ||||||||
1409 | MachineBasicBlock::iterator MBBIter = MBB->begin(); | ||||||||
1410 | // Check if DBG_VALUE at the end of PrevBB is identical to the | ||||||||
1411 | // DBG_VALUE at the beginning of MBB. | ||||||||
1412 | while (PrevBBIter != PrevBB.begin() && MBBIter != MBB->end() | ||||||||
1413 | && PrevBBIter->isDebugInstr() && MBBIter->isDebugInstr()) { | ||||||||
1414 | if (!MBBIter->isIdenticalTo(*PrevBBIter)) | ||||||||
1415 | break; | ||||||||
1416 | MachineInstr &DuplicateDbg = *MBBIter; | ||||||||
1417 | ++MBBIter; --PrevBBIter; | ||||||||
1418 | DuplicateDbg.eraseFromParent(); | ||||||||
1419 | } | ||||||||
1420 | } | ||||||||
1421 | PrevBB.splice(PrevBB.end(), MBB, MBB->begin(), MBB->end()); | ||||||||
1422 | PrevBB.removeSuccessor(PrevBB.succ_begin()); | ||||||||
1423 | assert(PrevBB.succ_empty()); | ||||||||
1424 | PrevBB.transferSuccessors(MBB); | ||||||||
1425 | MadeChange = true; | ||||||||
1426 | return MadeChange; | ||||||||
1427 | } | ||||||||
1428 | |||||||||
1429 | // If the previous branch *only* branches to *this* block (conditional or | ||||||||
1430 | // not) remove the branch. | ||||||||
1431 | if (PriorTBB == MBB && !PriorFBB) { | ||||||||
1432 | TII->removeBranch(PrevBB); | ||||||||
1433 | MadeChange = true; | ||||||||
1434 | ++NumBranchOpts; | ||||||||
1435 | goto ReoptimizeBlock; | ||||||||
1436 | } | ||||||||
1437 | |||||||||
1438 | // If the prior block branches somewhere else on the condition and here if | ||||||||
1439 | // the condition is false, remove the uncond second branch. | ||||||||
1440 | if (PriorFBB == MBB) { | ||||||||
1441 | DebugLoc dl = getBranchDebugLoc(PrevBB); | ||||||||
1442 | TII->removeBranch(PrevBB); | ||||||||
1443 | TII->insertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl); | ||||||||
1444 | MadeChange = true; | ||||||||
1445 | ++NumBranchOpts; | ||||||||
1446 | goto ReoptimizeBlock; | ||||||||
1447 | } | ||||||||
1448 | |||||||||
1449 | // If the prior block branches here on true and somewhere else on false, and | ||||||||
1450 | // if the branch condition is reversible, reverse the branch to create a | ||||||||
1451 | // fall-through. | ||||||||
1452 | if (PriorTBB == MBB) { | ||||||||
1453 | SmallVector<MachineOperand, 4> NewPriorCond(PriorCond); | ||||||||
1454 | if (!TII->reverseBranchCondition(NewPriorCond)) { | ||||||||
1455 | DebugLoc dl = getBranchDebugLoc(PrevBB); | ||||||||
1456 | TII->removeBranch(PrevBB); | ||||||||
1457 | TII->insertBranch(PrevBB, PriorFBB, nullptr, NewPriorCond, dl); | ||||||||
1458 | MadeChange = true; | ||||||||
1459 | ++NumBranchOpts; | ||||||||
1460 | goto ReoptimizeBlock; | ||||||||
1461 | } | ||||||||
1462 | } | ||||||||
1463 | |||||||||
1464 | // If this block has no successors (e.g. it is a return block or ends with | ||||||||
1465 | // a call to a no-return function like abort or __cxa_throw) and if the pred | ||||||||
1466 | // falls through into this block, and if it would otherwise fall through | ||||||||
1467 | // into the block after this, move this block to the end of the function. | ||||||||
1468 | // | ||||||||
1469 | // We consider it more likely that execution will stay in the function (e.g. | ||||||||
1470 | // due to loops) than it is to exit it. This helps asserts in loops and the | ||||||||
1471 | // like, by moving the assert condition out of the loop body. | ||||||||
1472 | if (MBB->succ_empty() && !PriorCond.empty() && !PriorFBB && | ||||||||
1473 | MachineFunction::iterator(PriorTBB) == FallThrough && | ||||||||
1474 | !MBB->canFallThrough()) { | ||||||||
1475 | bool DoTransform = true; | ||||||||
1476 | |||||||||
1477 | // We have to be careful that the succs of PredBB aren't both no-successor | ||||||||
1478 | // blocks. If neither has successors and if PredBB is the second from | ||||||||
1479 | // last block in the function, we'd just keep swapping the two blocks for | ||||||||
1480 | // last. Only do the swap if one is clearly better to fall through than | ||||||||
1481 | // the other. | ||||||||
1482 | if (FallThrough == --MF.end() && | ||||||||
1483 | !IsBetterFallthrough(PriorTBB, MBB)) | ||||||||
1484 | DoTransform = false; | ||||||||
1485 | |||||||||
1486 | if (DoTransform) { | ||||||||
1487 | // Reverse the branch so we will fall through on the previous true cond. | ||||||||
1488 | SmallVector<MachineOperand, 4> NewPriorCond(PriorCond); | ||||||||
1489 | if (!TII->reverseBranchCondition(NewPriorCond)) { | ||||||||
1490 | LLVM_DEBUG(dbgs() << "\nMoving MBB: " << *MBB | ||||||||
1491 |                   << "To make fallthrough to: " << *PriorTBB << "\n"); | ||||||||
1492 | |||||||||
1493 | DebugLoc dl = getBranchDebugLoc(PrevBB); | ||||||||
1494 | TII->removeBranch(PrevBB); | ||||||||
1495 | TII->insertBranch(PrevBB, MBB, nullptr, NewPriorCond, dl); | ||||||||
1496 | |||||||||
1497 | // Move this block to the end of the function. | ||||||||
1498 | MBB->moveAfter(&MF.back()); | ||||||||
1499 | MadeChange = true; | ||||||||
1500 | ++NumBranchOpts; | ||||||||
1501 | return MadeChange; | ||||||||
1502 | } | ||||||||
1503 | } | ||||||||
1504 | } | ||||||||
1505 | } | ||||||||
1506 | |||||||||
1507 | bool OptForSize = | ||||||||
1508 | MF.getFunction().hasOptSize() || | ||||||||
1509 | llvm::shouldOptimizeForSize(MBB, PSI, &MBBFreqInfo); | ||||||||
1510 | if (!IsEmptyBlock(MBB) && MBB->pred_size() == 1 && OptForSize) { | ||||||||
1511 | // Changing "Jcc foo; foo: jmp bar;" into "Jcc bar;" might change the branch | ||||||||
1512 | // direction, thereby defeating careful block placement and regressing | ||||||||
1513 | // performance. Therefore, only consider this for optsize functions. | ||||||||
1514 | MachineInstr &TailCall = *MBB->getFirstNonDebugInstr(); | ||||||||
1515 | if (TII->isUnconditionalTailCall(TailCall)) { | ||||||||
1516 | MachineBasicBlock *Pred = *MBB->pred_begin(); | ||||||||
1517 | MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr; | ||||||||
1518 | SmallVector<MachineOperand, 4> PredCond; | ||||||||
1519 | bool PredAnalyzable = | ||||||||
1520 | !TII->analyzeBranch(*Pred, PredTBB, PredFBB, PredCond, true); | ||||||||
1521 | |||||||||
1522 | if (PredAnalyzable && !PredCond.empty() && PredTBB == MBB && | ||||||||
1523 | PredTBB != PredFBB) { | ||||||||
1524 | // The predecessor has a conditional branch to this block which consists | ||||||||
1525 | // of only a tail call. Try to fold the tail call into the conditional | ||||||||
1526 | // branch. | ||||||||
1527 | if (TII->canMakeTailCallConditional(PredCond, TailCall)) { | ||||||||
1528 | // TODO: It would be nice if analyzeBranch() could provide a pointer | ||||||||
1529 | // to the branch instruction so replaceBranchWithTailCall() doesn't | ||||||||
1530 | // have to search for it. | ||||||||
1531 | TII->replaceBranchWithTailCall(*Pred, PredCond, TailCall); | ||||||||
1532 | ++NumTailCalls; | ||||||||
1533 | Pred->removeSuccessor(MBB); | ||||||||
1534 | MadeChange = true; | ||||||||
1535 | return MadeChange; | ||||||||
1536 | } | ||||||||
1537 | } | ||||||||
1538 | // If the predecessor is falling through to this block, we could reverse | ||||||||
1539 | // the branch condition and fold the tail call into that. However, after | ||||||||
1540 | // that we might have to re-arrange the CFG to fall through to the other | ||||||||
1541 | // block and there is a high risk of regressing code size rather than | ||||||||
1542 | // improving it. | ||||||||
1543 | } | ||||||||
1544 | } | ||||||||
1545 | |||||||||
1546 | if (!CurUnAnalyzable) { | ||||||||
1547 | // If this is a two-way branch, and the FBB branches to this block, reverse | ||||||||
1548 | // the condition so the single-basic-block loop is faster. Instead of: | ||||||||
1549 | // Loop: xxx; jcc Out; jmp Loop | ||||||||
1550 | // we want: | ||||||||
1551 | // Loop: xxx; jncc Loop; jmp Out | ||||||||
1552 | if (CurTBB && CurFBB && CurFBB == MBB && CurTBB != MBB) { | ||||||||
1553 | SmallVector<MachineOperand, 4> NewCond(CurCond); | ||||||||
1554 | if (!TII->reverseBranchCondition(NewCond)) { | ||||||||
1555 | DebugLoc dl = getBranchDebugLoc(*MBB); | ||||||||
1556 | TII->removeBranch(*MBB); | ||||||||
1557 | TII->insertBranch(*MBB, CurFBB, CurTBB, NewCond, dl); | ||||||||
1558 | MadeChange = true; | ||||||||
1559 | ++NumBranchOpts; | ||||||||
1560 | goto ReoptimizeBlock; | ||||||||
1561 | } | ||||||||
1562 | } | ||||||||
1563 | |||||||||
1564 | // If this branch is the only thing in its block, see if we can forward | ||||||||
1565 | // other blocks across it. | ||||||||
1566 | if (CurTBB && CurCond.empty() && !CurFBB && | ||||||||
1567 | IsBranchOnlyBlock(MBB) && CurTBB != MBB && | ||||||||
1568 | !MBB->hasAddressTaken() && !MBB->isEHPad()) { | ||||||||
1569 | DebugLoc dl = getBranchDebugLoc(*MBB); | ||||||||
1570 | // This block may contain just an unconditional branch. Because there can | ||||||||
1571 | // be 'non-branch terminators' in the block, try removing the branch and | ||||||||
1572 | // then seeing if the block is empty. | ||||||||
1573 | TII->removeBranch(*MBB); | ||||||||
1574 | // If the only things remaining in the block are debug info, remove these | ||||||||
1575 | // as well, so this will behave the same as an empty block in non-debug | ||||||||
1576 | // mode. | ||||||||
1577 | if (IsEmptyBlock(MBB)) { | ||||||||
1578 | // Make the block empty, losing the debug info (we could probably | ||||||||
1579 | // improve this in some cases.) | ||||||||
1580 | MBB->erase(MBB->begin(), MBB->end()); | ||||||||
1581 | } | ||||||||
1582 | // If this block is just an unconditional branch to CurTBB, we can | ||||||||
1583 | // usually completely eliminate the block. The only case we cannot | ||||||||
1584 | // completely eliminate the block is when the block before this one | ||||||||
1585 | // falls through into MBB and we can't understand the prior block's branch | ||||||||
1586 | // condition. | ||||||||
1587 | if (MBB->empty()) { | ||||||||
1588 | bool PredHasNoFallThrough = !PrevBB.canFallThrough(); | ||||||||
1589 | if (PredHasNoFallThrough || !PriorUnAnalyzable || | ||||||||
1590 | !PrevBB.isSuccessor(MBB)) { | ||||||||
1591 | // If the prior block falls through into us, turn it into an | ||||||||
1592 | // explicit branch to us to make updates simpler. | ||||||||
1593 | if (!PredHasNoFallThrough && PrevBB.isSuccessor(MBB) && | ||||||||
1594 | PriorTBB != MBB && PriorFBB != MBB) { | ||||||||
1595 | if (!PriorTBB) { | ||||||||
1596 | assert(PriorCond.empty() && !PriorFBB && | ||||||||
1597 |        "Bad branch analysis"); | ||||||||
1598 | PriorTBB = MBB; | ||||||||
1599 | } else { | ||||||||
1600 | assert(!PriorFBB && "Machine CFG out of date!"); | ||||||||
1601 | PriorFBB = MBB; | ||||||||
1602 | } | ||||||||
1603 | DebugLoc pdl = getBranchDebugLoc(PrevBB); | ||||||||
1604 | TII->removeBranch(PrevBB); | ||||||||
1605 | TII->insertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, pdl); | ||||||||
1606 | } | ||||||||
1607 | |||||||||
1608 | // Iterate through all the predecessors, revectoring each in turn. | ||||||||
1609 | size_t PI = 0; | ||||||||
1610 | bool DidChange = false; | ||||||||
1611 | bool HasBranchToSelf = false; | ||||||||
1612 | while (PI != MBB->pred_size()) { | ||||||||
1613 | MachineBasicBlock *PMBB = *(MBB->pred_begin() + PI); | ||||||||
1614 | if (PMBB == MBB) { | ||||||||
1615 | // If this block has an uncond branch to itself, leave it. | ||||||||
1616 | ++PI; | ||||||||
1617 | HasBranchToSelf = true; | ||||||||
1618 | } else { | ||||||||
1619 | DidChange = true; | ||||||||
1620 | PMBB->ReplaceUsesOfBlockWith(MBB, CurTBB); | ||||||||
1621 | // If this change resulted in PMBB ending in a conditional | ||||||||
1622 | // branch where both conditions go to the same destination, | ||||||||
1623 | // change this to an unconditional branch. | ||||||||
1624 | MachineBasicBlock *NewCurTBB = nullptr, *NewCurFBB = nullptr; | ||||||||
1625 | SmallVector<MachineOperand, 4> NewCurCond; | ||||||||
1626 | bool NewCurUnAnalyzable = TII->analyzeBranch( | ||||||||
1627 | *PMBB, NewCurTBB, NewCurFBB, NewCurCond, true); | ||||||||
1628 | if (!NewCurUnAnalyzable && NewCurTBB && NewCurTBB == NewCurFBB) { | ||||||||
1629 | DebugLoc pdl = getBranchDebugLoc(*PMBB); | ||||||||
1630 | TII->removeBranch(*PMBB); | ||||||||
1631 | NewCurCond.clear(); | ||||||||
1632 | TII->insertBranch(*PMBB, NewCurTBB, nullptr, NewCurCond, pdl); | ||||||||
1633 | MadeChange = true; | ||||||||
1634 | ++NumBranchOpts; | ||||||||
1635 | } | ||||||||
1636 | } | ||||||||
1637 | } | ||||||||
1638 | |||||||||
1639 | // Change any jumptables to go to the new MBB. | ||||||||
1640 | if (MachineJumpTableInfo *MJTI = MF.getJumpTableInfo()) | ||||||||
1641 | MJTI->ReplaceMBBInJumpTables(MBB, CurTBB); | ||||||||
1642 | if (DidChange) { | ||||||||
1643 | ++NumBranchOpts; | ||||||||
1644 | MadeChange = true; | ||||||||
1645 | if (!HasBranchToSelf) return MadeChange; | ||||||||
1646 | } | ||||||||
1647 | } | ||||||||
1648 | } | ||||||||
1649 | |||||||||
1650 | // Add the branch back if the block is more than just an uncond branch. | ||||||||
1651 | TII->insertBranch(*MBB, CurTBB, nullptr, CurCond, dl); | ||||||||
1652 | } | ||||||||
1653 | } | ||||||||
1654 | |||||||||
1655 | // If the prior block doesn't fall through into this block, and if this | ||||||||
1656 | // block doesn't fall through into some other block, see if we can find a | ||||||||
1657 | // place to move this block where a fall-through will happen. | ||||||||
1658 | if (!PrevBB.canFallThrough()) { | ||||||||
1659 | // Now we know that there was no fall-through into this block, check to | ||||||||
1660 | // see if it has a fall-through into its successor. | ||||||||
1661 | bool CurFallsThru = MBB->canFallThrough(); | ||||||||
1662 | |||||||||
1663 | if (!MBB->isEHPad()) { | ||||||||
1664 | // Check all the predecessors of this block. If one of them has no fall | ||||||||
1665 | // throughs, and analyzeBranch thinks it _could_ fallthrough to this | ||||||||
1666 | // block, move this block right after it. | ||||||||
1667 | for (MachineBasicBlock *PredBB : MBB->predecessors()) { | ||||||||
1668 | // Analyze the branch at the end of the pred. | ||||||||
1669 | MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr; | ||||||||
1670 | SmallVector<MachineOperand, 4> PredCond; | ||||||||
1671 | if (PredBB != MBB && !PredBB->canFallThrough() && | ||||||||
1672 | !TII->analyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true) && | ||||||||
1673 | (PredTBB == MBB || PredFBB == MBB) && | ||||||||
1674 | (!CurFallsThru || !CurTBB || !CurFBB) && | ||||||||
1675 | (!CurFallsThru || MBB->getNumber() >= PredBB->getNumber())) { | ||||||||
1676 | // If the current block doesn't fall through, just move it. | ||||||||
1677 | // If the current block can fall through and does not end with a | ||||||||
1678 | // conditional branch, we need to append an unconditional jump to | ||||||||
1679 | // the (current) next block. To avoid a possible compile-time | ||||||||
1680 | // infinite loop, move blocks only backward in this case. | ||||||||
1681 | // Also, if there are already 2 branches here, we cannot add a third; | ||||||||
1682 | // this means we have the case | ||||||||
1683 | // Bcc next | ||||||||
1684 | // B elsewhere | ||||||||
1685 | // next: | ||||||||
1686 | if (CurFallsThru) { | ||||||||
1687 | MachineBasicBlock *NextBB = &*std::next(MBB->getIterator()); | ||||||||
1688 | CurCond.clear(); | ||||||||
1689 | TII->insertBranch(*MBB, NextBB, nullptr, CurCond, DebugLoc()); | ||||||||
1690 | } | ||||||||
1691 | MBB->moveAfter(PredBB); | ||||||||
1692 | MadeChange = true; | ||||||||
1693 | goto ReoptimizeBlock; | ||||||||
1694 | } | ||||||||
1695 | } | ||||||||
1696 | } | ||||||||
1697 | |||||||||
1698 | if (!CurFallsThru) { | ||||||||
1699 | // Check analyzable branch-successors to see if we can move this block | ||||||||
1700 | // before one. | ||||||||
1701 | if (!CurUnAnalyzable) { | ||||||||
1702 | for (MachineBasicBlock *SuccBB : {CurFBB, CurTBB}) { | ||||||||
1703 | if (!SuccBB) | ||||||||
1704 | continue; | ||||||||
1705 | // Analyze the branch at the end of the block before the succ. | ||||||||
1706 | MachineFunction::iterator SuccPrev = --SuccBB->getIterator(); | ||||||||
1707 | |||||||||
1708 | // If this block doesn't already fall-through to that successor, and | ||||||||
1709 | // if the succ doesn't already have a block that can fall through into | ||||||||
1710 | // it, we can arrange for the fallthrough to happen. | ||||||||
1711 | if (SuccBB != MBB && &*SuccPrev != MBB && | ||||||||
1712 | !SuccPrev->canFallThrough()) { | ||||||||
1713 | MBB->moveBefore(SuccBB); | ||||||||
1714 | MadeChange = true; | ||||||||
1715 | goto ReoptimizeBlock; | ||||||||
1716 | } | ||||||||
1717 | } | ||||||||
1718 | } | ||||||||
1719 | |||||||||
1720 | // Okay, there is no really great place to put this block. If, however, | ||||||||
1721 | // the block before this one would be a fall-through if this block were | ||||||||
1722 | // removed, move this block to the end of the function. There is no real | ||||||||
1723 | // advantage in "falling through" to an EH block, so we don't want to | ||||||||
1724 | // perform this transformation for that case. | ||||||||
1725 | // | ||||||||
1726 | // Also, Windows EH introduced the possibility of an arbitrary number of | ||||||||
1727 | // successors to a given block. The analyzeBranch call does not consider | ||||||||
1728 | // exception handling and so we can get in a state where a block | ||||||||
1729 | // containing a call is followed by multiple EH blocks that would be | ||||||||
1730 | // rotated infinitely at the end of the function if the transformation | ||||||||
1731 | // below were performed for EH "FallThrough" blocks. Therefore, even if | ||||||||
1732 | // that appears not to be happening anymore, we should assume that it is | ||||||||
1733 | // possible and not remove the "!FallThrough()->isEHPad" condition below. | ||||||||
1734 | MachineBasicBlock *PrevTBB = nullptr, *PrevFBB = nullptr; | ||||||||
1735 | SmallVector<MachineOperand, 4> PrevCond; | ||||||||
1736 | if (FallThrough != MF.end() && | ||||||||
1737 | !FallThrough->isEHPad() && | ||||||||
1738 | !TII->analyzeBranch(PrevBB, PrevTBB, PrevFBB, PrevCond, true) && | ||||||||
1739 | PrevBB.isSuccessor(&*FallThrough)) { | ||||||||
1740 | MBB->moveAfter(&MF.back()); | ||||||||
1741 | MadeChange = true; | ||||||||
1742 | return MadeChange; | ||||||||
1743 | } | ||||||||
1744 | } | ||||||||
1745 | } | ||||||||
1746 | |||||||||
1747 | return MadeChange; | ||||||||
1748 | } | ||||||||
1749 | |||||||||
1750 | //===----------------------------------------------------------------------===// | ||||||||
1751 | // Hoist Common Code | ||||||||
1752 | //===----------------------------------------------------------------------===// | ||||||||
1753 | |||||||||
1754 | bool BranchFolder::HoistCommonCode(MachineFunction &MF) { | ||||||||
1755 | bool MadeChange = false; | ||||||||
1756 | for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ) { | ||||||||
1757 | MachineBasicBlock *MBB = &*I++; | ||||||||
1758 | MadeChange |= HoistCommonCodeInSuccs(MBB); | ||||||||
1759 | } | ||||||||
1760 | |||||||||
1761 | return MadeChange; | ||||||||
1762 | } | ||||||||
1763 | |||||||||
1764 | /// findFalseBlock - BB has a fallthrough. Find its 'false' successor given | ||||||||
1765 | /// its 'true' successor. | ||||||||
1766 | static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB, | ||||||||
1767 | MachineBasicBlock *TrueBB) { | ||||||||
1768 | for (MachineBasicBlock *SuccBB : BB->successors()) | ||||||||
1769 | if (SuccBB != TrueBB) | ||||||||
1770 | return SuccBB; | ||||||||
1771 | return nullptr; | ||||||||
1772 | } | ||||||||
1773 | |||||||||
1774 | template <class Container> | ||||||||
1775 | static void addRegAndItsAliases(Register Reg, const TargetRegisterInfo *TRI, | ||||||||
1776 | Container &Set) { | ||||||||
1777 | if (Reg.isPhysical()) { | ||||||||
1778 | for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) | ||||||||
1779 | Set.insert(*AI); | ||||||||
1780 | } else { | ||||||||
1781 | Set.insert(Reg); | ||||||||
1782 | } | ||||||||
1783 | } | ||||||||
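// Note (illustrative): for a physical register the loop above inserts the
// register itself plus everything that overlaps it. On x86, for example,
// adding EAX also inserts RAX, AX, AH and AL, which keeps the later
// Uses/Defs checks conservative across sub- and super-registers.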
1784 | |||||||||
1785 | /// findHoistingInsertPosAndDeps - Find the location to move common instructions | ||||||||
1786 | /// in successors to. The location is usually just before the terminator; | ||||||||
1787 | /// however, if the terminator is a conditional branch and its previous | ||||||||
1788 | /// instruction is the flag setting instruction, the previous instruction is | ||||||||
1789 | /// the preferred location. This function also gathers uses and defs of the | ||||||||
1790 | /// instructions from the insertion point to the end of the block. The data is | ||||||||
1791 | /// used by HoistCommonCodeInSuccs to ensure safety. | ||||||||
1792 | static | ||||||||
1793 | MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB, | ||||||||
1794 | const TargetInstrInfo *TII, | ||||||||
1795 | const TargetRegisterInfo *TRI, | ||||||||
1796 | SmallSet<Register, 4> &Uses, | ||||||||
1797 | SmallSet<Register, 4> &Defs) { | ||||||||
1798 | MachineBasicBlock::iterator Loc = MBB->getFirstTerminator(); | ||||||||
1799 | if (!TII->isUnpredicatedTerminator(*Loc)) | ||||||||
1800 | return MBB->end(); | ||||||||
1801 | |||||||||
1802 | for (const MachineOperand &MO : Loc->operands()) { | ||||||||
1803 | if (!MO.isReg()) | ||||||||
1804 | continue; | ||||||||
1805 | Register Reg = MO.getReg(); | ||||||||
1806 | if (!Reg) | ||||||||
1807 | continue; | ||||||||
1808 | if (MO.isUse()) { | ||||||||
1809 | addRegAndItsAliases(Reg, TRI, Uses); | ||||||||
1810 | } else { | ||||||||
1811 | if (!MO.isDead()) | ||||||||
1812 | // Don't try to hoist code in the rare case the terminator defines a | ||||||||
1813 | // register that is later used. | ||||||||
1814 | return MBB->end(); | ||||||||
1815 | |||||||||
1816 | // If the terminator defines a register, make sure we don't hoist | ||||||||
1817 | // the instruction whose def might be clobbered by the terminator. | ||||||||
1818 | addRegAndItsAliases(Reg, TRI, Defs); | ||||||||
1819 | } | ||||||||
1820 | } | ||||||||
1821 | |||||||||
1822 | if (Uses.empty()) | ||||||||
1823 | return Loc; | ||||||||
1824 | // If the terminator is the only instruction in the block and Uses is not | ||||||||
1825 | // empty (or we would have returned above), we can still safely hoist | ||||||||
1826 | // instructions just before the terminator as long as the Defs/Uses are not | ||||||||
1827 | // violated (which is checked in HoistCommonCodeInSuccs). | ||||||||
1828 | if (Loc == MBB->begin()) | ||||||||
1829 | return Loc; | ||||||||
1830 | |||||||||
1831 | // The terminator is probably a conditional branch; try not to separate the | ||||||||
1832 | // branch from the condition-setting instruction. | ||||||||
1833 | MachineBasicBlock::iterator PI = prev_nodbg(Loc, MBB->begin()); | ||||||||
1834 | |||||||||
1835 | bool IsDef = false; | ||||||||
1836 | for (const MachineOperand &MO : PI->operands()) { | ||||||||
1837 | // If PI has a regmask operand, it is probably a call. Separate away. | ||||||||
1838 | if (MO.isRegMask()) | ||||||||
1839 | return Loc; | ||||||||
1840 | if (!MO.isReg() || MO.isUse()) | ||||||||
1841 | continue; | ||||||||
1842 | Register Reg = MO.getReg(); | ||||||||
1843 | if (!Reg) | ||||||||
1844 | continue; | ||||||||
1845 | if (Uses.count(Reg)) { | ||||||||
1846 | IsDef = true; | ||||||||
1847 | break; | ||||||||
1848 | } | ||||||||
1849 | } | ||||||||
1850 | if (!IsDef) | ||||||||
1851 | // The condition setting instruction is not just before the conditional | ||||||||
1852 | // branch. | ||||||||
1853 | return Loc; | ||||||||
1854 | |||||||||
1855 | // Be conservative: don't insert an instruction above something that may have | ||||||||
1856 | // side-effects. And since it's potentially bad to separate the flag-setting | ||||||||
1857 | // instruction from the conditional branch, just abort the optimization | ||||||||
1858 | // completely. | ||||||||
1859 | // Also avoid moving code above a predicated instruction, since it's hard to | ||||||||
1860 | // reason about register liveness with predicated instructions. | ||||||||
1861 | bool DontMoveAcrossStore = true; | ||||||||
1862 | if (!PI->isSafeToMove(nullptr, DontMoveAcrossStore) || TII->isPredicated(*PI)) | ||||||||
1863 | return MBB->end(); | ||||||||
1864 | |||||||||
1865 | // Find out what registers are live. Note this routine is ignoring other live | ||||||||
1866 | // registers which are only used by instructions in successor blocks. | ||||||||
1867 | for (const MachineOperand &MO : PI->operands()) { | ||||||||
1868 | if (!MO.isReg()) | ||||||||
1869 | continue; | ||||||||
1870 | Register Reg = MO.getReg(); | ||||||||
1871 | if (!Reg) | ||||||||
1872 | continue; | ||||||||
1873 | if (MO.isUse()) { | ||||||||
1874 | addRegAndItsAliases(Reg, TRI, Uses); | ||||||||
1875 | } else { | ||||||||
1876 | if (Uses.erase(Reg)) { | ||||||||
1877 | if (Register::isPhysicalRegister(Reg)) { | ||||||||
1878 | for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) | ||||||||
1879 | Uses.erase(*SubRegs); // Use sub-registers to be conservative | ||||||||
1880 | } | ||||||||
1881 | } | ||||||||
1882 | addRegAndItsAliases(Reg, TRI, Defs); | ||||||||
1883 | } | ||||||||
1884 | } | ||||||||
1885 | |||||||||
1886 | return PI; | ||||||||
1887 | } | ||||||||
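// Illustrative sketch (pseudo machine code, names assumed): in a block
// ending with
//   %eflags = CMP %a, %b
//   Bcc %eflags, <taken>
// the branch uses %eflags, which the CMP just before it defines, so this
// returns the CMP as the insertion point rather than the terminator,
// keeping hoisted code above the flag-setting instruction. If the branch
// instead tested a register defined much earlier, the terminator itself
// would be returned.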
1888 | |||||||||
1889 | bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) { | ||||||||
1890 | MachineBasicBlock *TBB = nullptr, *FBB = nullptr; | ||||||||
1891 | SmallVector<MachineOperand, 4> Cond; | ||||||||
1892 | if (TII->analyzeBranch(*MBB, TBB, FBB, Cond, true) || !TBB || Cond.empty()) | ||||||||
1893 | return false; | ||||||||
1894 | |||||||||
1895 | if (!FBB) FBB = findFalseBlock(MBB, TBB); | ||||||||
1896 | if (!FBB) | ||||||||
1897 | // Malformed bcc? True and false blocks are the same? | ||||||||
1898 | return false; | ||||||||
1899 | |||||||||
1900 | // Restrict the optimization to cases where MBB is the only predecessor; | ||||||||
1901 | // there the transformation is an obvious win. | ||||||||
1902 | if (TBB->pred_size() > 1 || FBB->pred_size() > 1) | ||||||||
1903 | return false; | ||||||||
1904 | |||||||||
1905 | // Find a suitable position to hoist the common instructions to. Also figure | ||||||||
1906 | // out which registers are used or defined by instructions from the insertion | ||||||||
1907 | // point to the end of the block. | ||||||||
1908 | SmallSet<Register, 4> Uses, Defs; | ||||||||
1909 | MachineBasicBlock::iterator Loc = | ||||||||
1910 | findHoistingInsertPosAndDeps(MBB, TII, TRI, Uses, Defs); | ||||||||
1911 | if (Loc == MBB->end()) | ||||||||
1912 | return false; | ||||||||
1913 | |||||||||
1914 | bool HasDups = false; | ||||||||
1915 | SmallSet<Register, 4> ActiveDefsSet, AllDefsSet; | ||||||||
1916 | MachineBasicBlock::iterator TIB = TBB->begin(); | ||||||||
1917 | MachineBasicBlock::iterator FIB = FBB->begin(); | ||||||||
1918 | MachineBasicBlock::iterator TIE = TBB->end(); | ||||||||
1919 | MachineBasicBlock::iterator FIE = FBB->end(); | ||||||||
1920 | while (TIB != TIE && FIB != FIE) { | ||||||||
1921 | // Skip dbg_value instructions. These do not count. | ||||||||
1922 | TIB = skipDebugInstructionsForward(TIB, TIE, false); | ||||||||
1923 | FIB = skipDebugInstructionsForward(FIB, FIE, false); | ||||||||
1924 | if (TIB == TIE || FIB == FIE) | ||||||||
1925 | break; | ||||||||
1926 | |||||||||
1927 | if (!TIB->isIdenticalTo(*FIB, MachineInstr::CheckKillDead)) | ||||||||
1928 | break; | ||||||||
1929 | |||||||||
1930 | if (TII->isPredicated(*TIB)) | ||||||||
1931 | // Hard to reason about register liveness with predicated instruction. | ||||||||
1932 | break; | ||||||||
1933 | |||||||||
1934 | bool IsSafe = true; | ||||||||
1935 | for (MachineOperand &MO : TIB->operands()) { | ||||||||
1936 | // Don't attempt to hoist instructions with register masks. | ||||||||
1937 | if (MO.isRegMask()) { | ||||||||
1938 | IsSafe = false; | ||||||||
1939 | break; | ||||||||
1940 | } | ||||||||
1941 | if (!MO.isReg()) | ||||||||
1942 | continue; | ||||||||
1943 | Register Reg = MO.getReg(); | ||||||||
1944 | if (!Reg) | ||||||||
1945 | continue; | ||||||||
1946 | if (MO.isDef()) { | ||||||||
1947 | if (Uses.count(Reg)) { | ||||||||
1948 | // Avoid clobbering a register that's used by the instruction at | ||||||||
1949 | // the point of insertion. | ||||||||
1950 | IsSafe = false; | ||||||||
1951 | break; | ||||||||
1952 | } | ||||||||
1953 | |||||||||
1954 | if (Defs.count(Reg) && !MO.isDead()) { | ||||||||
1955 | // Don't hoist the instruction if the def would be clobbered by the | ||||||||
1956 | // instruction at the point of insertion. FIXME: This is overly | ||||||||
1957 | // conservative. It should be possible to hoist the instructions | ||||||||
1958 | // in BB2 in the following example: | ||||||||
1959 | // BB1: | ||||||||
1960 | // r1, eflag = op1 r2, r3 | ||||||||
1961 | // brcc eflag | ||||||||
1962 | // | ||||||||
1963 | // BB2: | ||||||||
1964 | // r1 = op2, ... | ||||||||
1965 | // = op3, killed r1 | ||||||||
1966 | IsSafe = false; | ||||||||
1967 | break; | ||||||||
1968 | } | ||||||||
1969 | } else if (!ActiveDefsSet.count(Reg)) { | ||||||||
1970 | if (Defs.count(Reg)) { | ||||||||
1971 | // Use is defined by the instruction at the point of insertion. | ||||||||
1972 | IsSafe = false; | ||||||||
1973 | break; | ||||||||
1974 | } | ||||||||
1975 | |||||||||
1976 | if (MO.isKill() && Uses.count(Reg)) | ||||||||
1977 | // Kills a register that's read by the instruction at the point of | ||||||||
1978 | // insertion. Remove the kill marker. | ||||||||
1979 | MO.setIsKill(false); | ||||||||
1980 | } | ||||||||
1981 | } | ||||||||
1982 | if (!IsSafe) | ||||||||
1983 | break; | ||||||||
1984 | |||||||||
1985 | bool DontMoveAcrossStore = true; | ||||||||
1986 | if (!TIB->isSafeToMove(nullptr, DontMoveAcrossStore)) | ||||||||
1987 | break; | ||||||||
1988 | |||||||||
1989 | // Remove kills from ActiveDefsSet, these registers had short live ranges. | ||||||||
1990 | for (const MachineOperand &MO : TIB->operands()) { | ||||||||
1991 | if (!MO.isReg() || !MO.isUse() || !MO.isKill()) | ||||||||
1992 | continue; | ||||||||
1993 | Register Reg = MO.getReg(); | ||||||||
1994 | if (!Reg) | ||||||||
1995 | continue; | ||||||||
1996 | if (!AllDefsSet.count(Reg)) { | ||||||||
1997 | continue; | ||||||||
1998 | } | ||||||||
1999 | if (Reg.isPhysical()) { | ||||||||
2000 | for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) | ||||||||
2001 | ActiveDefsSet.erase(*AI); | ||||||||
2002 | } else { | ||||||||
2003 | ActiveDefsSet.erase(Reg); | ||||||||
2004 | } | ||||||||
2005 | } | ||||||||
2006 | |||||||||
2007 | // Track local defs so we can update liveins. | ||||||||
2008 | for (const MachineOperand &MO : TIB->operands()) { | ||||||||
2009 | if (!MO.isReg() || !MO.isDef() || MO.isDead()) | ||||||||
2010 | continue; | ||||||||
2011 | Register Reg = MO.getReg(); | ||||||||
2012 | if (!Reg || Reg.isVirtual()) | ||||||||
2013 | continue; | ||||||||
2014 | addRegAndItsAliases(Reg, TRI, ActiveDefsSet); | ||||||||
2015 | addRegAndItsAliases(Reg, TRI, AllDefsSet); | ||||||||
2016 | } | ||||||||
2017 | |||||||||
2018 | HasDups = true; | ||||||||
2019 | ++TIB; | ||||||||
2020 | ++FIB; | ||||||||
2021 | } | ||||||||
2022 | |||||||||
2023 | if (!HasDups) | ||||||||
2024 | return false; | ||||||||
2025 | |||||||||
2026 | MBB->splice(Loc, TBB, TBB->begin(), TIB); | ||||||||
2027 | FBB->erase(FBB->begin(), FIB); | ||||||||
2028 | |||||||||
2029 | if (UpdateLiveIns) { | ||||||||
2030 | recomputeLiveIns(*TBB); | ||||||||
2031 | recomputeLiveIns(*FBB); | ||||||||
2032 | } | ||||||||
2033 | |||||||||
2034 | ++NumHoist; | ||||||||
2035 | return true; | ||||||||
2036 | } |
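File: | llvm/CodeGen/MachineInstrBundleIterator.h |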
1 | //===- llvm/CodeGen/MachineInstrBundleIterator.h ----------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // Defines an iterator class that bundles MachineInstr. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #ifndef LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H |
14 | #define LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H |
15 | |
16 | #include "llvm/ADT/ilist.h" |
17 | #include "llvm/ADT/simple_ilist.h" |
18 | #include <cassert> |
19 | #include <iterator> |
20 | #include <type_traits> |
21 | |
22 | namespace llvm { |
23 | |
24 | template <class T, bool IsReverse> struct MachineInstrBundleIteratorTraits; |
25 | template <class T> struct MachineInstrBundleIteratorTraits<T, false> { |
26 | using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>; |
27 | using instr_iterator = typename list_type::iterator; |
28 | using nonconst_instr_iterator = typename list_type::iterator; |
29 | using const_instr_iterator = typename list_type::const_iterator; |
30 | }; |
31 | template <class T> struct MachineInstrBundleIteratorTraits<T, true> { |
32 | using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>; |
33 | using instr_iterator = typename list_type::reverse_iterator; |
34 | using nonconst_instr_iterator = typename list_type::reverse_iterator; |
35 | using const_instr_iterator = typename list_type::const_reverse_iterator; |
36 | }; |
37 | template <class T> struct MachineInstrBundleIteratorTraits<const T, false> { |
38 | using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>; |
39 | using instr_iterator = typename list_type::const_iterator; |
40 | using nonconst_instr_iterator = typename list_type::iterator; |
41 | using const_instr_iterator = typename list_type::const_iterator; |
42 | }; |
43 | template <class T> struct MachineInstrBundleIteratorTraits<const T, true> { |
44 | using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>; |
45 | using instr_iterator = typename list_type::const_reverse_iterator; |
46 | using nonconst_instr_iterator = typename list_type::reverse_iterator; |
47 | using const_instr_iterator = typename list_type::const_reverse_iterator; |
48 | }; |
49 | |
50 | template <bool IsReverse> struct MachineInstrBundleIteratorHelper; |
51 | template <> struct MachineInstrBundleIteratorHelper<false> { |
52 | /// Get the beginning of the current bundle. |
53 | template <class Iterator> static Iterator getBundleBegin(Iterator I) { |
54 | if (!I.isEnd()) |
55 | while (I->isBundledWithPred()) |
56 | --I; |
57 | return I; |
58 | } |
59 | |
60 | /// Get the final node of the current bundle. |
61 | template <class Iterator> static Iterator getBundleFinal(Iterator I) { |
62 | if (!I.isEnd()) |
63 | while (I->isBundledWithSucc()) |
64 | ++I; |
65 | return I; |
66 | } |
67 | |
68 | /// Increment forward ilist iterator. |
69 | template <class Iterator> static void increment(Iterator &I) { |
70 | I = std::next(getBundleFinal(I)); |
71 | } |
72 | |
73 | /// Decrement forward ilist iterator. |
74 | template <class Iterator> static void decrement(Iterator &I) { |
75 | I = getBundleBegin(std::prev(I)); |
76 | } |
77 | }; |
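// An illustrative walk (assumed instruction stream): for
//   I0  {I1, I2}  I3          (I1 and I2 form one bundle)
// a forward iterator at I1 has getBundleBegin() == I1 and
// getBundleFinal() == I2, so increment() steps from I1 straight to I3,
// and decrement() from I3 lands back on I1, the bundle head.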
78 | |
79 | template <> struct MachineInstrBundleIteratorHelper<true> { |
80 | /// Get the beginning of the current bundle. |
81 | template <class Iterator> static Iterator getBundleBegin(Iterator I) { |
82 | return MachineInstrBundleIteratorHelper<false>::getBundleBegin( |
83 | I.getReverse()) |
84 | .getReverse(); |
85 | } |
86 | |
87 | /// Get the final node of the current bundle. |
88 | template <class Iterator> static Iterator getBundleFinal(Iterator I) { |
89 | return MachineInstrBundleIteratorHelper<false>::getBundleFinal( |
90 | I.getReverse()) |
91 | .getReverse(); |
92 | } |
93 | |
94 | /// Increment reverse ilist iterator. |
95 | template <class Iterator> static void increment(Iterator &I) { |
96 | I = getBundleBegin(std::next(I)); |
97 | } |
98 | |
99 | /// Decrement reverse ilist iterator. |
100 | template <class Iterator> static void decrement(Iterator &I) { |
101 | I = std::prev(getBundleFinal(I)); |
102 | } |
103 | }; |
104 | |
105 | /// MachineBasicBlock iterator that automatically skips over MIs that are |
106 | /// inside bundles (i.e. walk top level MIs only). |
107 | template <typename Ty, bool IsReverse = false> |
108 | class MachineInstrBundleIterator : MachineInstrBundleIteratorHelper<IsReverse> { |
109 | using Traits = MachineInstrBundleIteratorTraits<Ty, IsReverse>; |
110 | using instr_iterator = typename Traits::instr_iterator; |
111 | |
112 | instr_iterator MII; |
113 | |
114 | public: |
115 | using value_type = typename instr_iterator::value_type; |
116 | using difference_type = typename instr_iterator::difference_type; |
117 | using pointer = typename instr_iterator::pointer; |
118 | using reference = typename instr_iterator::reference; |
119 | using const_pointer = typename instr_iterator::const_pointer; |
120 | using const_reference = typename instr_iterator::const_reference; |
121 | using iterator_category = std::bidirectional_iterator_tag; |
122 | |
123 | private: |
124 | using nonconst_instr_iterator = typename Traits::nonconst_instr_iterator; |
125 | using const_instr_iterator = typename Traits::const_instr_iterator; |
126 | using nonconst_iterator = |
127 | MachineInstrBundleIterator<typename nonconst_instr_iterator::value_type, |
128 | IsReverse>; |
129 | using reverse_iterator = MachineInstrBundleIterator<Ty, !IsReverse>; |
130 | |
131 | public: |
132 | MachineInstrBundleIterator(instr_iterator MI) : MII(MI) { |
133 |     assert((!MI.getNodePtr() || MI.isEnd() || !MI->isBundledWithPred()) &&
134 |            "It's not legal to initialize MachineInstrBundleIterator with a "
135 |            "bundled MI");
136 | } |
137 | |
138 | MachineInstrBundleIterator(reference MI) : MII(MI) { |
139 |     assert(!MI.isBundledWithPred() && "It's not legal to initialize "
140 |                                       "MachineInstrBundleIterator with a "
141 |                                       "bundled MI");
142 | } |
143 | |
144 | MachineInstrBundleIterator(pointer MI) : MII(MI) { |
145 | // FIXME: This conversion should be explicit. |
146 |     assert((!MI || !MI->isBundledWithPred()) && "It's not legal to initialize "
147 |                                                 "MachineInstrBundleIterator "
148 |                                                 "with a bundled MI");
149 | } |
150 | |
151 | // Template allows conversion from const to nonconst. |
152 | template <class OtherTy> |
153 | MachineInstrBundleIterator( |
154 | const MachineInstrBundleIterator<OtherTy, IsReverse> &I, |
155 | std::enable_if_t<std::is_convertible<OtherTy *, Ty *>::value, void *> = |
156 | nullptr) |
157 | : MII(I.getInstrIterator()) {} |
158 | |
159 | MachineInstrBundleIterator() : MII(nullptr) {} |
160 | |
161 | /// Explicit conversion between forward/reverse iterators. |
162 | /// |
163 | /// Translate between forward and reverse iterators without changing range |
164 | /// boundaries. The resulting iterator will dereference (and have a handle) |
165 | /// to the previous node, which is somewhat unexpected; but converting the |
166 | /// two endpoints in a range will give the same range in reverse. |
167 | /// |
168 | /// This matches std::reverse_iterator conversions. |
169 | explicit MachineInstrBundleIterator( |
170 | const MachineInstrBundleIterator<Ty, !IsReverse> &I) |
171 | : MachineInstrBundleIterator(++I.getReverse()) {} |
172 | |
173 | /// Get the bundle iterator for the given instruction's bundle. |
174 | static MachineInstrBundleIterator getAtBundleBegin(instr_iterator MI) { |
175 | return MachineInstrBundleIteratorHelper<IsReverse>::getBundleBegin(MI); |
176 | } |
177 | |
178 | reference operator*() const { return *MII; } |
179 | pointer operator->() const { return &operator*(); } |
180 | |
181 | /// Check for null. |
182 | bool isValid() const { return MII.getNodePtr(); } |
183 | |
184 | friend bool operator==(const MachineInstrBundleIterator &L, |
185 | const MachineInstrBundleIterator &R) { |
186 | return L.MII == R.MII; |
187 | } |
188 | friend bool operator==(const MachineInstrBundleIterator &L, |
189 | const const_instr_iterator &R) { |
190 | return L.MII == R; // Avoid assertion about validity of R. |
191 | } |
192 | friend bool operator==(const const_instr_iterator &L, |
193 | const MachineInstrBundleIterator &R) { |
194 | return L == R.MII; // Avoid assertion about validity of L. |
195 | } |
196 | friend bool operator==(const MachineInstrBundleIterator &L, |
197 | const nonconst_instr_iterator &R) { |
198 | return L.MII == R; // Avoid assertion about validity of R. |
199 | } |
200 | friend bool operator==(const nonconst_instr_iterator &L, |
201 | const MachineInstrBundleIterator &R) { |
202 | return L == R.MII; // Avoid assertion about validity of L. |
203 | } |
204 | friend bool operator==(const MachineInstrBundleIterator &L, const_pointer R) { |
205 | return L == const_instr_iterator(R); // Avoid assertion about validity of R. |
206 | } |
207 | friend bool operator==(const_pointer L, const MachineInstrBundleIterator &R) { |
208 | return const_instr_iterator(L) == R; // Avoid assertion about validity of L. |
209 | } |
210 | friend bool operator==(const MachineInstrBundleIterator &L, |
211 | const_reference R) { |
212 | return L == &R; // Avoid assertion about validity of R. |
213 | } |
214 | friend bool operator==(const_reference L, |
215 | const MachineInstrBundleIterator &R) { |
216 | return &L == R; // Avoid assertion about validity of L. |
217 | } |
218 | |
219 | friend bool operator!=(const MachineInstrBundleIterator &L, |
220 | const MachineInstrBundleIterator &R) { |
221 | return !(L == R); |
222 | } |
223 | friend bool operator!=(const MachineInstrBundleIterator &L, |
224 | const const_instr_iterator &R) { |
225 | return !(L == R); |
226 | } |
227 | friend bool operator!=(const const_instr_iterator &L, |
228 | const MachineInstrBundleIterator &R) { |
229 | return !(L == R); |
230 | } |
231 | friend bool operator!=(const MachineInstrBundleIterator &L, |
232 | const nonconst_instr_iterator &R) { |
233 | return !(L == R); |
234 | } |
235 | friend bool operator!=(const nonconst_instr_iterator &L, |
236 | const MachineInstrBundleIterator &R) { |
237 | return !(L == R); |
238 | } |
239 | friend bool operator!=(const MachineInstrBundleIterator &L, const_pointer R) { |
240 | return !(L == R); |
241 | } |
242 | friend bool operator!=(const_pointer L, const MachineInstrBundleIterator &R) { |
243 | return !(L == R); |
244 | } |
245 | friend bool operator!=(const MachineInstrBundleIterator &L, |
246 | const_reference R) { |
247 | return !(L == R); |
248 | } |
249 | friend bool operator!=(const_reference L, |
250 | const MachineInstrBundleIterator &R) { |
251 | return !(L == R); |
252 | } |
253 | |
254 | // Increment and decrement operators... |
255 | MachineInstrBundleIterator &operator--() { |
256 | this->decrement(MII); |
257 | return *this; |
258 | } |
259 | MachineInstrBundleIterator &operator++() { |
260 | this->increment(MII); |
261 | return *this; |
262 | } |
263 | MachineInstrBundleIterator operator--(int) { |
264 | MachineInstrBundleIterator Temp = *this; |
265 | --*this; |
266 | return Temp; |
267 | } |
268 | MachineInstrBundleIterator operator++(int) { |
269 | MachineInstrBundleIterator Temp = *this; |
270 | ++*this; |
271 | return Temp; |
272 | } |
273 | |
274 | instr_iterator getInstrIterator() const { return MII; } |
275 | |
276 | nonconst_iterator getNonConstIterator() const { return MII.getNonConst(); } |
277 | |
278 | /// Get a reverse iterator to the same node. |
279 | /// |
280 | /// Gives a reverse iterator that will dereference (and have a handle) to the |
281 | /// same node. Converting the endpoint iterators in a range will give a |
282 | /// different range; for range operations, use the explicit conversions. |
283 | reverse_iterator getReverse() const { return MII.getReverse(); } |
284 | }; |
285 | |
286 | } // end namespace llvm |
287 | |
288 | #endif // LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H |
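// Illustrative sketch (standard C++ only, not part of the header above): the
// explicit forward/reverse conversion documented for MachineInstrBundleIterator
// is stated to match std::reverse_iterator, where converting both endpoints of
// a range yields the same range traversed backwards, while a single converted
// iterator refers to the *previous* element.
#include <cassert>
#include <iterator>
#include <vector>

int main() {
  std::vector<int> V = {1, 2, 3, 4};
  // Converting both endpoints gives the same range, reversed.
  auto RBegin = std::make_reverse_iterator(V.end());
  auto REnd = std::make_reverse_iterator(V.begin());
  assert(*RBegin == 4 && *std::prev(REnd) == 1);
  // A single converted iterator dereferences to the previous element.
  auto It = V.begin() + 2;                   // refers to 3
  auto RIt = std::make_reverse_iterator(It); // refers to 2
  assert(*RIt == 2);
  return 0;
}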
1 | //===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator ------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #ifndef LLVM_ADT_ILIST_ITERATOR_H |
10 | #define LLVM_ADT_ILIST_ITERATOR_H |
11 | |
12 | #include "llvm/ADT/ilist_node.h" |
13 | #include <cassert> |
14 | #include <cstddef> |
15 | #include <iterator> |
16 | #include <type_traits> |
17 | |
18 | namespace llvm { |
19 | |
20 | namespace ilist_detail { |
21 | |
22 | /// Find const-correct node types. |
23 | template <class OptionsT, bool IsConst> struct IteratorTraits; |
24 | template <class OptionsT> struct IteratorTraits<OptionsT, false> { |
25 | using value_type = typename OptionsT::value_type; |
26 | using pointer = typename OptionsT::pointer; |
27 | using reference = typename OptionsT::reference; |
28 | using node_pointer = ilist_node_impl<OptionsT> *; |
29 | using node_reference = ilist_node_impl<OptionsT> &; |
30 | }; |
31 | template <class OptionsT> struct IteratorTraits<OptionsT, true> { |
32 | using value_type = const typename OptionsT::value_type; |
33 | using pointer = typename OptionsT::const_pointer; |
34 | using reference = typename OptionsT::const_reference; |
35 | using node_pointer = const ilist_node_impl<OptionsT> *; |
36 | using node_reference = const ilist_node_impl<OptionsT> &; |
37 | }; |
38 | |
39 | template <bool IsReverse> struct IteratorHelper; |
40 | template <> struct IteratorHelper<false> : ilist_detail::NodeAccess { |
41 | using Access = ilist_detail::NodeAccess; |
42 | |
43 | template <class T> static void increment(T *&I) { I = Access::getNext(*I); } |
44 | template <class T> static void decrement(T *&I) { I = Access::getPrev(*I); } |
45 | }; |
46 | template <> struct IteratorHelper<true> : ilist_detail::NodeAccess { |
47 | using Access = ilist_detail::NodeAccess; |
48 | |
49 | template <class T> static void increment(T *&I) { I = Access::getPrev(*I); } |
50 | template <class T> static void decrement(T *&I) { I = Access::getNext(*I); } |
51 | }; |
52 | |
53 | } // end namespace ilist_detail |
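// Minimal sketch (hypothetical `Node` type, not an LLVM API): the two
// IteratorHelper specializations above only swap traversal direction, so a
// reverse iterator's increment follows the same link a forward iterator's
// decrement would follow.
#include <cassert>

struct Node {
  Node *Prev = nullptr;
  Node *Next = nullptr;
};

template <bool IsReverse> Node *advance(Node *N) {
  return IsReverse ? N->Prev : N->Next; // mirrors increment() above
}

int main() {
  Node A, B;
  A.Next = &B;
  B.Prev = &A;
  assert(advance<false>(&A) == &B); // forward: follow Next
  assert(advance<true>(&B) == &A);  // reverse: follow Prev
  return 0;
}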
54 | |
55 | /// Iterator for intrusive lists based on ilist_node. |
56 | template <class OptionsT, bool IsReverse, bool IsConst> |
57 | class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> { |
58 | friend ilist_iterator<OptionsT, IsReverse, !IsConst>; |
59 | friend ilist_iterator<OptionsT, !IsReverse, IsConst>; |
60 | friend ilist_iterator<OptionsT, !IsReverse, !IsConst>; |
61 | |
62 | using Traits = ilist_detail::IteratorTraits<OptionsT, IsConst>; |
63 | using Access = ilist_detail::SpecificNodeAccess<OptionsT>; |
64 | |
65 | public: |
66 | using value_type = typename Traits::value_type; |
67 | using pointer = typename Traits::pointer; |
68 | using reference = typename Traits::reference; |
69 | using difference_type = ptrdiff_t; |
70 | using iterator_category = std::bidirectional_iterator_tag; |
71 | using const_pointer = typename OptionsT::const_pointer; |
72 | using const_reference = typename OptionsT::const_reference; |
73 | |
74 | private: |
75 | using node_pointer = typename Traits::node_pointer; |
76 | using node_reference = typename Traits::node_reference; |
77 | |
78 | node_pointer NodePtr = nullptr; |
79 | |
80 | public: |
81 | /// Create from an ilist_node. |
82 | explicit ilist_iterator(node_reference N) : NodePtr(&N) {} |
83 | |
84 | explicit ilist_iterator(pointer NP) : NodePtr(Access::getNodePtr(NP)) {} |
85 | explicit ilist_iterator(reference NR) : NodePtr(Access::getNodePtr(&NR)) {} |
86 | ilist_iterator() = default; |
87 | |
88 | // This is templated so that we can allow constructing a const iterator from |
89 | // a nonconst iterator... |
90 | template <bool RHSIsConst> |
91 | ilist_iterator(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS, |
92 | std::enable_if_t<IsConst || !RHSIsConst, void *> = nullptr) |
93 | : NodePtr(RHS.NodePtr) {} |
94 | |
95 | // This is templated so that we can allow assigning to a const iterator from |
96 | // a nonconst iterator... |
97 | template <bool RHSIsConst> |
98 | std::enable_if_t<IsConst || !RHSIsConst, ilist_iterator &> |
99 | operator=(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS) { |
100 | NodePtr = RHS.NodePtr; |
101 | return *this; |
102 | } |
103 | |
104 | /// Explicit conversion between forward/reverse iterators. |
105 | /// |
106 | /// Translate between forward and reverse iterators without changing range |
107 | /// boundaries. The resulting iterator will dereference (and have a handle) |
108 | /// to the previous node, which is somewhat unexpected; but converting the |
109 | /// two endpoints in a range will give the same range in reverse. |
110 | /// |
111 | /// This matches std::reverse_iterator conversions. |
112 | explicit ilist_iterator( |
113 | const ilist_iterator<OptionsT, !IsReverse, IsConst> &RHS) |
114 | : ilist_iterator(++RHS.getReverse()) {} |
115 | |
116 | /// Get a reverse iterator to the same node. |
117 | /// |
118 | /// Gives a reverse iterator that will dereference (and have a handle) to the |
119 | /// same node. Converting the endpoint iterators in a range will give a |
120 | /// different range; for range operations, use the explicit conversions. |
121 | ilist_iterator<OptionsT, !IsReverse, IsConst> getReverse() const { |
122 | if (NodePtr) |
123 | return ilist_iterator<OptionsT, !IsReverse, IsConst>(*NodePtr); |
124 | return ilist_iterator<OptionsT, !IsReverse, IsConst>(); |
125 | } |
126 | |
127 | /// Const-cast. |
128 | ilist_iterator<OptionsT, IsReverse, false> getNonConst() const { |
129 | if (NodePtr) |
130 | return ilist_iterator<OptionsT, IsReverse, false>( |
131 | const_cast<typename ilist_iterator<OptionsT, IsReverse, |
132 | false>::node_reference>(*NodePtr)); |
133 | return ilist_iterator<OptionsT, IsReverse, false>(); |
134 | } |
135 | |
136 | // Accessors... |
137 | reference operator*() const { |
138 | assert(!NodePtr->isKnownSentinel()); |
139 | return *Access::getValuePtr(NodePtr); |
140 | } |
141 | pointer operator->() const { return &operator*(); } |
142 | |
143 | // Comparison operators |
144 | friend bool operator==(const ilist_iterator &LHS, const ilist_iterator &RHS) { |
145 | return LHS.NodePtr == RHS.NodePtr; |
146 | } |
147 | friend bool operator!=(const ilist_iterator &LHS, const ilist_iterator &RHS) { |
148 | return LHS.NodePtr != RHS.NodePtr; |
149 | } |
150 | |
151 | // Increment and decrement operators... |
152 | ilist_iterator &operator--() { |
153 | NodePtr = IsReverse ? NodePtr->getNext() : NodePtr->getPrev(); |
154 | return *this; |
155 | } |
156 | ilist_iterator &operator++() { |
157 | NodePtr = IsReverse ? NodePtr->getPrev() : NodePtr->getNext(); |
158 | return *this; |
159 | } |
160 | ilist_iterator operator--(int) { |
161 | ilist_iterator tmp = *this; |
162 | --*this; |
163 | return tmp; |
164 | } |
165 | ilist_iterator operator++(int) { |
166 | ilist_iterator tmp = *this; |
167 | ++*this; |
168 | return tmp; |
169 | } |
170 | |
171 | /// Get the underlying ilist_node. |
172 | node_pointer getNodePtr() const { return static_cast<node_pointer>(NodePtr); } |
173 | |
174 | /// Check for end. Only valid if ilist_sentinel_tracking<true>. |
175 | bool isEnd() const { return NodePtr ? NodePtr->isSentinel() : false; } |
176 | }; |
177 | |
178 | template <typename From> struct simplify_type; |
179 | |
180 | /// Allow ilist_iterators to convert into pointers to a node automatically when |
181 | /// used by the dyn_cast, cast, isa mechanisms... |
182 | /// |
183 | /// FIXME: remove this, since there is no implicit conversion to NodeTy. |
184 | template <class OptionsT, bool IsConst> |
185 | struct simplify_type<ilist_iterator<OptionsT, false, IsConst>> { |
186 | using iterator = ilist_iterator<OptionsT, false, IsConst>; |
187 | using SimpleType = typename iterator::pointer; |
188 | |
189 | static SimpleType getSimplifiedValue(const iterator &Node) { return &*Node; } |
190 | }; |
191 | template <class OptionsT, bool IsConst> |
192 | struct simplify_type<const ilist_iterator<OptionsT, false, IsConst>> |
193 | : simplify_type<ilist_iterator<OptionsT, false, IsConst>> {}; |
194 | |
195 | } // end namespace llvm |
196 | |
197 | #endif // LLVM_ADT_ILIST_ITERATOR_H |
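// Hedged usage sketch (assumes LLVM's llvm/ADT/simple_ilist.h is available):
// values derive from ilist_node and are linked in place, so the list owns no
// storage and ilist_iterator walks the embedded prev/next links.
#include "llvm/ADT/simple_ilist.h"
#include <cassert>

struct Item : llvm::ilist_node<Item> {
  int Value;
  explicit Item(int V) : Value(V) {}
};

int main() {
  Item A(1), B(2);            // caller owns the nodes
  llvm::simple_ilist<Item> L;
  L.push_back(A);
  L.push_back(B);
  int Sum = 0;
  for (Item &I : L)
    Sum += I.Value;           // iteration uses ilist_iterator
  assert(Sum == 3);
  L.clear();                  // unlink only; does not destroy A or B
  return 0;
}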
1 | //===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the SmallVector class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #ifndef LLVM_ADT_SMALLVECTOR_H |
14 | #define LLVM_ADT_SMALLVECTOR_H |
15 | |
16 | #include "llvm/ADT/iterator_range.h" |
17 | #include "llvm/Support/Compiler.h" |
18 | #include "llvm/Support/ErrorHandling.h" |
19 | #include "llvm/Support/MemAlloc.h" |
20 | #include "llvm/Support/type_traits.h" |
21 | #include <algorithm> |
22 | #include <cassert> |
23 | #include <cstddef> |
24 | #include <cstdlib> |
25 | #include <cstring> |
26 | #include <functional> |
27 | #include <initializer_list> |
28 | #include <iterator> |
29 | #include <limits> |
30 | #include <memory> |
31 | #include <new> |
32 | #include <type_traits> |
33 | #include <utility> |
34 | |
35 | namespace llvm { |
36 | |
37 | /// This is all the stuff common to all SmallVectors. |
38 | /// |
39 | /// The template parameter specifies the type which should be used to hold the |
40 | /// Size and Capacity of the SmallVector, so it can be adjusted. |
41 | /// Using 32 bit size is desirable to shrink the size of the SmallVector. |
42 | /// Using 64 bit size is desirable for cases like SmallVector<char>, where a |
43 | /// 32 bit size would limit the vector to ~4GB. SmallVectors are used for |
44 | /// buffering bitcode output - which can exceed 4GB. |
45 | template <class Size_T> class SmallVectorBase { |
46 | protected: |
47 | void *BeginX; |
48 | Size_T Size = 0, Capacity; |
49 | |
50 | /// The maximum value of the Size_T used. |
51 | static constexpr size_t SizeTypeMax() { |
52 | return std::numeric_limits<Size_T>::max(); |
53 | } |
54 | |
55 | SmallVectorBase() = delete; |
56 | SmallVectorBase(void *FirstEl, size_t TotalCapacity) |
57 | : BeginX(FirstEl), Capacity(TotalCapacity) {} |
58 | |
59 | /// This is a helper for \a grow() that's out of line to reduce code |
60 | /// duplication. This function will report a fatal error if it can't grow at |
61 | /// least to \p MinSize. |
62 | void *mallocForGrow(size_t MinSize, size_t TSize, size_t &NewCapacity); |
63 | |
64 | /// This is an implementation of the grow() method which only works |
65 | /// on POD-like data types and is out of line to reduce code duplication. |
66 | /// This function will report a fatal error if it cannot increase capacity. |
67 | void grow_pod(void *FirstEl, size_t MinSize, size_t TSize); |
68 | |
69 | public: |
70 | size_t size() const { return Size; } |
71 | size_t capacity() const { return Capacity; } |
72 | |
73 | LLVM_NODISCARD bool empty() const { return !Size; } |
74 | |
75 | /// Set the array size to \p N, which the current array must have enough |
76 | /// capacity for. |
77 | /// |
78 | /// This does not construct or destroy any elements in the vector. |
79 | /// |
80 | /// Clients can use this in conjunction with capacity() to write past the end |
81 | /// of the buffer when they know that more elements are available, and only |
82 | /// update the size later. This avoids the cost of value initializing elements |
83 | /// which will only be overwritten. |
84 | void set_size(size_t N) { |
85 | assert(N <= capacity()); |
86 | Size = N; |
87 | } |
88 | }; |
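// Hedged sketch of the set_size() pattern documented above (assumes
// llvm/ADT/SmallVector.h): write past end() into already-reserved capacity,
// then publish the new size, skipping value-initialization of elements that
// would only be overwritten.
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <cstring>

int main() {
  llvm::SmallVector<char, 0> Buf;
  Buf.reserve(5);                      // guarantee capacity first
  std::memcpy(Buf.data(), "hello", 5); // fill the raw buffer directly
  Buf.set_size(5);                     // now make the elements visible
  assert(Buf.size() == 5 && Buf[0] == 'h');
  return 0;
}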
89 | |
90 | template <class T> |
91 | using SmallVectorSizeType = |
92 | typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t, |
93 | uint32_t>::type; |
94 | |
95 | /// Figure out the offset of the first element. |
96 | template <class T, typename = void> struct SmallVectorAlignmentAndSize { |
97 | alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof( |
98 | SmallVectorBase<SmallVectorSizeType<T>>)]; |
99 | alignas(T) char FirstEl[sizeof(T)]; |
100 | }; |
101 | |
102 | /// This is the part of SmallVectorTemplateBase which does not depend on whether |
103 | /// the type T is a POD. The extra dummy template argument is used by ArrayRef |
104 | /// to avoid unnecessarily requiring T to be complete. |
105 | template <typename T, typename = void> |
106 | class SmallVectorTemplateCommon |
107 | : public SmallVectorBase<SmallVectorSizeType<T>> { |
108 | using Base = SmallVectorBase<SmallVectorSizeType<T>>; |
109 | |
110 | /// Find the address of the first element. For this pointer math to be valid |
111 | /// with small-size of 0 for T with lots of alignment, it's important that |
112 | /// SmallVectorStorage is properly-aligned even for small-size of 0. |
113 | void *getFirstEl() const { |
114 | return const_cast<void *>(reinterpret_cast<const void *>( |
115 | reinterpret_cast<const char *>(this) + |
116 | offsetof(SmallVectorAlignmentAndSize<T>, FirstEl))); |
117 | } |
118 | // Space after 'FirstEl' is clobbered, do not add any instance vars after it. |
119 | |
120 | protected: |
121 | SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {} |
122 | |
123 | void grow_pod(size_t MinSize, size_t TSize) { |
124 | Base::grow_pod(getFirstEl(), MinSize, TSize); |
125 | } |
126 | |
127 | /// Return true if this is a smallvector which has not had dynamic |
128 | /// memory allocated for it. |
129 | bool isSmall() const { return this->BeginX == getFirstEl(); } |
130 | |
131 | /// Put this vector in a state of being small. |
132 | void resetToSmall() { |
133 | this->BeginX = getFirstEl(); |
134 | this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect. |
135 | } |
136 | |
137 | /// Return true if V is an internal reference to the given range. |
138 | bool isReferenceToRange(const void *V, const void *First, const void *Last) const { |
139 | // Use std::less to avoid UB. |
140 | std::less<> LessThan; |
141 | return !LessThan(V, First) && LessThan(V, Last); |
142 | } |
143 | |
144 | /// Return true if V is an internal reference to this vector. |
145 | bool isReferenceToStorage(const void *V) const { |
146 | return isReferenceToRange(V, this->begin(), this->end()); |
147 | } |
148 | |
149 | /// Return true if First and Last form a valid (possibly empty) range in this |
150 | /// vector's storage. |
151 | bool isRangeInStorage(const void *First, const void *Last) const { |
152 | // Use std::less to avoid UB. |
153 | std::less<> LessThan; |
154 | return !LessThan(First, this->begin()) && !LessThan(Last, First) && |
155 | !LessThan(this->end(), Last); |
156 | } |
157 | |
158 | /// Return true unless Elt will be invalidated by resizing the vector to |
159 | /// NewSize. |
160 | bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) { |
161 | // Past the end. |
162 | if (LLVM_LIKELY(!isReferenceToStorage(Elt))) |
163 | return true; |
164 | |
165 | // Return false if Elt will be destroyed by shrinking. |
166 | if (NewSize <= this->size()) |
167 | return Elt < this->begin() + NewSize; |
168 | |
169 | // Return false if we need to grow. |
170 | return NewSize <= this->capacity(); |
171 | } |
172 | |
173 | /// Check whether Elt will be invalidated by resizing the vector to NewSize. |
174 | void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) { |
175 | assert(isSafeToReferenceAfterResize(Elt, NewSize) && |
176 | "Attempting to reference an element of the vector in an operation " |
177 | "that invalidates it"); |
178 | } |
179 | |
180 | /// Check whether Elt will be invalidated by increasing the size of the |
181 | /// vector by N. |
182 | void assertSafeToAdd(const void *Elt, size_t N = 1) { |
183 | this->assertSafeToReferenceAfterResize(Elt, this->size() + N); |
184 | } |
185 | |
186 | /// Check whether any part of the range will be invalidated by clearing. |
187 | void assertSafeToReferenceAfterClear(const T *From, const T *To) { |
188 | if (From == To) |
189 | return; |
190 | this->assertSafeToReferenceAfterResize(From, 0); |
191 | this->assertSafeToReferenceAfterResize(To - 1, 0); |
192 | } |
193 | template < |
194 | class ItTy, |
195 | std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value, |
196 | bool> = false> |
197 | void assertSafeToReferenceAfterClear(ItTy, ItTy) {} |
198 | |
199 | /// Check whether any part of the range will be invalidated by growing. |
200 | void assertSafeToAddRange(const T *From, const T *To) { |
201 | if (From == To) |
202 | return; |
203 | this->assertSafeToAdd(From, To - From); |
204 | this->assertSafeToAdd(To - 1, To - From); |
205 | } |
206 | template < |
207 | class ItTy, |
208 | std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value, |
209 | bool> = false> |
210 | void assertSafeToAddRange(ItTy, ItTy) {} |
211 | |
212 | /// Reserve enough space to add one element, and return the updated element |
213 | /// pointer in case it was a reference to the storage. |
214 | template <class U> |
215 | static const T *reserveForParamAndGetAddressImpl(U *This, const T &Elt, |
216 | size_t N) { |
217 | size_t NewSize = This->size() + N; |
218 | if (LLVM_LIKELY(NewSize <= This->capacity())) |
219 | return &Elt; |
220 | |
221 | bool ReferencesStorage = false; |
222 | int64_t Index = -1; |
223 | if (!U::TakesParamByValue) { |
224 | if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))) { |
225 | ReferencesStorage = true; |
226 | Index = &Elt - This->begin(); |
227 | } |
228 | } |
229 | This->grow(NewSize); |
230 | return ReferencesStorage ? This->begin() + Index : &Elt; |
231 | } |
232 | |
233 | public: |
234 | using size_type = size_t; |
235 | using difference_type = ptrdiff_t; |
236 | using value_type = T; |
237 | using iterator = T *; |
238 | using const_iterator = const T *; |
239 | |
240 | using const_reverse_iterator = std::reverse_iterator<const_iterator>; |
241 | using reverse_iterator = std::reverse_iterator<iterator>; |
242 | |
243 | using reference = T &; |
244 | using const_reference = const T &; |
245 | using pointer = T *; |
246 | using const_pointer = const T *; |
247 | |
248 | using Base::capacity; |
249 | using Base::empty; |
250 | using Base::size; |
251 | |
252 | // forward iterator creation methods. |
253 | iterator begin() { return (iterator)this->BeginX; } |
254 | const_iterator begin() const { return (const_iterator)this->BeginX; } |
255 | iterator end() { return begin() + size(); } |
256 | const_iterator end() const { return begin() + size(); } |
257 | |
258 | // reverse iterator creation methods. |
259 | reverse_iterator rbegin() { return reverse_iterator(end()); } |
260 | const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); } |
261 | reverse_iterator rend() { return reverse_iterator(begin()); } |
262 | const_reverse_iterator rend() const { return const_reverse_iterator(begin());} |
263 | |
264 | size_type size_in_bytes() const { return size() * sizeof(T); } |
265 | size_type max_size() const { |
266 | return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T)); |
267 | } |
268 | |
269 | size_t capacity_in_bytes() const { return capacity() * sizeof(T); } |
270 | |
271 | /// Return a pointer to the vector's buffer, even if empty(). |
272 | pointer data() { return pointer(begin()); } |
273 | /// Return a pointer to the vector's buffer, even if empty(). |
274 | const_pointer data() const { return const_pointer(begin()); } |
275 | |
276 | reference operator[](size_type idx) { |
277 | assert(idx < size()); |
278 | return begin()[idx]; |
279 | } |
280 | const_reference operator[](size_type idx) const { |
281 | assert(idx < size()); |
282 | return begin()[idx]; |
283 | } |
284 | |
285 | reference front() { |
286 | assert(!empty()); |
287 | return begin()[0]; |
288 | } |
289 | const_reference front() const { |
290 | assert(!empty()); |
291 | return begin()[0]; |
292 | } |
293 | |
294 | reference back() { |
295 | assert(!empty()); |
296 | return end()[-1]; |
297 | } |
298 | const_reference back() const { |
299 | assert(!empty()); |
300 | return end()[-1]; |
301 | } |
302 | }; |
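// Hedged sketch (assumes llvm/ADT/SmallVector.h): for element types passed by
// reference, reserveForParamAndGetAddressImpl() above re-derives the
// argument's address from its index after a grow(), so pushing one of the
// vector's own elements is safe even when that push reallocates.
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <string>

int main() {
  llvm::SmallVector<std::string, 2> V = {"a", "b"}; // inline storage is full
  V.push_back(V[0]);                                // reallocates, still safe
  assert(V.size() == 3 && V[2] == "a");
  return 0;
}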
303 | |
304 | /// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put |
305 | /// method implementations that are designed to work with non-trivial T's. |
306 | /// |
307 | /// We approximate is_trivially_copyable with trivial move/copy construction and |
308 | /// trivial destruction. While the standard doesn't specify that you're allowed |
309 | /// copy these types with memcpy, there is no way for the type to observe this. |
310 | /// This catches the important case of std::pair<POD, POD>, which is not |
311 | /// trivially assignable. |
312 | template <typename T, bool = (is_trivially_copy_constructible<T>::value) && |
313 | (is_trivially_move_constructible<T>::value) && |
314 | std::is_trivially_destructible<T>::value> |
315 | class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> { |
316 | friend class SmallVectorTemplateCommon<T>; |
317 | |
318 | protected: |
319 | static constexpr bool TakesParamByValue = false; |
320 | using ValueParamT = const T &; |
321 | |
322 | SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {} |
323 | |
324 | static void destroy_range(T *S, T *E) { |
325 | while (S != E) { |
326 | --E; |
327 | E->~T(); |
328 | } |
329 | } |
330 | |
331 | /// Move the range [I, E) into the uninitialized memory starting with "Dest", |
332 | /// constructing elements as needed. |
333 | template<typename It1, typename It2> |
334 | static void uninitialized_move(It1 I, It1 E, It2 Dest) { |
335 | std::uninitialized_copy(std::make_move_iterator(I), |
336 | std::make_move_iterator(E), Dest); |
337 | } |
338 | |
339 | /// Copy the range [I, E) onto the uninitialized memory starting with "Dest", |
340 | /// constructing elements as needed. |
341 | template<typename It1, typename It2> |
342 | static void uninitialized_copy(It1 I, It1 E, It2 Dest) { |
343 | std::uninitialized_copy(I, E, Dest); |
344 | } |
345 | |
346 | /// Grow the allocated memory (without initializing new elements), doubling |
347 | /// the size of the allocated memory. Guarantees space for at least one more |
348 | /// element, or MinSize more elements if specified. |
349 | void grow(size_t MinSize = 0); |
350 | |
351 | /// Create a new allocation big enough for \p MinSize and pass back its size |
352 | /// in \p NewCapacity. This is the first section of \a grow(). |
353 | T *mallocForGrow(size_t MinSize, size_t &NewCapacity) { |
354 | return static_cast<T *>( |
355 | SmallVectorBase<SmallVectorSizeType<T>>::mallocForGrow( |
356 | MinSize, sizeof(T), NewCapacity)); |
357 | } |
358 | |
359 | /// Move existing elements over to the new allocation \p NewElts, the middle |
360 | /// section of \a grow(). |
361 | void moveElementsForGrow(T *NewElts); |
362 | |
363 | /// Transfer ownership of the allocation, finishing up \a grow(). |
364 | void takeAllocationForGrow(T *NewElts, size_t NewCapacity); |
365 | |
366 | /// Reserve enough space to add one element, and return the updated element |
367 | /// pointer in case it was a reference to the storage. |
368 | const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) { |
369 | return this->reserveForParamAndGetAddressImpl(this, Elt, N); |
370 | } |
371 | |
372 | /// Reserve enough space to add one element, and return the updated element |
373 | /// pointer in case it was a reference to the storage. |
374 | T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) { |
375 | return const_cast<T *>( |
376 | this->reserveForParamAndGetAddressImpl(this, Elt, N)); |
377 | } |
378 | |
379 | static T &&forward_value_param(T &&V) { return std::move(V); } |
380 | static const T &forward_value_param(const T &V) { return V; } |
381 | |
382 | void growAndAssign(size_t NumElts, const T &Elt) { |
383 | // Grow manually in case Elt is an internal reference. |
384 | size_t NewCapacity; |
385 | T *NewElts = mallocForGrow(NumElts, NewCapacity); |
386 | std::uninitialized_fill_n(NewElts, NumElts, Elt); |
387 | this->destroy_range(this->begin(), this->end()); |
388 | takeAllocationForGrow(NewElts, NewCapacity); |
389 | this->set_size(NumElts); |
390 | } |
391 | |
392 | template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) { |
393 | // Grow manually in case one of Args is an internal reference. |
394 | size_t NewCapacity; |
395 | T *NewElts = mallocForGrow(0, NewCapacity); |
396 | ::new ((void *)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...); |
397 | moveElementsForGrow(NewElts); |
398 | takeAllocationForGrow(NewElts, NewCapacity); |
399 | this->set_size(this->size() + 1); |
400 | return this->back(); |
401 | } |
402 | |
403 | public: |
404 | void push_back(const T &Elt) { |
405 | const T *EltPtr = reserveForParamAndGetAddress(Elt); |
406 | ::new ((void *)this->end()) T(*EltPtr); |
407 | this->set_size(this->size() + 1); |
408 | } |
409 | |
410 | void push_back(T &&Elt) { |
411 | T *EltPtr = reserveForParamAndGetAddress(Elt); |
412 | ::new ((void *)this->end()) T(::std::move(*EltPtr)); |
413 | this->set_size(this->size() + 1); |
414 | } |
415 | |
416 | void pop_back() { |
417 | this->set_size(this->size() - 1); |
418 | this->end()->~T(); |
419 | } |
420 | }; |
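// Hedged sketch (assumes llvm/ADT/SmallVector.h): growAndEmplaceBack() above
// constructs the new element in the fresh allocation *before* moving the old
// elements over, so emplacing a reference into the vector's own storage
// survives reallocation even for non-trivial element types.
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <string>

int main() {
  llvm::SmallVector<std::string, 1> V = {"abc"}; // the single inline slot is full
  V.emplace_back(V.front());                     // triggers a grow
  assert(V.size() == 2 && V[1] == "abc");
  return 0;
}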
421 | |
422 | // Define this out-of-line to dissuade the C++ compiler from inlining it. |
423 | template <typename T, bool TriviallyCopyable> |
424 | void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) { |
425 | size_t NewCapacity; |
426 | T *NewElts = mallocForGrow(MinSize, NewCapacity); |
427 | moveElementsForGrow(NewElts); |
428 | takeAllocationForGrow(NewElts, NewCapacity); |
429 | } |
430 | |
431 | // Define this out-of-line to dissuade the C++ compiler from inlining it. |
432 | template <typename T, bool TriviallyCopyable> |
433 | void SmallVectorTemplateBase<T, TriviallyCopyable>::moveElementsForGrow( |
434 | T *NewElts) { |
435 | // Move the elements over. |
436 | this->uninitialized_move(this->begin(), this->end(), NewElts); |
437 | |
438 | // Destroy the original elements. |
439 | destroy_range(this->begin(), this->end()); |
440 | } |
441 | |
442 | // Define this out-of-line to dissuade the C++ compiler from inlining it. |
443 | template <typename T, bool TriviallyCopyable> |
444 | void SmallVectorTemplateBase<T, TriviallyCopyable>::takeAllocationForGrow( |
445 | T *NewElts, size_t NewCapacity) { |
446 | // If this wasn't grown from the inline copy, deallocate the old space. |
447 | if (!this->isSmall()) |
448 | free(this->begin()); |
449 | |
450 | this->BeginX = NewElts; |
451 | this->Capacity = NewCapacity; |
452 | } |
453 | |
454 | /// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put |
455 | /// method implementations that are designed to work with trivially copyable |
456 | /// T's. This allows using memcpy in place of copy/move construction and |
457 | /// skipping destruction. |
458 | template <typename T> |
459 | class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> { |
460 | friend class SmallVectorTemplateCommon<T>; |
461 | |
462 | protected: |
463 | /// True if it's cheap enough to take parameters by value. Doing so avoids |
464 | /// overhead related to mitigations for reference invalidation. |
465 | static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void *); |
466 | |
467 | /// Either const T& or T, depending on whether it's cheap enough to take |
468 | /// parameters by value. |
469 | using ValueParamT = |
470 | typename std::conditional<TakesParamByValue, T, const T &>::type; |
471 | |
472 | SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {} |
473 | |
474 | // No need to do a destroy loop for POD's. |
475 | static void destroy_range(T *, T *) {} |
476 | |
477 | /// Move the range [I, E) onto the uninitialized memory |
478 | /// starting with "Dest", constructing elements into it as needed. |
479 | template<typename It1, typename It2> |
480 | static void uninitialized_move(It1 I, It1 E, It2 Dest) { |
481 | // Just do a copy. |
482 | uninitialized_copy(I, E, Dest); |
483 | } |
484 | |
485 | /// Copy the range [I, E) onto the uninitialized memory |
486 | /// starting with "Dest", constructing elements into it as needed. |
487 | template<typename It1, typename It2> |
488 | static void uninitialized_copy(It1 I, It1 E, It2 Dest) { |
489 | // Arbitrary iterator types; just use the basic implementation. |
490 | std::uninitialized_copy(I, E, Dest); |
491 | } |
492 | |
493 | /// Copy the range [I, E) onto the uninitialized memory |
494 | /// starting with "Dest", constructing elements into it as needed. |
495 | template <typename T1, typename T2> |
496 | static void uninitialized_copy( |
497 | T1 *I, T1 *E, T2 *Dest, |
498 | std::enable_if_t<std::is_same<typename std::remove_const<T1>::type, |
499 | T2>::value> * = nullptr) { |
500 | // Use memcpy for PODs iterated by pointers (which includes SmallVector |
501 | // iterators): std::uninitialized_copy optimizes to memmove, but we can |
502 | // use memcpy here. Note that I and E are iterators and thus might be |
503 | // invalid for memcpy if they are equal. |
504 | if (I != E) |
505 | memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T)); |
506 | } |
507 | |
508 | /// Double the size of the allocated memory, guaranteeing space for at |
509 | /// least one more element or MinSize if specified. |
510 | void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); } |
511 | |
512 | /// Reserve enough space to add one element, and return the updated element |
513 | /// pointer in case it was a reference to the storage. |
514 | const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) { |
515 | return this->reserveForParamAndGetAddressImpl(this, Elt, N); |
516 | } |
517 | |
518 | /// Reserve enough space to add one element, and return the updated element |
519 | /// pointer in case it was a reference to the storage. |
520 | T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) { |
521 | return const_cast<T *>( |
522 | this->reserveForParamAndGetAddressImpl(this, Elt, N)); |
523 | } |
524 | |
525 | /// Copy \p V or return a reference, depending on \a ValueParamT. |
526 | static ValueParamT forward_value_param(ValueParamT V) { return V; } |
527 | |
528 | void growAndAssign(size_t NumElts, T Elt) { |
529 | // Elt has been copied in case it's an internal reference, side-stepping |
530 | // reference invalidation problems without losing the realloc optimization. |
531 | this->set_size(0); |
532 | this->grow(NumElts); |
533 | std::uninitialized_fill_n(this->begin(), NumElts, Elt); |
534 | this->set_size(NumElts); |
535 | } |
536 | |
537 | template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) { |
538 | // Use push_back with a copy in case Args has an internal reference, |
539 | // side-stepping reference invalidation problems without losing the realloc |
540 | // optimization. |
541 | push_back(T(std::forward<ArgTypes>(Args)...)); |
542 | return this->back(); |
543 | } |
544 | |
545 | public: |
546 | void push_back(ValueParamT Elt) { |
547 | const T *EltPtr = reserveForParamAndGetAddress(Elt); |
548 | memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T)); |
549 | this->set_size(this->size() + 1); |
550 | } |
551 | |
552 | void pop_back() { this->set_size(this->size() - 1); } |
553 | }; |
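// Hedged illustration of the TakesParamByValue rule above: a trivially
// copyable type no larger than two pointers is passed by value, so push_back
// holds its own copy and needs no aliasing mitigation at all. On common
// 32- and 64-bit targets, plain int qualifies.
static_assert(sizeof(int) <= 2 * sizeof(void *),
              "int is cheap enough to pass by value under the rule above");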
554 | |
555 | /// This class consists of common code factored out of the SmallVector class to |
556 | /// reduce code duplication based on the SmallVector 'N' template parameter. |
557 | template <typename T> |
558 | class SmallVectorImpl : public SmallVectorTemplateBase<T> { |
559 | using SuperClass = SmallVectorTemplateBase<T>; |
560 | |
561 | public: |
562 | using iterator = typename SuperClass::iterator; |
563 | using const_iterator = typename SuperClass::const_iterator; |
564 | using reference = typename SuperClass::reference; |
565 | using size_type = typename SuperClass::size_type; |
566 | |
567 | protected: |
568 | using SmallVectorTemplateBase<T>::TakesParamByValue; |
569 | using ValueParamT = typename SuperClass::ValueParamT; |
570 | |
571 | // Default ctor - Initialize to empty. |
572 | explicit SmallVectorImpl(unsigned N) |
573 | : SmallVectorTemplateBase<T>(N) {} |
574 | |
575 | public: |
576 | SmallVectorImpl(const SmallVectorImpl &) = delete; |
577 | |
578 | ~SmallVectorImpl() { |
579 | // Subclass has already destructed this vector's elements. |
580 | // If this wasn't grown from the inline copy, deallocate the old space. |
581 | if (!this->isSmall()) |
582 | free(this->begin()); |
583 | } |
584 | |
585 | void clear() { |
586 | this->destroy_range(this->begin(), this->end()); |
587 | this->Size = 0; |
588 | } |
589 | |
590 | private: |
591 | template <bool ForOverwrite> void resizeImpl(size_type N) { |
592 | if (N < this->size()) { |
593 | this->pop_back_n(this->size() - N); |
594 | } else if (N > this->size()) { |
595 | this->reserve(N); |
596 | for (auto I = this->end(), E = this->begin() + N; I != E; ++I) |
597 | if (ForOverwrite) |
598 | new (&*I) T; |
599 | else |
600 | new (&*I) T(); |
601 | this->set_size(N); |
602 | } |
603 | } |
604 | |
605 | public: |
606 | void resize(size_type N) { resizeImpl<false>(N); } |
607 | |
608 | /// Like resize, but since \ref T is POD, the new values won't be initialized. |
609 | void resize_for_overwrite(size_type N) { resizeImpl<true>(N); } |
610 | |
611 | void resize(size_type N, ValueParamT NV) { |
612 | if (N == this->size()) |
613 | return; |
614 | |
615 | if (N < this->size()) { |
616 | this->pop_back_n(this->size() - N); |
617 | return; |
618 | } |
619 | |
620 | // N > this->size(). Defer to append. |
621 | this->append(N - this->size(), NV); |
622 | } |
623 | |
624 | void reserve(size_type N) { |
625 | if (this->capacity() < N) |
626 | this->grow(N); |
627 | } |
628 | |
629 | void pop_back_n(size_type NumItems) { |
630 | assert(this->size() >= NumItems); |
631 | this->destroy_range(this->end() - NumItems, this->end()); |
632 | this->set_size(this->size() - NumItems); |
633 | } |
634 | |
635 | LLVM_NODISCARD T pop_back_val() { |
636 | T Result = ::std::move(this->back()); |
637 | this->pop_back(); |
638 | return Result; |
639 | } |
640 | |
641 | void swap(SmallVectorImpl &RHS); |
642 | |
643 | /// Add the specified range to the end of the SmallVector. |
644 | template <typename in_iter, |
645 | typename = std::enable_if_t<std::is_convertible< |
646 | typename std::iterator_traits<in_iter>::iterator_category, |
647 | std::input_iterator_tag>::value>> |
648 | void append(in_iter in_start, in_iter in_end) { |
649 | this->assertSafeToAddRange(in_start, in_end); |
650 | size_type NumInputs = std::distance(in_start, in_end); |
651 | this->reserve(this->size() + NumInputs); |
652 | this->uninitialized_copy(in_start, in_end, this->end()); |
653 | this->set_size(this->size() + NumInputs); |
654 | } |
655 | |
656 | /// Append \p NumInputs copies of \p Elt to the end. |
657 | void append(size_type NumInputs, ValueParamT Elt) { |
658 | const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs); |
659 | std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr); |
660 | this->set_size(this->size() + NumInputs); |
661 | } |
662 | |
663 | void append(std::initializer_list<T> IL) { |
664 | append(IL.begin(), IL.end()); |
665 | } |
666 | |
667 | void append(const SmallVectorImpl &RHS) { append(RHS.begin(), RHS.end()); } |
668 | |
669 | void assign(size_type NumElts, ValueParamT Elt) { |
670 | // Note that Elt could be an internal reference. |
671 | if (NumElts > this->capacity()) { |
672 | this->growAndAssign(NumElts, Elt); |
673 | return; |
674 | } |
675 | |
676 | // Assign over existing elements. |
677 | std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt); |
678 | if (NumElts > this->size()) |
679 | std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt); |
680 | else if (NumElts < this->size()) |
681 | this->destroy_range(this->begin() + NumElts, this->end()); |
682 | this->set_size(NumElts); |
683 | } |
684 | |
685 | // FIXME: Consider assigning over existing elements, rather than clearing & |
686 | // re-initializing them - for all assign(...) variants. |
687 | |
688 | template <typename in_iter, |
689 | typename = std::enable_if_t<std::is_convertible< |
690 | typename std::iterator_traits<in_iter>::iterator_category, |
691 | std::input_iterator_tag>::value>> |
692 | void assign(in_iter in_start, in_iter in_end) { |
693 | this->assertSafeToReferenceAfterClear(in_start, in_end); |
694 | clear(); |
695 | append(in_start, in_end); |
696 | } |
697 | |
698 | void assign(std::initializer_list<T> IL) { |
699 | clear(); |
700 | append(IL); |
701 | } |
702 | |
703 | void assign(const SmallVectorImpl &RHS) { assign(RHS.begin(), RHS.end()); } |
704 | |
705 | iterator erase(const_iterator CI) { |
706 | // Just cast away constness because this is a non-const member function. |
707 | iterator I = const_cast<iterator>(CI); |
708 | |
709 | assert(this->isReferenceToStorage(CI) && "Iterator to erase is out of bounds."); |
710 | |
711 | iterator N = I; |
712 | // Shift all elts down one. |
713 | std::move(I+1, this->end(), I); |
714 | // Drop the last elt. |
715 | this->pop_back(); |
716 | return(N); |
717 | } |
718 | |
719 | iterator erase(const_iterator CS, const_iterator CE) { |
720 | // Just cast away constness because this is a non-const member function. |
721 | iterator S = const_cast<iterator>(CS); |
722 | iterator E = const_cast<iterator>(CE); |
723 | |
724 | assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds."); |
725 | |
726 | iterator N = S; |
727 | // Shift all elts down. |
728 | iterator I = std::move(E, this->end(), S); |
729 | // Drop the last elts. |
730 | this->destroy_range(I, this->end()); |
731 | this->set_size(I - this->begin()); |
732 | return(N); |
733 | } |
734 | |
735 | private: |
736 | template <class ArgType> iterator insert_one_impl(iterator I, ArgType &&Elt) { |
737 | // Callers ensure that ArgType is derived from T. |
738 | static_assert( |
739 | std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>, |
740 | T>::value, |
741 | "ArgType must be derived from T!"); |
742 | |
743 | if (I == this->end()) { // Important special case for empty vector. |
744 | this->push_back(::std::forward<ArgType>(Elt)); |
745 | return this->end()-1; |
746 | } |
747 | |
748 | assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds."); |
749 | |
750 | // Grow if necessary. |
751 | size_t Index = I - this->begin(); |
752 | std::remove_reference_t<ArgType> *EltPtr = |
753 | this->reserveForParamAndGetAddress(Elt); |
754 | I = this->begin() + Index; |
755 | |
756 | ::new ((void*) this->end()) T(::std::move(this->back())); |
757 | // Push everything else over. |
758 | std::move_backward(I, this->end()-1, this->end()); |
759 | this->set_size(this->size() + 1); |
760 | |
761 | // If we just moved the element we're inserting, be sure to update |
762 | // the reference (never happens if TakesParamByValue). |
763 | static_assert(!TakesParamByValue || std::is_same<ArgType, T>::value, |
764 | "ArgType must be 'T' when taking by value!"); |
765 | if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end())) |
766 | ++EltPtr; |
767 | |
768 | *I = ::std::forward<ArgType>(*EltPtr); |
769 | return I; |
770 | } |
771 | |
772 | public: |
773 | iterator insert(iterator I, T &&Elt) { |
774 | return insert_one_impl(I, this->forward_value_param(std::move(Elt))); |
775 | } |
776 | |
777 | iterator insert(iterator I, const T &Elt) { |
778 | return insert_one_impl(I, this->forward_value_param(Elt)); |
779 | } |
780 | |
781 | iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) { |
782 | // Convert iterator to elt# to avoid invalidating iterator when we reserve() |
783 | size_t InsertElt = I - this->begin(); |
784 | |
785 | if (I == this->end()) { // Important special case for empty vector. |
786 | append(NumToInsert, Elt); |
787 | return this->begin()+InsertElt; |
788 | } |
789 | |
790 | assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds."); |
791 | |
792 | // Ensure there is enough space, and get the (maybe updated) address of |
793 | // Elt. |
794 | const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert); |
795 | |
796 | // Uninvalidate the iterator. |
797 | I = this->begin()+InsertElt; |
798 | |
799 | // If there are more elements between the insertion point and the end of the |
800 | // range than there are being inserted, we can use a simple approach to |
801 | // insertion. Since we already reserved space, we know that this won't |
802 | // reallocate the vector. |
803 | if (size_t(this->end()-I) >= NumToInsert) { |
804 | T *OldEnd = this->end(); |
805 | append(std::move_iterator<iterator>(this->end() - NumToInsert), |
806 | std::move_iterator<iterator>(this->end())); |
807 | |
808 | // Copy the existing elements that get replaced. |
809 | std::move_backward(I, OldEnd-NumToInsert, OldEnd); |
810 | |
811 | // If we just moved the element we're inserting, be sure to update |
812 | // the reference (never happens if TakesParamByValue). |
813 | if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end()) |
814 | EltPtr += NumToInsert; |
815 | |
816 | std::fill_n(I, NumToInsert, *EltPtr); |
817 | return I; |
818 | } |
819 | |
820 | // Otherwise, we're inserting more elements than exist already, and we're |
821 | // not inserting at the end. |
822 | |
823 | // Move over the elements that we're about to overwrite. |
824 | T *OldEnd = this->end(); |
825 | this->set_size(this->size() + NumToInsert); |
826 | size_t NumOverwritten = OldEnd-I; |
827 | this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten); |
828 | |
829 | // If we just moved the element we're inserting, be sure to update |
830 | // the reference (never happens if TakesParamByValue). |
831 | if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end()) |
832 | EltPtr += NumToInsert; |
833 | |
834 | // Replace the overwritten part. |
835 | std::fill_n(I, NumOverwritten, *EltPtr); |
836 | |
837 | // Insert the non-overwritten middle part. |
838 | std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr); |
839 | return I; |
840 | } |
841 | |
842 | template <typename ItTy, |
843 | typename = std::enable_if_t<std::is_convertible< |
844 | typename std::iterator_traits<ItTy>::iterator_category, |
845 | std::input_iterator_tag>::value>> |
846 | iterator insert(iterator I, ItTy From, ItTy To) { |
847 | // Convert iterator to elt# to avoid invalidating iterator when we reserve() |
848 | size_t InsertElt = I - this->begin(); |
849 | |
850 | if (I == this->end()) { // Important special case for empty vector. |
851 | append(From, To); |
852 | return this->begin()+InsertElt; |
853 | } |
854 | |
855 | assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds."); |
856 | |
857 | // Check that the reserve that follows doesn't invalidate the iterators. |
858 | this->assertSafeToAddRange(From, To); |
859 | |
860 | size_t NumToInsert = std::distance(From, To); |
861 | |
862 | // Ensure there is enough space. |
863 | reserve(this->size() + NumToInsert); |
864 | |
865 | // Uninvalidate the iterator. |
866 | I = this->begin()+InsertElt; |
867 | |
868 | // If there are more elements between the insertion point and the end of the |
869 | // range than there are being inserted, we can use a simple approach to |
870 | // insertion. Since we already reserved space, we know that this won't |
871 | // reallocate the vector. |
872 | if (size_t(this->end()-I) >= NumToInsert) { |
873 | T *OldEnd = this->end(); |
874 | append(std::move_iterator<iterator>(this->end() - NumToInsert), |
875 | std::move_iterator<iterator>(this->end())); |
876 | |
877 | // Copy the existing elements that get replaced. |
878 | std::move_backward(I, OldEnd-NumToInsert, OldEnd); |
879 | |
880 | std::copy(From, To, I); |
881 | return I; |
882 | } |
883 | |
884 | // Otherwise, we're inserting more elements than exist already, and we're |
885 | // not inserting at the end. |
886 | |
887 | // Move over the elements that we're about to overwrite. |
888 | T *OldEnd = this->end(); |
889 | this->set_size(this->size() + NumToInsert); |
890 | size_t NumOverwritten = OldEnd-I; |
891 | this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten); |
892 | |
893 | // Replace the overwritten part. |
894 | for (T *J = I; NumOverwritten > 0; --NumOverwritten) { |
895 | *J = *From; |
896 | ++J; ++From; |
897 | } |
898 | |
899 | // Insert the non-overwritten middle part. |
900 | this->uninitialized_copy(From, To, OldEnd); |
901 | return I; |
902 | } |
903 | |
904 | void insert(iterator I, std::initializer_list<T> IL) { |
905 | insert(I, IL.begin(), IL.end()); |
906 | } |
907 | |
908 | template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) { |
909 | if (LLVM_UNLIKELY(this->size() >= this->capacity())) |
910 | return this->growAndEmplaceBack(std::forward<ArgTypes>(Args)...); |
911 | |
912 | ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...); |
913 | this->set_size(this->size() + 1); |
914 | return this->back(); |
915 | } |
916 | |
917 | SmallVectorImpl &operator=(const SmallVectorImpl &RHS); |
918 | |
919 | SmallVectorImpl &operator=(SmallVectorImpl &&RHS); |
920 | |
921 | bool operator==(const SmallVectorImpl &RHS) const { |
922 | if (this->size() != RHS.size()) return false; |
923 | return std::equal(this->begin(), this->end(), RHS.begin()); |
924 | } |
925 | bool operator!=(const SmallVectorImpl &RHS) const { |
926 | return !(*this == RHS); |
927 | } |
928 | |
929 | bool operator<(const SmallVectorImpl &RHS) const { |
930 | return std::lexicographical_compare(this->begin(), this->end(), |
931 | RHS.begin(), RHS.end()); |
932 | } |
933 | }; |
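// Hedged usage sketch (assumes llvm/ADT/SmallVector.h): insert() above guards
// against the inserted value aliasing the shifted region - cheap types are
// copied by value, and otherwise EltPtr is bumped past the moved elements -
// so inserting copies of one of the vector's own elements is safe.
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <string>

int main() {
  llvm::SmallVector<std::string, 8> V = {"x", "y", "z"};
  V.insert(V.begin(), 2, V[1]); // becomes {"y", "y", "x", "y", "z"}
  assert(V.size() == 5 && V[0] == "y" && V[1] == "y" && V[2] == "x");
  return 0;
}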
934 | |
935 | template <typename T> |
936 | void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) { |
937 | if (this == &RHS) return; |
938 | |
939 | // We can only avoid copying elements if neither vector is small. |
940 | if (!this->isSmall() && !RHS.isSmall()) { |
941 | std::swap(this->BeginX, RHS.BeginX); |
942 | std::swap(this->Size, RHS.Size); |
943 | std::swap(this->Capacity, RHS.Capacity); |
944 | return; |
945 | } |
946 | this->reserve(RHS.size()); |
947 | RHS.reserve(this->size()); |
948 | |
949 | // Swap the shared elements. |
950 | size_t NumShared = this->size(); |
951 | if (NumShared > RHS.size()) NumShared = RHS.size(); |
952 | for (size_type i = 0; i != NumShared; ++i) |
953 | std::swap((*this)[i], RHS[i]); |
954 | |
955 | // Copy over the extra elts. |
956 | if (this->size() > RHS.size()) { |
957 | size_t EltDiff = this->size() - RHS.size(); |
958 | this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end()); |
959 | RHS.set_size(RHS.size() + EltDiff); |
960 | this->destroy_range(this->begin()+NumShared, this->end()); |
961 | this->set_size(NumShared); |
962 | } else if (RHS.size() > this->size()) { |
963 | size_t EltDiff = RHS.size() - this->size(); |
964 | this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end()); |
965 | this->set_size(this->size() + EltDiff); |
966 | this->destroy_range(RHS.begin()+NumShared, RHS.end()); |
967 | RHS.set_size(NumShared); |
968 | } |
969 | } |
970 | |
971 | template <typename T> |
972 | SmallVectorImpl<T> &SmallVectorImpl<T>:: |
973 | operator=(const SmallVectorImpl<T> &RHS) { |
974 | // Avoid self-assignment. |
975 | if (this == &RHS) return *this; |
976 | |
977 | // If we already have sufficient space, assign the common elements, then |
978 | // destroy any excess. |
979 | size_t RHSSize = RHS.size(); |
980 | size_t CurSize = this->size(); |
981 | if (CurSize >= RHSSize) { |
982 | // Assign common elements. |
983 | iterator NewEnd; |
984 | if (RHSSize) |
985 | NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin()); |
986 | else |
987 | NewEnd = this->begin(); |
988 | |
989 | // Destroy excess elements. |
990 | this->destroy_range(NewEnd, this->end()); |
991 | |
992 | // Trim. |
993 | this->set_size(RHSSize); |
994 | return *this; |
995 | } |
996 | |
997 | // If we have to grow to have enough elements, destroy the current elements. |
998 | // This allows us to avoid copying them during the grow. |
999 | // FIXME: don't do this if they're efficiently moveable. |
1000 | if (this->capacity() < RHSSize) { |
1001 | // Destroy current elements. |
1002 | this->clear(); |
1003 | CurSize = 0; |
1004 | this->grow(RHSSize); |
1005 | } else if (CurSize) { |
1006 | // Otherwise, use assignment for the already-constructed elements. |
1007 | std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin()); |
1008 | } |
1009 | |
1010 | // Copy construct the new elements in place. |
1011 | this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(), |
1012 | this->begin()+CurSize); |
1013 | |
1014 | // Set end. |
1015 | this->set_size(RHSSize); |
1016 | return *this; |
1017 | } |
1018 | |
1019 | template <typename T> |
1020 | SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) { |
1021 | // Avoid self-assignment. |
1022 | if (this == &RHS) return *this; |
1023 | |
1024 | // If the RHS isn't small, clear this vector and then steal its buffer. |
1025 | if (!RHS.isSmall()) { |
1026 | this->destroy_range(this->begin(), this->end()); |
1027 | if (!this->isSmall()) free(this->begin()); |
1028 | this->BeginX = RHS.BeginX; |
1029 | this->Size = RHS.Size; |
1030 | this->Capacity = RHS.Capacity; |
1031 | RHS.resetToSmall(); |
1032 | return *this; |
1033 | } |
1034 | |
1035 | // If we already have sufficient space, assign the common elements, then |
1036 | // destroy any excess. |
1037 | size_t RHSSize = RHS.size(); |
1038 | size_t CurSize = this->size(); |
1039 | if (CurSize >= RHSSize) { |
1040 | // Assign common elements. |
1041 | iterator NewEnd = this->begin(); |
1042 | if (RHSSize) |
1043 | NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd); |
1044 | |
1045 | // Destroy excess elements and trim the bounds. |
1046 | this->destroy_range(NewEnd, this->end()); |
1047 | this->set_size(RHSSize); |
1048 | |
1049 | // Clear the RHS. |
1050 | RHS.clear(); |
1051 | |
1052 | return *this; |
1053 | } |
1054 | |
1055 | // If we have to grow to have enough elements, destroy the current elements. |
1056 | // This allows us to avoid copying them during the grow. |
1057 | // FIXME: this may not actually make any sense if we can efficiently move |
1058 | // elements. |
1059 | if (this->capacity() < RHSSize) { |
1060 | // Destroy current elements. |
1061 | this->clear(); |
1062 | CurSize = 0; |
1063 | this->grow(RHSSize); |
1064 | } else if (CurSize) { |
1065 | // Otherwise, use assignment for the already-constructed elements. |
1066 | std::move(RHS.begin(), RHS.begin()+CurSize, this->begin()); |
1067 | } |
1068 | |
1069 | // Move-construct the new elements in place. |
1070 | this->uninitialized_move(RHS.begin()+CurSize, RHS.end(), |
1071 | this->begin()+CurSize); |
1072 | |
1073 | // Set end. |
1074 | this->set_size(RHSSize); |
1075 | |
1076 | RHS.clear(); |
1077 | return *this; |
1078 | } |
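The move assignment above has a fast path the copy version cannot have: if the source has already spilled to the heap, its buffer is stolen wholesale in O(1) and the source is reset to its empty inline state; only a still-small source forces element-wise moves. A sketch (the driver is an assumption for illustration):

```cpp
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <utility>

int main() {
  // Five elements overflow the two inline slots, so Src is not small and
  // the !RHS.isSmall() branch above steals its heap buffer outright.
  llvm::SmallVector<int, 2> Src = {1, 2, 3, 4, 5};
  llvm::SmallVector<int, 2> Dst = {42};
  Dst = std::move(Src);
  assert(Dst.size() == 5 && Src.empty()); // Src was resetToSmall()'d
  return 0;
}
```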
1079 | |
1080 | /// Storage for the SmallVector elements. This is specialized for the N=0 case |
1081 | /// to avoid allocating unnecessary storage. |
1082 | template <typename T, unsigned N> |
1083 | struct SmallVectorStorage { |
1084 | alignas(T) char InlineElts[N * sizeof(T)]; |
1085 | }; |
1086 | |
1087 | /// We need the storage to be properly aligned even for small-size of 0 so that |
1088 | /// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is |
1089 | /// well-defined. |
1090 | template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {}; |
1091 | |
1092 | /// Forward declaration of SmallVector so that |
1093 | /// calculateSmallVectorDefaultInlinedElements can reference |
1094 | /// `sizeof(SmallVector<T, 0>)`. |
1095 | template <typename T, unsigned N> class LLVM_GSL_OWNER SmallVector; |
1096 | |
1097 | /// Helper class for calculating the default number of inline elements for |
1098 | /// `SmallVector<T>`. |
1099 | /// |
1100 | /// This should be migrated to a constexpr function when our minimum |
1101 | /// compiler support is enough for multi-statement constexpr functions. |
1102 | template <typename T> struct CalculateSmallVectorDefaultInlinedElements { |
1103 | // Parameter controlling the default number of inlined elements |
1104 | // for `SmallVector<T>`. |
1105 | // |
1106 | // The default number of inlined elements ensures that |
1107 | // 1. There is at least one inlined element. |
1108 | // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless |
1109 | // it contradicts 1. |
1110 | static constexpr size_t kPreferredSmallVectorSizeof = 64; |
1111 | |
1112 | // static_assert that sizeof(T) is not "too big". |
1113 | // |
1114 | // Because our policy guarantees at least one inlined element, it is possible |
1115 | // for an arbitrarily large inlined element to allocate an arbitrarily large |
1116 | // amount of inline storage. We generally consider it an antipattern for a |
1117 | // SmallVector to allocate an excessive amount of inline storage, so we want |
1118 | // to call attention to these cases and make sure that users are making an |
1119 | // intentional decision if they request a lot of inline storage. |
1120 | // |
1121 | // We want this assertion to trigger in pathological cases, but otherwise |
1122 | // not be too easy to hit. To accomplish that, the cutoff is actually somewhat |
1123 | // larger than kPreferredSmallVectorSizeof (otherwise, |
1124 | // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that |
1125 | // pattern seems useful in practice). |
1126 | // |
1127 | // One wrinkle is that this assertion is in theory non-portable, since |
1128 | // sizeof(T) is in general platform-dependent. However, we don't expect this |
1129 | // to be much of an issue, because most LLVM development happens on 64-bit |
1130 | // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for |
1131 | // 32-bit hosts, dodging the issue. The reverse situation, where development |
1132 | // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a |
1133 | // 64-bit host, is expected to be very rare. |
1134 | static_assert( |
1135 | sizeof(T) <= 256, |
1136 | "You are trying to use a default number of inlined elements for " |
1137 | "`SmallVector<T>` but `sizeof(T)` is really big! Please use an " |
1138 | "explicit number of inlined elements with `SmallVector<T, N>` to make " |
1139 | "sure you really want that much inline storage."); |
1140 | |
1141 | // Discount the size of the header itself when calculating the maximum inline |
1142 | // bytes. |
1143 | static constexpr size_t PreferredInlineBytes = |
1144 | kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>); |
1145 | static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T); |
1146 | static constexpr size_t value = |
1147 | NumElementsThatFit == 0 ? 1 : NumElementsThatFit; |
1148 | }; |
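A worked instance of the arithmetic above, assuming a common 64-bit ABI where the `SmallVector` header (the `BeginX` pointer plus 32-bit `Size` and `Capacity`) is 16 bytes: `PreferredInlineBytes` is 64 - 16 = 48, so `SmallVector<int>` defaults to 48 / 4 = 12 inline elements and sizes out at exactly 64 bytes. Since those figures are ABI assumptions rather than guarantees, this sketch prints instead of asserting:

```cpp
#include "llvm/ADT/SmallVector.h"
#include <cstdio>

int main() {
  // Expected on a typical 64-bit host (an assumption): 16 and 64.
  std::printf("header bytes:             %zu\n",
              sizeof(llvm::SmallVector<int, 0>));
  std::printf("sizeof(SmallVector<int>): %zu\n",
              sizeof(llvm::SmallVector<int>));
  return 0;
}
```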
1149 | |
1150 | /// This is a 'vector' (really, a variable-sized array), optimized |
1151 | /// for the case when the array is small. It contains some number of elements |
1152 | /// in-place, which allows it to avoid heap allocation when the actual number of |
1153 | /// elements is below that threshold. This allows normal "small" cases to be |
1154 | /// fast without losing generality for large inputs. |
1155 | /// |
1156 | /// \note |
1157 | /// In the absence of a well-motivated choice for the number of inlined |
1158 | /// elements \p N, it is recommended to use \c SmallVector<T> (that is, |
1159 | /// omitting the \p N). This will choose a default number of inlined elements |
1160 | /// reasonable for allocation on the stack (for example, trying to keep \c |
1161 | /// sizeof(SmallVector<T>) around 64 bytes). |
1162 | /// |
1163 | /// \warning This does not attempt to be exception safe. |
1164 | /// |
1165 | /// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h |
1166 | template <typename T, |
1167 | unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value> |
1168 | class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>, |
1169 | SmallVectorStorage<T, N> { |
1170 | public: |
1171 | SmallVector() : SmallVectorImpl<T>(N) {} |
1172 | |
1173 | ~SmallVector() { |
1174 | // Destroy the constructed elements in the vector. |
1175 | this->destroy_range(this->begin(), this->end()); |
1176 | } |
1177 | |
1178 | explicit SmallVector(size_t Size, const T &Value = T()) |
1179 | : SmallVectorImpl<T>(N) { |
1180 | this->assign(Size, Value); |
1181 | } |
1182 | |
1183 | template <typename ItTy, |
1184 | typename = std::enable_if_t<std::is_convertible< |
1185 | typename std::iterator_traits<ItTy>::iterator_category, |
1186 | std::input_iterator_tag>::value>> |
1187 | SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) { |
1188 | this->append(S, E); |
1189 | } |
1190 | |
1191 | template <typename RangeTy> |
1192 | explicit SmallVector(const iterator_range<RangeTy> &R) |
1193 | : SmallVectorImpl<T>(N) { |
1194 | this->append(R.begin(), R.end()); |
1195 | } |
1196 | |
1197 | SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) { |
1198 | this->assign(IL); |
1199 | } |
1200 | |
1201 | SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) { |
1202 | if (!RHS.empty()) |
1203 | SmallVectorImpl<T>::operator=(RHS); |
1204 | } |
1205 | |
1206 | SmallVector &operator=(const SmallVector &RHS) { |
1207 | SmallVectorImpl<T>::operator=(RHS); |
1208 | return *this; |
1209 | } |
1210 | |
1211 | SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) { |
1212 | if (!RHS.empty()) |
1213 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1214 | } |
1215 | |
1216 | SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) { |
1217 | if (!RHS.empty()) |
1218 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1219 | } |
1220 | |
1221 | SmallVector &operator=(SmallVector &&RHS) { |
1222 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1223 | return *this; |
1224 | } |
1225 | |
1226 | SmallVector &operator=(SmallVectorImpl<T> &&RHS) { |
1227 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1228 | return *this; |
1229 | } |
1230 | |
1231 | SmallVector &operator=(std::initializer_list<T> IL) { |
1232 | this->assign(IL); |
1233 | return *this; |
1234 | } |
1235 | }; |
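A quick tour of the constructors declared above, as a hypothetical driver:

```cpp
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <utility>

int main() {
  llvm::SmallVector<int> A = {1, 2, 3};            // initializer_list
  llvm::SmallVector<int, 4> B(5, 7);               // Size + fill Value
  llvm::SmallVector<int, 4> C(A.begin(), A.end()); // iterator pair
  llvm::SmallVector<int, 4> D(std::move(C));       // move construction
  assert(A.size() == 3 && B.size() == 5 && D.size() == 3);
  return 0;
}
```

Note that `A` relies on `CalculateSmallVectorDefaultInlinedElements` for its inline count, while `B`, `C`, and `D` pin `N` explicitly.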
1236 | |
1237 | template <typename T, unsigned N> |
1238 | inline size_t capacity_in_bytes(const SmallVector<T, N> &X) { |
1239 | return X.capacity_in_bytes(); |
1240 | } |
1241 | |
1242 | /// Given a range of type R, iterate the entire range and return a |
1243 | /// SmallVector with the elements of the range. This is useful, for example, |
1244 | /// when you want to iterate a range and then sort the results. |
1245 | template <unsigned Size, typename R> |
1246 | SmallVector<typename std::remove_const<typename std::remove_reference< |
1247 | decltype(*std::begin(std::declval<R &>()))>::type>::type, |
1248 | Size> |
1249 | to_vector(R &&Range) { |
1250 | return {std::begin(Range), std::end(Range)}; |
1251 | } |
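Per the use case the comment describes, `to_vector` materializes a possibly non-owning range into an owning `SmallVector` that can then be mutated. An illustrative use (the array and the inline size of 4 are arbitrary choices):

```cpp
#include "llvm/ADT/SmallVector.h"
#include <algorithm>
#include <cassert>

int main() {
  int Data[] = {3, 1, 2};
  // Copy the range's elements out so they can be sorted in place.
  auto V = llvm::to_vector<4>(Data);
  std::sort(V.begin(), V.end());
  assert(V[0] == 1 && V[1] == 2 && V[2] == 3);
  return 0;
}
```

Note this overload requires an explicit `Size` template argument; the element type is deduced from the range.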
1252 | |
1253 | } // end namespace llvm |
1254 | |
1255 | namespace std { |
1256 | |
1257 | /// Implement std::swap in terms of SmallVector swap. |
1258 | template<typename T> |
1259 | inline void |
1260 | swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) { |
1261 | LHS.swap(RHS); |
1262 | } |
1263 | |
1264 | /// Implement std::swap in terms of SmallVector swap. |
1265 | template<typename T, unsigned N> |
1266 | inline void |
1267 | swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) { |
1268 | LHS.swap(RHS); |
1269 | } |
1270 | |
1271 | } // end namespace std |
1272 | |
1273 | #endif // LLVM_ADT_SMALLVECTOR_H |