File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
Warning: line 363, column 11: Called C++ object pointer is null
1 | //===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===// | ||||
2 | // | ||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||
6 | // | ||||
7 | //===----------------------------------------------------------------------===// | ||||
8 | // | ||||
9 | // This pass eliminates allocas by either converting them into vectors or | ||||
10 | // by migrating them to local address space. | ||||
11 | // | ||||
12 | //===----------------------------------------------------------------------===// | ||||
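//
// For illustration (a sketch; the names and sizes are made up, but the shapes
// follow the code below):
//
//   to vector:  %p = alloca [4 x i32], addrspace(5)   ; private stack slot
//               becomes a <4 x i32> value held in VGPRs, with element
//               accesses rewritten as extractelement / insertelement;
//
//   to LDS:     the alloca becomes one slot per workitem in an internal
//               addrspace(3) global of type [WorkGroupSize x <alloca type>],
//               indexed by the linear workitem id.
//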
13 | |||||
14 | #include "AMDGPU.h" | ||||
15 | #include "GCNSubtarget.h" | ||||
16 | #include "llvm/Analysis/CaptureTracking.h" | ||||
17 | #include "llvm/Analysis/ValueTracking.h" | ||||
18 | #include "llvm/CodeGen/TargetPassConfig.h" | ||||
19 | #include "llvm/IR/IRBuilder.h" | ||||
20 | #include "llvm/IR/IntrinsicsAMDGPU.h" | ||||
21 | #include "llvm/IR/IntrinsicsR600.h" | ||||
22 | #include "llvm/Pass.h" | ||||
23 | #include "llvm/Target/TargetMachine.h" | ||||
24 | |||||
25 | #define DEBUG_TYPE "amdgpu-promote-alloca"
26 | |||||
27 | using namespace llvm; | ||||
28 | |||||
29 | namespace { | ||||
30 | |||||
31 | static cl::opt<bool> DisablePromoteAllocaToVector( | ||||
32 | "disable-promote-alloca-to-vector", | ||||
33 | cl::desc("Disable promote alloca to vector"), | ||||
34 | cl::init(false)); | ||||
35 | |||||
36 | static cl::opt<bool> DisablePromoteAllocaToLDS( | ||||
37 | "disable-promote-alloca-to-lds", | ||||
38 | cl::desc("Disable promote alloca to LDS"), | ||||
39 | cl::init(false)); | ||||
40 | |||||
41 | static cl::opt<unsigned> PromoteAllocaToVectorLimit( | ||||
42 | "amdgpu-promote-alloca-to-vector-limit", | ||||
43 | cl::desc("Maximum byte size to consider promote alloca to vector"), | ||||
44 | cl::init(0)); | ||||
45 | |||||
46 | // FIXME: This can create globals so should be a module pass. | ||||
47 | class AMDGPUPromoteAlloca : public FunctionPass { | ||||
48 | public: | ||||
49 | static char ID; | ||||
50 | |||||
51 | AMDGPUPromoteAlloca() : FunctionPass(ID) {} | ||||
52 | |||||
53 | bool runOnFunction(Function &F) override; | ||||
54 | |||||
55 | StringRef getPassName() const override { return "AMDGPU Promote Alloca"; } | ||||
56 | |||||
57 | bool handleAlloca(AllocaInst &I, bool SufficientLDS); | ||||
58 | |||||
59 | void getAnalysisUsage(AnalysisUsage &AU) const override { | ||||
60 | AU.setPreservesCFG(); | ||||
61 | FunctionPass::getAnalysisUsage(AU); | ||||
62 | } | ||||
63 | }; | ||||
64 | |||||
65 | class AMDGPUPromoteAllocaImpl { | ||||
66 | private: | ||||
67 | const TargetMachine &TM; | ||||
68 | Module *Mod = nullptr; | ||||
69 | const DataLayout *DL = nullptr; | ||||
70 | |||||
71 | // FIXME: This should be per-kernel. | ||||
72 | uint32_t LocalMemLimit = 0; | ||||
73 | uint32_t CurrentLocalMemUsage = 0; | ||||
74 | unsigned MaxVGPRs; | ||||
75 | |||||
76 | bool IsAMDGCN = false; | ||||
77 | bool IsAMDHSA = false; | ||||
78 | |||||
79 | std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder); | ||||
80 | Value *getWorkitemID(IRBuilder<> &Builder, unsigned N); | ||||
81 | |||||
82 | /// BaseAlloca is the alloca root the search started from. | ||||
83 | /// Val may be that alloca or a recursive user of it. | ||||
84 | bool collectUsesWithPtrTypes(Value *BaseAlloca, | ||||
85 | Value *Val, | ||||
86 | std::vector<Value*> &WorkList) const; | ||||
87 | |||||
88 | /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand | ||||
89 | /// indices to an instruction with 2 pointer inputs (e.g. select, icmp). | ||||
90 | /// Returns true if both operands are derived from the same alloca. Val should | ||||
91 | /// be the same value as one of the input operands of UseInst. | ||||
92 | bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val, | ||||
93 | Instruction *UseInst, | ||||
94 | int OpIdx0, int OpIdx1) const; | ||||
95 | |||||
96 | /// Check whether we have enough local memory for promotion. | ||||
97 | bool hasSufficientLocalMem(const Function &F); | ||||
98 | |||||
99 | bool handleAlloca(AllocaInst &I, bool SufficientLDS); | ||||
100 | |||||
101 | public: | ||||
102 | AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {} | ||||
103 | bool run(Function &F); | ||||
104 | }; | ||||
105 | |||||
106 | class AMDGPUPromoteAllocaToVector : public FunctionPass { | ||||
107 | public: | ||||
108 | static char ID; | ||||
109 | |||||
110 | AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {} | ||||
111 | |||||
112 | bool runOnFunction(Function &F) override; | ||||
113 | |||||
114 | StringRef getPassName() const override { | ||||
115 | return "AMDGPU Promote Alloca to vector"; | ||||
116 | } | ||||
117 | |||||
118 | void getAnalysisUsage(AnalysisUsage &AU) const override { | ||||
119 | AU.setPreservesCFG(); | ||||
120 | FunctionPass::getAnalysisUsage(AU); | ||||
121 | } | ||||
122 | }; | ||||
123 | |||||
124 | } // end anonymous namespace | ||||
125 | |||||
126 | char AMDGPUPromoteAlloca::ID = 0; | ||||
127 | char AMDGPUPromoteAllocaToVector::ID = 0; | ||||
128 | |||||
129 | INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
130 |                       "AMDGPU promote alloca to vector or LDS", false, false)
131 | // Move LDS uses from functions to kernels before promote alloca for accurate
132 | // estimation of LDS available
133 | INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDS)
134 | INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
135 |                     "AMDGPU promote alloca to vector or LDS", false, false)
136 | |||||
137 | INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
138 |                 "AMDGPU promote alloca to vector", false, false)
139 | |||||
140 | char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID; | ||||
141 | char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID; | ||||
142 | |||||
143 | bool AMDGPUPromoteAlloca::runOnFunction(Function &F) { | ||||
144 | if (skipFunction(F)) | ||||
145 | return false; | ||||
146 | |||||
147 | if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) { | ||||
148 | return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>()).run(F); | ||||
149 | } | ||||
150 | return false; | ||||
151 | } | ||||
152 | |||||
153 | PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F, | ||||
154 | FunctionAnalysisManager &AM) { | ||||
155 | bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F); | ||||
156 | if (Changed) { | ||||
157 | PreservedAnalyses PA; | ||||
158 | PA.preserveSet<CFGAnalyses>(); | ||||
159 | return PA; | ||||
160 | } | ||||
161 | return PreservedAnalyses::all(); | ||||
162 | } | ||||
163 | |||||
164 | bool AMDGPUPromoteAllocaImpl::run(Function &F) { | ||||
165 | Mod = F.getParent(); | ||||
166 | DL = &Mod->getDataLayout(); | ||||
167 | |||||
168 | const Triple &TT = TM.getTargetTriple(); | ||||
169 | IsAMDGCN = TT.getArch() == Triple::amdgcn; | ||||
170 | IsAMDHSA = TT.getOS() == Triple::AMDHSA; | ||||
171 | |||||
172 | const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F); | ||||
173 | if (!ST.isPromoteAllocaEnabled()) | ||||
174 | return false; | ||||
175 | |||||
176 | if (IsAMDGCN) { | ||||
177 | const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F); | ||||
178 | MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first); | ||||
179 | } else { | ||||
180 | MaxVGPRs = 128; | ||||
181 | } | ||||
182 | |||||
183 | bool SufficientLDS = hasSufficientLocalMem(F); | ||||
184 | bool Changed = false; | ||||
185 | BasicBlock &EntryBB = *F.begin(); | ||||
186 | |||||
187 | SmallVector<AllocaInst *, 16> Allocas; | ||||
188 | for (Instruction &I : EntryBB) { | ||||
189 | if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) | ||||
190 | Allocas.push_back(AI); | ||||
191 | } | ||||
192 | |||||
193 | for (AllocaInst *AI : Allocas) { | ||||
194 | if (handleAlloca(*AI, SufficientLDS)) | ||||
195 | Changed = true; | ||||
196 | } | ||||
197 | |||||
198 | return Changed; | ||||
199 | } | ||||
200 | |||||
201 | std::pair<Value *, Value *> | ||||
202 | AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) { | ||||
203 | const Function &F = *Builder.GetInsertBlock()->getParent(); | ||||
204 | const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F); | ||||
205 | |||||
206 | if (!IsAMDHSA) { | ||||
207 | Function *LocalSizeYFn | ||||
208 | = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y); | ||||
209 | Function *LocalSizeZFn | ||||
210 | = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z); | ||||
211 | |||||
212 | CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {}); | ||||
213 | CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {}); | ||||
214 | |||||
215 | ST.makeLIDRangeMetadata(LocalSizeY); | ||||
216 | ST.makeLIDRangeMetadata(LocalSizeZ); | ||||
217 | |||||
218 | return std::make_pair(LocalSizeY, LocalSizeZ); | ||||
219 | } | ||||
220 | |||||
221 | // We must read the size out of the dispatch pointer. | ||||
222 |   assert(IsAMDGCN);
223 | |||||
224 | // We are indexing into this struct, and want to extract the workgroup_size_* | ||||
225 | // fields. | ||||
226 | // | ||||
227 | // typedef struct hsa_kernel_dispatch_packet_s { | ||||
228 | // uint16_t header; | ||||
229 | // uint16_t setup; | ||||
230 | // uint16_t workgroup_size_x ; | ||||
231 | // uint16_t workgroup_size_y; | ||||
232 | // uint16_t workgroup_size_z; | ||||
233 | // uint16_t reserved0; | ||||
234 | // uint32_t grid_size_x ; | ||||
235 | // uint32_t grid_size_y ; | ||||
236 | // uint32_t grid_size_z; | ||||
237 | // | ||||
238 | // uint32_t private_segment_size; | ||||
239 | // uint32_t group_segment_size; | ||||
240 | // uint64_t kernel_object; | ||||
241 | // | ||||
242 | // #ifdef HSA_LARGE_MODEL | ||||
243 | // void *kernarg_address; | ||||
244 | // #elif defined HSA_LITTLE_ENDIAN | ||||
245 | // void *kernarg_address; | ||||
246 | // uint32_t reserved1; | ||||
247 | // #else | ||||
248 | // uint32_t reserved1; | ||||
249 | // void *kernarg_address; | ||||
250 | // #endif | ||||
251 | // uint64_t reserved2; | ||||
252 | // hsa_signal_t completion_signal; // uint64_t wrapper | ||||
253 | // } hsa_kernel_dispatch_packet_t | ||||
254 | // | ||||
255 | Function *DispatchPtrFn | ||||
256 | = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr); | ||||
257 | |||||
258 | CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {}); | ||||
259 | DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias); | ||||
260 | DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull); | ||||
261 | |||||
262 | // Size of the dispatch packet struct. | ||||
263 | DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64); | ||||
264 | |||||
265 | Type *I32Ty = Type::getInt32Ty(Mod->getContext()); | ||||
266 | Value *CastDispatchPtr = Builder.CreateBitCast( | ||||
267 | DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS)); | ||||
268 | |||||
269 | // We could do a single 64-bit load here, but it's likely that the basic | ||||
270 | // 32-bit and extract sequence is already present, and it is probably easier | ||||
271 |   // to CSE this. The loads should be mergeable later anyway.
272 | Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1); | ||||
273 | LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4)); | ||||
274 | |||||
275 | Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2); | ||||
276 | LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4)); | ||||
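  // In dispatch-packet byte offsets: i32 index 1 covers bytes 4..7, i.e.
  // {workgroup_size_x, workgroup_size_y}; i32 index 2 covers bytes 8..11,
  // i.e. {workgroup_size_z, reserved0}, which is why the upper half of
  // LoadZU is expected to be zero.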
277 | |||||
278 | MDNode *MD = MDNode::get(Mod->getContext(), None); | ||||
279 | LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD); | ||||
280 | LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD); | ||||
281 | ST.makeLIDRangeMetadata(LoadZU); | ||||
282 | |||||
283 | // Extract y component. Upper half of LoadZU should be zero already. | ||||
284 | Value *Y = Builder.CreateLShr(LoadXY, 16); | ||||
285 | |||||
286 | return std::make_pair(Y, LoadZU); | ||||
287 | } | ||||
288 | |||||
289 | Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder, | ||||
290 | unsigned N) { | ||||
291 | const AMDGPUSubtarget &ST = | ||||
292 | AMDGPUSubtarget::get(TM, *Builder.GetInsertBlock()->getParent()); | ||||
293 | Intrinsic::ID IntrID = Intrinsic::not_intrinsic; | ||||
294 | |||||
295 | switch (N) { | ||||
296 | case 0: | ||||
297 | IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x | ||||
298 | : (Intrinsic::ID)Intrinsic::r600_read_tidig_x; | ||||
299 | break; | ||||
300 | case 1: | ||||
301 | IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y | ||||
302 | : (Intrinsic::ID)Intrinsic::r600_read_tidig_y; | ||||
303 | break; | ||||
304 | |||||
305 | case 2: | ||||
306 | IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z | ||||
307 | : (Intrinsic::ID)Intrinsic::r600_read_tidig_z; | ||||
308 | break; | ||||
309 | default: | ||||
310 |     llvm_unreachable("invalid dimension");
311 | } | ||||
312 | |||||
313 | Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID); | ||||
314 | CallInst *CI = Builder.CreateCall(WorkitemIdFn); | ||||
315 | ST.makeLIDRangeMetadata(CI); | ||||
316 | |||||
317 | return CI; | ||||
318 | } | ||||
319 | |||||
320 | static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) { | ||||
321 | return FixedVectorType::get(ArrayTy->getElementType(), | ||||
322 | ArrayTy->getNumElements()); | ||||
323 | } | ||||
324 | |||||
325 | static Value *stripBitcasts(Value *V) { | ||||
326 | while (Instruction *I = dyn_cast<Instruction>(V)) { | ||||
327 | if (I->getOpcode() != Instruction::BitCast) | ||||
328 | break; | ||||
329 | V = I->getOperand(0); | ||||
330 | } | ||||
331 | return V; | ||||
332 | } | ||||
333 | |||||
334 | static Value * | ||||
335 | calculateVectorIndex(Value *Ptr, | ||||
336 | const std::map<GetElementPtrInst *, Value *> &GEPIdx) { | ||||
337 | GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(stripBitcasts(Ptr)); | ||||
338 | if (!GEP) | ||||
339 | return nullptr; | ||||
340 | |||||
341 | auto I = GEPIdx.find(GEP); | ||||
342 | return I == GEPIdx.end() ? nullptr : I->second; | ||||
343 | } | ||||
344 | |||||
345 | static Value* GEPToVectorIndex(GetElementPtrInst *GEP) { | ||||
346 | // FIXME we only support simple cases | ||||
347 | if (GEP->getNumOperands() != 3) | ||||
348 | return nullptr; | ||||
349 | |||||
350 | ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1)); | ||||
351 | if (!I0 || !I0->isZero()) | ||||
352 | return nullptr; | ||||
353 | |||||
354 | return GEP->getOperand(2); | ||||
355 | } | ||||
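// For example (illustrative): for
//   %gep = getelementptr inbounds [4 x i32], [4 x i32] addrspace(5)* %p,
//          i32 0, i32 %i
// the three operands are %p, 0, and %i; the first index must be the constant
// zero, and %i (operand 2) is returned as the vector lane index. Any other
// GEP shape is rejected.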
356 | |||||
357 | // Not an instruction handled below to turn into a vector. | ||||
358 | // | ||||
359 | // TODO: Check isTriviallyVectorizable for calls and handle other | ||||
360 | // instructions. | ||||
361 | static bool canVectorizeInst(Instruction *Inst, User *User, | ||||
362 | const DataLayout &DL) { | ||||
363 |   switch (Inst->getOpcode()) {
    |           // ^ analyzer warning (line 363, column 11): Called C++ object
    |           // pointer is null. 'Inst' comes from a dyn_cast in the caller
    |           // and may be null here.
364 | case Instruction::Load: { | ||||
365 |     // Currently we only handle the case where the pointer operand is a GEP.
366 |     // Also, we cannot vectorize volatile or atomic loads.
367 | LoadInst *LI = cast<LoadInst>(Inst); | ||||
368 | if (isa<AllocaInst>(User) && | ||||
369 | LI->getPointerOperandType() == User->getType() && | ||||
370 | isa<VectorType>(LI->getType())) | ||||
371 | return true; | ||||
372 | |||||
373 | Instruction *PtrInst = dyn_cast<Instruction>(LI->getPointerOperand()); | ||||
374 | if (!PtrInst) | ||||
375 | return false; | ||||
376 | |||||
377 | return (PtrInst->getOpcode() == Instruction::GetElementPtr || | ||||
378 | PtrInst->getOpcode() == Instruction::BitCast) && | ||||
379 | LI->isSimple(); | ||||
380 | } | ||||
381 | case Instruction::BitCast: | ||||
382 | return true; | ||||
383 | case Instruction::Store: { | ||||
384 |     // Must be the stored pointer operand, not a stored value; and since it
385 |     // should be in canonical form, the User should be a GEP.
386 |     // Also, we cannot vectorize volatile or atomic stores.
387 | StoreInst *SI = cast<StoreInst>(Inst); | ||||
388 | if (isa<AllocaInst>(User) && | ||||
389 | SI->getPointerOperandType() == User->getType() && | ||||
390 | isa<VectorType>(SI->getValueOperand()->getType())) | ||||
391 | return true; | ||||
392 | |||||
393 | Instruction *UserInst = dyn_cast<Instruction>(User); | ||||
394 | if (!UserInst) | ||||
395 | return false; | ||||
396 | |||||
397 | return (SI->getPointerOperand() == User) && | ||||
398 | (UserInst->getOpcode() == Instruction::GetElementPtr || | ||||
399 | UserInst->getOpcode() == Instruction::BitCast) && | ||||
400 | SI->isSimple(); | ||||
401 | } | ||||
402 | default: | ||||
403 | return false; | ||||
404 | } | ||||
405 | } | ||||
406 | |||||
407 | static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL, | ||||
408 | unsigned MaxVGPRs) { | ||||
409 | |||||
410 | if (DisablePromoteAllocaToVector) { | ||||
411 |     LLVM_DEBUG(dbgs() << " Promotion alloca to vector is disabled\n");
412 | return false; | ||||
413 | } | ||||
414 | |||||
415 | Type *AllocaTy = Alloca->getAllocatedType(); | ||||
416 | auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy); | ||||
417 |   if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
418 | if (VectorType::isValidElementType(ArrayTy->getElementType()) && | ||||
419 | ArrayTy->getNumElements() > 0) | ||||
420 | VectorTy = arrayTypeToVecType(ArrayTy); | ||||
421 | } | ||||
422 | |||||
423 | // Use up to 1/4 of available register budget for vectorization. | ||||
424 | unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8 | ||||
425 | : (MaxVGPRs * 32); | ||||
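  // Worked example (illustrative): with MaxVGPRs = 256 the register budget is
  // 256 * 32 = 8192 bits, and the check below rejects any alloca larger than a
  // quarter of that, i.e. 2048 bits (256 bytes). When the command-line limit is
  // set, a byte budget of Limit * 8 bits replaces the VGPR budget, of which a
  // quarter is likewise usable.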
426 | |||||
427 | if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) { | ||||
428 |     LLVM_DEBUG(dbgs() << " Alloca too big for vectorization with "
429 |                       << MaxVGPRs << " registers available\n");
430 | return false; | ||||
431 | } | ||||
432 | |||||
433 |   LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");
434 | |||||
435 | // FIXME: There is no reason why we can't support larger arrays, we | ||||
436 | // are just being conservative for now. | ||||
437 |   // FIXME: We also reject allocas of the form [2 x [2 x i32]] or equivalent;
438 |   // potentially these could also be promoted, but we don't currently handle that case.
439 | if (!VectorTy || VectorTy->getNumElements() > 16 || | ||||
440 | VectorTy->getNumElements() < 2) { | ||||
441 |     LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n");
442 | return false; | ||||
443 | } | ||||
444 | |||||
445 | std::map<GetElementPtrInst*, Value*> GEPVectorIdx; | ||||
446 | std::vector<Value *> WorkList; | ||||
447 | SmallVector<User *, 8> Users(Alloca->users()); | ||||
448 | SmallVector<User *, 8> UseUsers(Users.size(), Alloca); | ||||
449 | Type *VecEltTy = VectorTy->getElementType(); | ||||
450 | while (!Users.empty()) { | ||||
451 | User *AllocaUser = Users.pop_back_val(); | ||||
452 | User *UseUser = UseUsers.pop_back_val(); | ||||
453 | Instruction *Inst = dyn_cast<Instruction>(AllocaUser); | ||||
454 | |||||
455 | GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser); | ||||
456 |     if (!GEP) {
457 | if (!canVectorizeInst(Inst, UseUser, DL)) | ||||
458 | return false; | ||||
459 | |||||
460 | if (Inst->getOpcode() == Instruction::BitCast) { | ||||
461 | Type *FromTy = Inst->getOperand(0)->getType()->getPointerElementType(); | ||||
462 | Type *ToTy = Inst->getType()->getPointerElementType(); | ||||
463 | if (FromTy->isAggregateType() || ToTy->isAggregateType() || | ||||
464 | DL.getTypeSizeInBits(FromTy) != DL.getTypeSizeInBits(ToTy)) | ||||
465 | continue; | ||||
466 | |||||
467 | for (User *CastUser : Inst->users()) { | ||||
468 | if (isAssumeLikeIntrinsic(cast<Instruction>(CastUser))) | ||||
469 | continue; | ||||
470 | Users.push_back(CastUser); | ||||
471 | UseUsers.push_back(Inst); | ||||
472 | } | ||||
473 | |||||
474 | continue; | ||||
475 | } | ||||
476 | |||||
477 | WorkList.push_back(AllocaUser); | ||||
478 | continue; | ||||
479 | } | ||||
480 | |||||
481 | Value *Index = GEPToVectorIndex(GEP); | ||||
482 | |||||
483 | // If we can't compute a vector index from this GEP, then we can't | ||||
484 | // promote this alloca to vector. | ||||
485 | if (!Index) { | ||||
486 |       LLVM_DEBUG(dbgs() << " Cannot compute vector index for GEP " << *GEP
487 |                         << '\n');
488 | return false; | ||||
489 | } | ||||
490 | |||||
491 | GEPVectorIdx[GEP] = Index; | ||||
492 | Users.append(GEP->user_begin(), GEP->user_end()); | ||||
493 | UseUsers.append(GEP->getNumUses(), GEP); | ||||
494 | } | ||||
495 | |||||
496 |   LLVM_DEBUG(dbgs() << " Converting alloca to vector " << *AllocaTy << " -> "
497 |                     << *VectorTy << '\n');
498 | |||||
499 | for (Value *V : WorkList) { | ||||
500 | Instruction *Inst = cast<Instruction>(V); | ||||
501 | IRBuilder<> Builder(Inst); | ||||
502 | switch (Inst->getOpcode()) { | ||||
503 | case Instruction::Load: { | ||||
504 | if (Inst->getType() == AllocaTy || Inst->getType()->isVectorTy()) | ||||
505 | break; | ||||
506 | |||||
507 | Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand(); | ||||
508 | Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx); | ||||
509 | if (!Index) | ||||
510 | break; | ||||
511 | |||||
512 | Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS); | ||||
513 | Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy); | ||||
514 | Value *VecValue = Builder.CreateLoad(VectorTy, BitCast); | ||||
515 | Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index); | ||||
516 | if (Inst->getType() != VecEltTy) | ||||
517 | ExtractElement = Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType()); | ||||
518 | Inst->replaceAllUsesWith(ExtractElement); | ||||
519 | Inst->eraseFromParent(); | ||||
520 | break; | ||||
521 | } | ||||
522 | case Instruction::Store: { | ||||
523 | StoreInst *SI = cast<StoreInst>(Inst); | ||||
524 | if (SI->getValueOperand()->getType() == AllocaTy || | ||||
525 | SI->getValueOperand()->getType()->isVectorTy()) | ||||
526 | break; | ||||
527 | |||||
528 | Value *Ptr = SI->getPointerOperand(); | ||||
529 | Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx); | ||||
530 | if (!Index) | ||||
531 | break; | ||||
532 | |||||
533 | Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS); | ||||
534 | Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy); | ||||
535 | Value *VecValue = Builder.CreateLoad(VectorTy, BitCast); | ||||
536 | Value *Elt = SI->getValueOperand(); | ||||
537 | if (Elt->getType() != VecEltTy) | ||||
538 | Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy); | ||||
539 | Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index); | ||||
540 | Builder.CreateStore(NewVecValue, BitCast); | ||||
541 | Inst->eraseFromParent(); | ||||
542 | break; | ||||
543 | } | ||||
544 | |||||
545 | default: | ||||
546 |       llvm_unreachable("Inconsistency in instructions promotable to vector");
547 | } | ||||
548 | } | ||||
549 | return true; | ||||
550 | } | ||||
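// Sketch of the rewrite performed above (illustrative; %p, %gep, and %i are
// made up), for a [4 x i32] alloca %p promoted to <4 x i32>:
//
//   %elt = load i32, i32 addrspace(5)* %gep        ; GEP of %p at index %i
//
// becomes
//
//   %vp  = bitcast [4 x i32] addrspace(5)* %p to <4 x i32> addrspace(5)*
//   %vec = load <4 x i32>, <4 x i32> addrspace(5)* %vp
//   %elt = extractelement <4 x i32> %vec, i32 %i
//
// and a store likewise reloads the whole vector, updates one lane with
// insertelement, and stores the vector back.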
551 | |||||
552 | static bool isCallPromotable(CallInst *CI) { | ||||
553 | IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); | ||||
554 | if (!II) | ||||
555 | return false; | ||||
556 | |||||
557 | switch (II->getIntrinsicID()) { | ||||
558 | case Intrinsic::memcpy: | ||||
559 | case Intrinsic::memmove: | ||||
560 | case Intrinsic::memset: | ||||
561 | case Intrinsic::lifetime_start: | ||||
562 | case Intrinsic::lifetime_end: | ||||
563 | case Intrinsic::invariant_start: | ||||
564 | case Intrinsic::invariant_end: | ||||
565 | case Intrinsic::launder_invariant_group: | ||||
566 | case Intrinsic::strip_invariant_group: | ||||
567 | case Intrinsic::objectsize: | ||||
568 | return true; | ||||
569 | default: | ||||
570 | return false; | ||||
571 | } | ||||
572 | } | ||||
573 | |||||
574 | bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca( | ||||
575 | Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0, | ||||
576 | int OpIdx1) const { | ||||
577 | // Figure out which operand is the one we might not be promoting. | ||||
578 | Value *OtherOp = Inst->getOperand(OpIdx0); | ||||
579 | if (Val == OtherOp) | ||||
580 | OtherOp = Inst->getOperand(OpIdx1); | ||||
581 | |||||
582 | if (isa<ConstantPointerNull>(OtherOp)) | ||||
583 | return true; | ||||
584 | |||||
585 | Value *OtherObj = getUnderlyingObject(OtherOp); | ||||
586 | if (!isa<AllocaInst>(OtherObj)) | ||||
587 | return false; | ||||
588 | |||||
589 | // TODO: We should be able to replace undefs with the right pointer type. | ||||
590 | |||||
591 | // TODO: If we know the other base object is another promotable | ||||
592 | // alloca, not necessarily this alloca, we can do this. The | ||||
593 | // important part is both must have the same address space at | ||||
594 | // the end. | ||||
595 | if (OtherObj != BaseAlloca) { | ||||
596 |     LLVM_DEBUG(
597 |         dbgs() << "Found a binary instruction with another alloca object\n");
598 | return false; | ||||
599 | } | ||||
600 | |||||
601 | return true; | ||||
602 | } | ||||
603 | |||||
604 | bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes( | ||||
605 | Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const { | ||||
606 | |||||
607 | for (User *User : Val->users()) { | ||||
608 | if (is_contained(WorkList, User)) | ||||
609 | continue; | ||||
610 | |||||
611 | if (CallInst *CI = dyn_cast<CallInst>(User)) { | ||||
612 | if (!isCallPromotable(CI)) | ||||
613 | return false; | ||||
614 | |||||
615 | WorkList.push_back(User); | ||||
616 | continue; | ||||
617 | } | ||||
618 | |||||
619 | Instruction *UseInst = cast<Instruction>(User); | ||||
620 | if (UseInst->getOpcode() == Instruction::PtrToInt) | ||||
621 | return false; | ||||
622 | |||||
623 | if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) { | ||||
624 | if (LI->isVolatile()) | ||||
625 | return false; | ||||
626 | |||||
627 | continue; | ||||
628 | } | ||||
629 | |||||
630 | if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) { | ||||
631 | if (SI->isVolatile()) | ||||
632 | return false; | ||||
633 | |||||
634 | // Reject if the stored value is not the pointer operand. | ||||
635 | if (SI->getPointerOperand() != Val) | ||||
636 | return false; | ||||
637 | } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) { | ||||
638 | if (RMW->isVolatile()) | ||||
639 | return false; | ||||
640 | } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) { | ||||
641 | if (CAS->isVolatile()) | ||||
642 | return false; | ||||
643 | } | ||||
644 | |||||
645 | // Only promote a select if we know that the other select operand | ||||
646 | // is from another pointer that will also be promoted. | ||||
647 | if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) { | ||||
648 | if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1)) | ||||
649 | return false; | ||||
650 | |||||
651 | // May need to rewrite constant operands. | ||||
652 | WorkList.push_back(ICmp); | ||||
653 | } | ||||
654 | |||||
655 | if (UseInst->getOpcode() == Instruction::AddrSpaceCast) { | ||||
656 | // Give up if the pointer may be captured. | ||||
657 | if (PointerMayBeCaptured(UseInst, true, true)) | ||||
658 | return false; | ||||
659 | // Don't collect the users of this. | ||||
660 | WorkList.push_back(User); | ||||
661 | continue; | ||||
662 | } | ||||
663 | |||||
664 | // Do not promote vector/aggregate type instructions. It is hard to track | ||||
665 | // their users. | ||||
666 | if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User)) | ||||
667 | return false; | ||||
668 | |||||
669 | if (!User->getType()->isPointerTy()) | ||||
670 | continue; | ||||
671 | |||||
672 | if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) { | ||||
673 | // Be conservative if an address could be computed outside the bounds of | ||||
674 | // the alloca. | ||||
675 | if (!GEP->isInBounds()) | ||||
676 | return false; | ||||
677 | } | ||||
678 | |||||
679 | // Only promote a select if we know that the other select operand is from | ||||
680 | // another pointer that will also be promoted. | ||||
681 | if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) { | ||||
682 | if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2)) | ||||
683 | return false; | ||||
684 | } | ||||
685 | |||||
686 | // Repeat for phis. | ||||
687 | if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) { | ||||
688 | // TODO: Handle more complex cases. We should be able to replace loops | ||||
689 | // over arrays. | ||||
690 | switch (Phi->getNumIncomingValues()) { | ||||
691 | case 1: | ||||
692 | break; | ||||
693 | case 2: | ||||
694 | if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1)) | ||||
695 | return false; | ||||
696 | break; | ||||
697 | default: | ||||
698 | return false; | ||||
699 | } | ||||
700 | } | ||||
701 | |||||
702 | WorkList.push_back(User); | ||||
703 | if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList)) | ||||
704 | return false; | ||||
705 | } | ||||
706 | |||||
707 | return true; | ||||
708 | } | ||||
709 | |||||
710 | bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) { | ||||
711 | |||||
712 | FunctionType *FTy = F.getFunctionType(); | ||||
713 | const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F); | ||||
714 | |||||
715 | // If the function has any arguments in the local address space, then it's | ||||
716 | // possible these arguments require the entire local memory space, so | ||||
717 | // we cannot use local memory in the pass. | ||||
718 | for (Type *ParamTy : FTy->params()) { | ||||
719 | PointerType *PtrTy = dyn_cast<PointerType>(ParamTy); | ||||
720 | if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { | ||||
721 | LocalMemLimit = 0; | ||||
722 |       LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
723 |                            "local memory disabled.\n");
724 | return false; | ||||
725 | } | ||||
726 | } | ||||
727 | |||||
728 | LocalMemLimit = ST.getLocalMemorySize(); | ||||
729 | if (LocalMemLimit == 0) | ||||
730 | return false; | ||||
731 | |||||
732 | SmallVector<const Constant *, 16> Stack; | ||||
733 | SmallPtrSet<const Constant *, 8> VisitedConstants; | ||||
734 | SmallPtrSet<const GlobalVariable *, 8> UsedLDS; | ||||
735 | |||||
736 | auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool { | ||||
737 | for (const User *U : Val->users()) { | ||||
738 | if (const Instruction *Use = dyn_cast<Instruction>(U)) { | ||||
739 | if (Use->getParent()->getParent() == &F) | ||||
740 | return true; | ||||
741 | } else { | ||||
742 | const Constant *C = cast<Constant>(U); | ||||
743 | if (VisitedConstants.insert(C).second) | ||||
744 | Stack.push_back(C); | ||||
745 | } | ||||
746 | } | ||||
747 | |||||
748 | return false; | ||||
749 | }; | ||||
750 | |||||
751 | for (GlobalVariable &GV : Mod->globals()) { | ||||
752 | if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) | ||||
753 | continue; | ||||
754 | |||||
755 | if (visitUsers(&GV, &GV)) { | ||||
756 | UsedLDS.insert(&GV); | ||||
757 | Stack.clear(); | ||||
758 | continue; | ||||
759 | } | ||||
760 | |||||
761 | // For any ConstantExpr uses, we need to recursively search the users until | ||||
762 | // we see a function. | ||||
763 | while (!Stack.empty()) { | ||||
764 | const Constant *C = Stack.pop_back_val(); | ||||
765 | if (visitUsers(&GV, C)) { | ||||
766 | UsedLDS.insert(&GV); | ||||
767 | Stack.clear(); | ||||
768 | break; | ||||
769 | } | ||||
770 | } | ||||
771 | } | ||||
772 | |||||
773 | const DataLayout &DL = Mod->getDataLayout(); | ||||
774 | SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes; | ||||
775 | AllocatedSizes.reserve(UsedLDS.size()); | ||||
776 | |||||
777 | for (const GlobalVariable *GV : UsedLDS) { | ||||
778 | Align Alignment = | ||||
779 | DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType()); | ||||
780 | uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType()); | ||||
781 | AllocatedSizes.emplace_back(AllocSize, Alignment); | ||||
782 | } | ||||
783 | |||||
784 | // Sort to try to estimate the worst case alignment padding | ||||
785 | // | ||||
786 | // FIXME: We should really do something to fix the addresses to a more optimal | ||||
787 | // value instead | ||||
788 | llvm::sort(AllocatedSizes, [](std::pair<uint64_t, Align> LHS, | ||||
789 | std::pair<uint64_t, Align> RHS) { | ||||
790 | return LHS.second < RHS.second; | ||||
791 | }); | ||||
792 | |||||
793 | // Check how much local memory is being used by global objects | ||||
794 | CurrentLocalMemUsage = 0; | ||||
795 | |||||
796 | // FIXME: Try to account for padding here. The real padding and address is | ||||
797 | // currently determined from the inverse order of uses in the function when | ||||
798 | // legalizing, which could also potentially change. We try to estimate the | ||||
799 | // worst case here, but we probably should fix the addresses earlier. | ||||
800 | for (auto Alloc : AllocatedSizes) { | ||||
801 | CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second); | ||||
802 | CurrentLocalMemUsage += Alloc.first; | ||||
803 | } | ||||
804 | |||||
805 | unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage, | ||||
806 | F); | ||||
807 | |||||
808 | // Restrict local memory usage so that we don't drastically reduce occupancy, | ||||
809 | // unless it is already significantly reduced. | ||||
810 | |||||
811 | // TODO: Have some sort of hint or other heuristics to guess occupancy based | ||||
812 |   // on other factors.
813 | unsigned OccupancyHint = ST.getWavesPerEU(F).second; | ||||
814 | if (OccupancyHint == 0) | ||||
815 | OccupancyHint = 7; | ||||
816 | |||||
817 | // Clamp to max value. | ||||
818 | OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU()); | ||||
819 | |||||
820 | // Check the hint but ignore it if it's obviously wrong from the existing LDS | ||||
821 | // usage. | ||||
822 | MaxOccupancy = std::min(OccupancyHint, MaxOccupancy); | ||||
823 | |||||
824 | |||||
825 | // Round up to the next tier of usage. | ||||
826 | unsigned MaxSizeWithWaveCount | ||||
827 | = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F); | ||||
828 | |||||
829 | // Program is possibly broken by using more local mem than available. | ||||
830 | if (CurrentLocalMemUsage > MaxSizeWithWaveCount) | ||||
831 | return false; | ||||
832 | |||||
833 | LocalMemLimit = MaxSizeWithWaveCount; | ||||
834 | |||||
835 |   LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
836 |                     << " bytes of LDS\n"
837 |                     << "  Rounding size to " << MaxSizeWithWaveCount
838 |                     << " with a maximum occupancy of " << MaxOccupancy << '\n'
839 |                     << " and " << (LocalMemLimit - CurrentLocalMemUsage)
840 |                     << " available for promotion\n");
841 | |||||
842 | return true; | ||||
843 | } | ||||
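// Worked example (illustrative; the exact numbers are subtarget-dependent):
// if the module's LDS globals already use 32768 bytes and the subtarget says
// that usage sustains at most 2 waves/EU, while the function's waves-per-EU
// hint asks for 7, then MaxOccupancy = min(7, 2) = 2. LocalMemLimit is then
// rounded up to the largest LDS size that still sustains 2 waves, and the
// slack (LocalMemLimit - CurrentLocalMemUsage) is what handleAlloca below is
// allowed to consume.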
844 | |||||
845 | // FIXME: Should try to pick the most likely to be profitable allocas first. | ||||
846 | bool AMDGPUPromoteAllocaImpl::handleAlloca(AllocaInst &I, bool SufficientLDS) { | ||||
847 | // Array allocations are probably not worth handling, since an allocation of | ||||
848 | // the array type is the canonical form. | ||||
849 | if (!I.isStaticAlloca() || I.isArrayAllocation()) | ||||
850 | return false; | ||||
851 | |||||
852 | const DataLayout &DL = Mod->getDataLayout(); | ||||
853 | IRBuilder<> Builder(&I); | ||||
854 | |||||
855 | // First try to replace the alloca with a vector | ||||
856 | Type *AllocaTy = I.getAllocatedType(); | ||||
857 | |||||
858 |   LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
859 | |||||
860 | if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs)) | ||||
861 | return true; // Promoted to vector. | ||||
862 | |||||
863 | if (DisablePromoteAllocaToLDS) | ||||
864 | return false; | ||||
865 | |||||
866 | const Function &ContainingFunction = *I.getParent()->getParent(); | ||||
867 | CallingConv::ID CC = ContainingFunction.getCallingConv(); | ||||
868 | |||||
869 | // Don't promote the alloca to LDS for shader calling conventions as the work | ||||
870 | // item ID intrinsics are not supported for these calling conventions. | ||||
871 | // Furthermore not all LDS is available for some of the stages. | ||||
872 | switch (CC) { | ||||
873 | case CallingConv::AMDGPU_KERNEL: | ||||
874 | case CallingConv::SPIR_KERNEL: | ||||
875 | break; | ||||
876 | default: | ||||
877 |     LLVM_DEBUG(
878 |         dbgs()
879 |         << " promote alloca to LDS not supported with calling convention.\n");
880 | return false; | ||||
881 | } | ||||
882 | |||||
883 | // Not likely to have sufficient local memory for promotion. | ||||
884 | if (!SufficientLDS) | ||||
885 | return false; | ||||
886 | |||||
887 | const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction); | ||||
888 | unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second; | ||||
889 | |||||
890 | Align Alignment = | ||||
891 | DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType()); | ||||
892 | |||||
893 | // FIXME: This computed padding is likely wrong since it depends on inverse | ||||
894 | // usage order. | ||||
895 | // | ||||
896 |   // FIXME: It is also possible that if we're allowed to use all of the memory,
897 |   // we could end up using more than the maximum due to alignment padding.
898 | |||||
899 | uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment); | ||||
900 | uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy); | ||||
901 | NewSize += AllocSize; | ||||
902 | |||||
903 | if (NewSize > LocalMemLimit) { | ||||
904 |     LLVM_DEBUG(dbgs() << " " << AllocSize
905 |                       << " bytes of local memory not available to promote\n");
906 | return false; | ||||
907 | } | ||||
908 | |||||
909 | CurrentLocalMemUsage = NewSize; | ||||
910 | |||||
911 | std::vector<Value*> WorkList; | ||||
912 | |||||
913 | if (!collectUsesWithPtrTypes(&I, &I, WorkList)) { | ||||
914 |     LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
915 | return false; | ||||
916 | } | ||||
917 | |||||
918 |   LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");
919 | |||||
920 | Function *F = I.getParent()->getParent(); | ||||
921 | |||||
922 | Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize); | ||||
923 | GlobalVariable *GV = new GlobalVariable( | ||||
924 | *Mod, GVTy, false, GlobalValue::InternalLinkage, | ||||
925 | UndefValue::get(GVTy), | ||||
926 | Twine(F->getName()) + Twine('.') + I.getName(), | ||||
927 | nullptr, | ||||
928 | GlobalVariable::NotThreadLocal, | ||||
929 | AMDGPUAS::LOCAL_ADDRESS); | ||||
930 | GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); | ||||
931 | GV->setAlignment(MaybeAlign(I.getAlignment())); | ||||
932 | |||||
933 | Value *TCntY, *TCntZ; | ||||
934 | |||||
935 | std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder); | ||||
936 | Value *TIdX = getWorkitemID(Builder, 0); | ||||
937 | Value *TIdY = getWorkitemID(Builder, 1); | ||||
938 | Value *TIdZ = getWorkitemID(Builder, 2); | ||||
939 | |||||
940 | Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true); | ||||
941 | Tmp0 = Builder.CreateMul(Tmp0, TIdX); | ||||
942 | Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true); | ||||
943 | Value *TID = Builder.CreateAdd(Tmp0, Tmp1); | ||||
944 | TID = Builder.CreateAdd(TID, TIdZ); | ||||
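  // i.e. TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ: the linear
  // workitem index within the workgroup, used below to give each workitem
  // its own element of the LDS array.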
945 | |||||
946 | Value *Indices[] = { | ||||
947 | Constant::getNullValue(Type::getInt32Ty(Mod->getContext())), | ||||
948 | TID | ||||
949 | }; | ||||
950 | |||||
951 | Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices); | ||||
952 | I.mutateType(Offset->getType()); | ||||
953 | I.replaceAllUsesWith(Offset); | ||||
954 | I.eraseFromParent(); | ||||
955 | |||||
956 | SmallVector<IntrinsicInst *> DeferredIntrs; | ||||
957 | |||||
958 | for (Value *V : WorkList) { | ||||
959 | CallInst *Call = dyn_cast<CallInst>(V); | ||||
960 | if (!Call) { | ||||
961 | if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) { | ||||
962 | Value *Src0 = CI->getOperand(0); | ||||
963 | PointerType *NewTy = PointerType::getWithSamePointeeType( | ||||
964 | cast<PointerType>(Src0->getType()), AMDGPUAS::LOCAL_ADDRESS); | ||||
965 | |||||
966 | if (isa<ConstantPointerNull>(CI->getOperand(0))) | ||||
967 | CI->setOperand(0, ConstantPointerNull::get(NewTy)); | ||||
968 | |||||
969 | if (isa<ConstantPointerNull>(CI->getOperand(1))) | ||||
970 | CI->setOperand(1, ConstantPointerNull::get(NewTy)); | ||||
971 | |||||
972 | continue; | ||||
973 | } | ||||
974 | |||||
975 | // The operand's value should be corrected on its own and we don't want to | ||||
976 | // touch the users. | ||||
977 | if (isa<AddrSpaceCastInst>(V)) | ||||
978 | continue; | ||||
979 | |||||
980 | PointerType *NewTy = PointerType::getWithSamePointeeType( | ||||
981 | cast<PointerType>(V->getType()), AMDGPUAS::LOCAL_ADDRESS); | ||||
982 | |||||
983 | // FIXME: It doesn't really make sense to try to do this for all | ||||
984 | // instructions. | ||||
985 | V->mutateType(NewTy); | ||||
986 | |||||
987 | // Adjust the types of any constant operands. | ||||
988 | if (SelectInst *SI = dyn_cast<SelectInst>(V)) { | ||||
989 | if (isa<ConstantPointerNull>(SI->getOperand(1))) | ||||
990 | SI->setOperand(1, ConstantPointerNull::get(NewTy)); | ||||
991 | |||||
992 | if (isa<ConstantPointerNull>(SI->getOperand(2))) | ||||
993 | SI->setOperand(2, ConstantPointerNull::get(NewTy)); | ||||
994 | } else if (PHINode *Phi = dyn_cast<PHINode>(V)) { | ||||
995 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) { | ||||
996 | if (isa<ConstantPointerNull>(Phi->getIncomingValue(I))) | ||||
997 | Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy)); | ||||
998 | } | ||||
999 | } | ||||
1000 | |||||
1001 | continue; | ||||
1002 | } | ||||
1003 | |||||
1004 | IntrinsicInst *Intr = cast<IntrinsicInst>(Call); | ||||
1005 | Builder.SetInsertPoint(Intr); | ||||
1006 | switch (Intr->getIntrinsicID()) { | ||||
1007 | case Intrinsic::lifetime_start: | ||||
1008 | case Intrinsic::lifetime_end: | ||||
1009 | // These intrinsics are for address space 0 only | ||||
1010 | Intr->eraseFromParent(); | ||||
1011 | continue; | ||||
1012 | case Intrinsic::memcpy: | ||||
1013 | case Intrinsic::memmove: | ||||
1014 |       // These have 2 pointer operands. In case the second pointer also needs
1015 | // to be replaced we defer processing of these intrinsics until all | ||||
1016 | // other values are processed. | ||||
1017 | DeferredIntrs.push_back(Intr); | ||||
1018 | continue; | ||||
1019 | case Intrinsic::memset: { | ||||
1020 | MemSetInst *MemSet = cast<MemSetInst>(Intr); | ||||
1021 | Builder.CreateMemSet( | ||||
1022 | MemSet->getRawDest(), MemSet->getValue(), MemSet->getLength(), | ||||
1023 | MaybeAlign(MemSet->getDestAlignment()), MemSet->isVolatile()); | ||||
1024 | Intr->eraseFromParent(); | ||||
1025 | continue; | ||||
1026 | } | ||||
1027 | case Intrinsic::invariant_start: | ||||
1028 | case Intrinsic::invariant_end: | ||||
1029 | case Intrinsic::launder_invariant_group: | ||||
1030 | case Intrinsic::strip_invariant_group: | ||||
1031 | Intr->eraseFromParent(); | ||||
1032 | // FIXME: I think the invariant marker should still theoretically apply, | ||||
1033 | // but the intrinsics need to be changed to accept pointers with any | ||||
1034 | // address space. | ||||
1035 | continue; | ||||
1036 | case Intrinsic::objectsize: { | ||||
1037 | Value *Src = Intr->getOperand(0); | ||||
1038 | Function *ObjectSize = Intrinsic::getDeclaration( | ||||
1039 | Mod, Intrinsic::objectsize, | ||||
1040 | {Intr->getType(), | ||||
1041 | PointerType::getWithSamePointeeType( | ||||
1042 | cast<PointerType>(Src->getType()), AMDGPUAS::LOCAL_ADDRESS)}); | ||||
1043 | |||||
1044 | CallInst *NewCall = Builder.CreateCall( | ||||
1045 | ObjectSize, | ||||
1046 | {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)}); | ||||
1047 | Intr->replaceAllUsesWith(NewCall); | ||||
1048 | Intr->eraseFromParent(); | ||||
1049 | continue; | ||||
1050 | } | ||||
1051 | default: | ||||
1052 | Intr->print(errs()); | ||||
1053 |       llvm_unreachable("Don't know how to promote alloca intrinsic use.");
1054 | } | ||||
1055 | } | ||||
1056 | |||||
1057 | for (IntrinsicInst *Intr : DeferredIntrs) { | ||||
1058 | Builder.SetInsertPoint(Intr); | ||||
1059 | Intrinsic::ID ID = Intr->getIntrinsicID(); | ||||
1060 |     assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);
1061 | |||||
1062 | MemTransferInst *MI = cast<MemTransferInst>(Intr); | ||||
1063 | auto *B = | ||||
1064 | Builder.CreateMemTransferInst(ID, MI->getRawDest(), MI->getDestAlign(), | ||||
1065 | MI->getRawSource(), MI->getSourceAlign(), | ||||
1066 | MI->getLength(), MI->isVolatile()); | ||||
1067 | |||||
1068 | for (unsigned I = 1; I != 3; ++I) { | ||||
1069 | if (uint64_t Bytes = Intr->getDereferenceableBytes(I)) { | ||||
1070 | B->addDereferenceableAttr(I, Bytes); | ||||
1071 | } | ||||
1072 | } | ||||
1073 | |||||
1074 | Intr->eraseFromParent(); | ||||
1075 | } | ||||
1076 | |||||
1077 | return true; | ||||
1078 | } | ||||
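// Illustrative result of the LDS path above (a sketch; the kernel name @k and
// the value names are made up), for "%stack = alloca i32, addrspace(5)" in a
// kernel with a maximum flat workgroup size of 256:
//
//   @k.stack = internal unnamed_addr addrspace(3) global [256 x i32] undef
//   ...
//   %tid  = <linear workitem index as computed above>
//   %slot = getelementptr inbounds [256 x i32],
//           [256 x i32] addrspace(3)* @k.stack, i32 0, i32 %tid
//
// and every former use of %stack now goes through %slot.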
1079 | |||||
1080 | bool handlePromoteAllocaToVector(AllocaInst &I, unsigned MaxVGPRs) { | ||||
1081 | // Array allocations are probably not worth handling, since an allocation of | ||||
1082 | // the array type is the canonical form. | ||||
1083 | if (!I.isStaticAlloca() || I.isArrayAllocation()) | ||||
1084 | return false; | ||||
1085 | |||||
1086 |   LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
1087 | |||||
1088 | Module *Mod = I.getParent()->getParent()->getParent(); | ||||
1089 | return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs); | ||||
1090 | } | ||||
1091 | |||||
1092 | bool promoteAllocasToVector(Function &F, TargetMachine &TM) { | ||||
1093 | if (DisablePromoteAllocaToVector) | ||||
1094 | return false; | ||||
1095 | |||||
1096 | const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F); | ||||
1097 | if (!ST.isPromoteAllocaEnabled()) | ||||
1098 | return false; | ||||
1099 | |||||
1100 | unsigned MaxVGPRs; | ||||
1101 | if (TM.getTargetTriple().getArch() == Triple::amdgcn) { | ||||
1102 | const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F); | ||||
1103 | MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first); | ||||
1104 | } else { | ||||
1105 | MaxVGPRs = 128; | ||||
1106 | } | ||||
1107 | |||||
1108 | bool Changed = false; | ||||
1109 | BasicBlock &EntryBB = *F.begin(); | ||||
1110 | |||||
1111 | SmallVector<AllocaInst *, 16> Allocas; | ||||
1112 | for (Instruction &I : EntryBB) { | ||||
1113 | if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) | ||||
1114 | Allocas.push_back(AI); | ||||
1115 | } | ||||
1116 | |||||
1117 | for (AllocaInst *AI : Allocas) { | ||||
1118 | if (handlePromoteAllocaToVector(*AI, MaxVGPRs)) | ||||
1119 | Changed = true; | ||||
1120 | } | ||||
1121 | |||||
1122 | return Changed; | ||||
1123 | } | ||||
1124 | |||||
1125 | bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) { | ||||
1126 | if (skipFunction(F)) | ||||
1127 | return false; | ||||
1128 | if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) { | ||||
1129 | return promoteAllocasToVector(F, TPC->getTM<TargetMachine>()); | ||||
1130 | } | ||||
1131 | return false; | ||||
1132 | } | ||||
1133 | |||||
1134 | PreservedAnalyses | ||||
1135 | AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) { | ||||
1136 |   bool Changed = promoteAllocasToVector(F, TM);
1137 | if (Changed) { | ||||
1138 | PreservedAnalyses PA; | ||||
1139 | PA.preserveSet<CFGAnalyses>(); | ||||
1140 | return PA; | ||||
1141 | } | ||||
1142 | return PreservedAnalyses::all(); | ||||
1143 | } | ||||
1144 | |||||
1145 | FunctionPass *llvm::createAMDGPUPromoteAlloca() { | ||||
1146 | return new AMDGPUPromoteAlloca(); | ||||
1147 | } | ||||
1148 | |||||
1149 | FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() { | ||||
1150 | return new AMDGPUPromoteAllocaToVector(); | ||||
1151 | } |
File: llvm/ADT/SmallVector.h

1 | //===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the SmallVector class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #ifndef LLVM_ADT_SMALLVECTOR_H |
14 | #define LLVM_ADT_SMALLVECTOR_H |
15 | |
16 | #include "llvm/ADT/iterator_range.h" |
17 | #include "llvm/Support/Compiler.h" |
18 | #include "llvm/Support/ErrorHandling.h" |
19 | #include "llvm/Support/MemAlloc.h" |
20 | #include "llvm/Support/type_traits.h" |
21 | #include <algorithm> |
22 | #include <cassert> |
23 | #include <cstddef> |
24 | #include <cstdlib> |
25 | #include <cstring> |
26 | #include <functional> |
27 | #include <initializer_list> |
28 | #include <iterator> |
29 | #include <limits> |
30 | #include <memory> |
31 | #include <new> |
32 | #include <type_traits> |
33 | #include <utility> |
34 | |
35 | namespace llvm { |
36 | |
37 | /// This is all the stuff common to all SmallVectors. |
38 | /// |
39 | /// The template parameter specifies the type which should be used to hold the |
40 | /// Size and Capacity of the SmallVector, so it can be adjusted. |
41 | /// Using 32 bit size is desirable to shrink the size of the SmallVector. |
42 | /// Using 64 bit size is desirable for cases like SmallVector<char>, where a |
43 | /// 32 bit size would limit the vector to ~4GB. SmallVectors are used for |
44 | /// buffering bitcode output - which can exceed 4GB. |
45 | template <class Size_T> class SmallVectorBase { |
46 | protected: |
47 | void *BeginX; |
48 | Size_T Size = 0, Capacity; |
49 | |
50 | /// The maximum value of the Size_T used. |
51 | static constexpr size_t SizeTypeMax() { |
52 | return std::numeric_limits<Size_T>::max(); |
53 | } |
54 | |
55 | SmallVectorBase() = delete; |
56 | SmallVectorBase(void *FirstEl, size_t TotalCapacity) |
57 | : BeginX(FirstEl), Capacity(TotalCapacity) {} |
58 | |
59 | /// This is a helper for \a grow() that's out of line to reduce code |
60 | /// duplication. This function will report a fatal error if it can't grow at |
61 | /// least to \p MinSize. |
62 | void *mallocForGrow(size_t MinSize, size_t TSize, size_t &NewCapacity); |
63 | |
64 | /// This is an implementation of the grow() method which only works |
65 | /// on POD-like data types and is out of line to reduce code duplication. |
66 | /// This function will report a fatal error if it cannot increase capacity. |
67 | void grow_pod(void *FirstEl, size_t MinSize, size_t TSize); |
68 | |
69 | public: |
70 | size_t size() const { return Size; } |
71 | size_t capacity() const { return Capacity; } |
72 | |
73 |   LLVM_NODISCARD bool empty() const { return !Size; }
74 | |
75 | /// Set the array size to \p N, which the current array must have enough |
76 | /// capacity for. |
77 | /// |
78 | /// This does not construct or destroy any elements in the vector. |
79 | /// |
80 | /// Clients can use this in conjunction with capacity() to write past the end |
81 | /// of the buffer when they know that more elements are available, and only |
82 | /// update the size later. This avoids the cost of value initializing elements |
83 | /// which will only be overwritten. |
84 | void set_size(size_t N) { |
85 |     assert(N <= capacity());
86 | Size = N; |
87 | } |
88 | }; |
89 | |
90 | template <class T> |
91 | using SmallVectorSizeType = |
92 | typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t, |
93 | uint32_t>::type; |
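// For example: on a typical 64-bit host, SmallVector<char> selects uint64_t
// (sizeof(char) < 4 and sizeof(void *) >= 8), so such vectors can exceed
// 4 GiB, while SmallVector<int> keeps 32-bit Size/Capacity fields to stay
// compact.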
94 | |
95 | /// Figure out the offset of the first element. |
96 | template <class T, typename = void> struct SmallVectorAlignmentAndSize { |
97 | alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof( |
98 | SmallVectorBase<SmallVectorSizeType<T>>)]; |
99 | alignas(T) char FirstEl[sizeof(T)]; |
100 | }; |
101 | |
102 | /// This is the part of SmallVectorTemplateBase which does not depend on whether |
103 | /// the type T is a POD. The extra dummy template argument is used by ArrayRef |
104 | /// to avoid unnecessarily requiring T to be complete. |
105 | template <typename T, typename = void> |
106 | class SmallVectorTemplateCommon |
107 | : public SmallVectorBase<SmallVectorSizeType<T>> { |
108 | using Base = SmallVectorBase<SmallVectorSizeType<T>>; |
109 | |
110 | /// Find the address of the first element. For this pointer math to be valid |
111 | /// with small-size of 0 for T with lots of alignment, it's important that |
112 | /// SmallVectorStorage is properly-aligned even for small-size of 0. |
113 | void *getFirstEl() const { |
114 | return const_cast<void *>(reinterpret_cast<const void *>( |
115 | reinterpret_cast<const char *>(this) + |
116 |         offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
117 | } |
118 | // Space after 'FirstEl' is clobbered, do not add any instance vars after it. |
119 | |
120 | protected: |
121 | SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {} |
122 | |
123 | void grow_pod(size_t MinSize, size_t TSize) { |
124 | Base::grow_pod(getFirstEl(), MinSize, TSize); |
125 | } |
126 | |
127 | /// Return true if this is a smallvector which has not had dynamic |
128 | /// memory allocated for it. |
129 | bool isSmall() const { return this->BeginX == getFirstEl(); } |
130 | |
131 | /// Put this vector in a state of being small. |
132 | void resetToSmall() { |
133 | this->BeginX = getFirstEl(); |
134 | this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect. |
135 | } |
136 | |
137 | /// Return true if V is an internal reference to the given range. |
138 | bool isReferenceToRange(const void *V, const void *First, const void *Last) const { |
139 | // Use std::less to avoid UB. |
140 | std::less<> LessThan; |
141 | return !LessThan(V, First) && LessThan(V, Last); |
142 | } |
143 | |
144 | /// Return true if V is an internal reference to this vector. |
145 | bool isReferenceToStorage(const void *V) const { |
146 | return isReferenceToRange(V, this->begin(), this->end()); |
147 | } |
148 | |
149 | /// Return true if First and Last form a valid (possibly empty) range in this |
150 | /// vector's storage. |
151 | bool isRangeInStorage(const void *First, const void *Last) const { |
152 | // Use std::less to avoid UB. |
153 | std::less<> LessThan; |
154 | return !LessThan(First, this->begin()) && !LessThan(Last, First) && |
155 | !LessThan(this->end(), Last); |
156 | } |
157 | |
158 | /// Return true unless Elt will be invalidated by resizing the vector to |
159 | /// NewSize. |
160 | bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) { |
161 | // Past the end. |
162 | if (LLVM_LIKELY(!isReferenceToStorage(Elt))) |
163 | return true; |
164 | |
165 | // Return false if Elt will be destroyed by shrinking. |
166 | if (NewSize <= this->size()) |
167 | return Elt < this->begin() + NewSize; |
168 | |
169 | // Return false if we need to grow. |
170 | return NewSize <= this->capacity(); |
171 | } |
172 | |
173 | /// Check whether Elt will be invalidated by resizing the vector to NewSize. |
174 | void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) { |
175 | assert(isSafeToReferenceAfterResize(Elt, NewSize) && |
176 | "Attempting to reference an element of the vector in an operation " |
177 | "that invalidates it"); |
178 | } |
179 | |
180 | /// Check whether Elt will be invalidated by increasing the size of the |
181 | /// vector by N. |
182 | void assertSafeToAdd(const void *Elt, size_t N = 1) { |
183 | this->assertSafeToReferenceAfterResize(Elt, this->size() + N); |
184 | } |
185 | |
186 | /// Check whether any part of the range will be invalidated by clearing. |
187 | void assertSafeToReferenceAfterClear(const T *From, const T *To) { |
188 | if (From == To) |
189 | return; |
190 | this->assertSafeToReferenceAfterResize(From, 0); |
191 | this->assertSafeToReferenceAfterResize(To - 1, 0); |
192 | } |
193 | template < |
194 | class ItTy, |
195 | std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value, |
196 | bool> = false> |
197 | void assertSafeToReferenceAfterClear(ItTy, ItTy) {} |
198 | |
199 | /// Check whether any part of the range will be invalidated by growing. |
200 | void assertSafeToAddRange(const T *From, const T *To) { |
201 | if (From == To) |
202 | return; |
203 | this->assertSafeToAdd(From, To - From); |
204 | this->assertSafeToAdd(To - 1, To - From); |
205 | } |
206 | template < |
207 | class ItTy, |
208 | std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value, |
209 | bool> = false> |
210 | void assertSafeToAddRange(ItTy, ItTy) {} |
211 | |
212 | /// Reserve enough space to add one element, and return the updated element |
213 | /// pointer in case it was a reference to the storage. |
214 | template <class U> |
215 | static const T *reserveForParamAndGetAddressImpl(U *This, const T &Elt, |
216 | size_t N) { |
217 | size_t NewSize = This->size() + N; |
218 | if (LLVM_LIKELY(NewSize <= This->capacity())) |
219 | return &Elt; |
220 | |
221 | bool ReferencesStorage = false; |
222 | int64_t Index = -1; |
223 | if (!U::TakesParamByValue) { |
224 | if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))) { |
225 | ReferencesStorage = true; |
226 | Index = &Elt - This->begin(); |
227 | } |
228 | } |
229 | This->grow(NewSize); |
230 | return ReferencesStorage ? This->begin() + Index : &Elt; |
231 | } |
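The practical consequence: pushing an element of a vector into itself stays valid even when the push triggers a reallocation. A sketch of the case this code guards against (assumes <cassert> and <string>):

    llvm::SmallVector<std::string, 2> V = {"a", "b"}; // size() == capacity()
    // A naive push_back(V[0]) would read V[0] after grow() freed it;
    // reserveForParamAndGetAddressImpl re-derives the address instead.
    V.push_back(V[0]);
    assert(V.back() == "a");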
232 | |
233 | public: |
234 | using size_type = size_t; |
235 | using difference_type = ptrdiff_t; |
236 | using value_type = T; |
237 | using iterator = T *; |
238 | using const_iterator = const T *; |
239 | |
240 | using const_reverse_iterator = std::reverse_iterator<const_iterator>; |
241 | using reverse_iterator = std::reverse_iterator<iterator>; |
242 | |
243 | using reference = T &; |
244 | using const_reference = const T &; |
245 | using pointer = T *; |
246 | using const_pointer = const T *; |
247 | |
248 | using Base::capacity; |
249 | using Base::empty; |
250 | using Base::size; |
251 | |
252 | // forward iterator creation methods. |
253 | iterator begin() { return (iterator)this->BeginX; } |
254 | const_iterator begin() const { return (const_iterator)this->BeginX; } |
255 | iterator end() { return begin() + size(); } |
256 | const_iterator end() const { return begin() + size(); } |
257 | |
258 | // reverse iterator creation methods. |
259 | reverse_iterator rbegin() { return reverse_iterator(end()); } |
260 | const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); } |
261 | reverse_iterator rend() { return reverse_iterator(begin()); } |
262 | const_reverse_iterator rend() const { return const_reverse_iterator(begin()); } |
263 | |
264 | size_type size_in_bytes() const { return size() * sizeof(T); } |
265 | size_type max_size() const { |
266 | return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T)); |
267 | } |
268 | |
269 | size_t capacity_in_bytes() const { return capacity() * sizeof(T); } |
270 | |
271 | /// Return a pointer to the vector's buffer, even if empty(). |
272 | pointer data() { return pointer(begin()); } |
273 | /// Return a pointer to the vector's buffer, even if empty(). |
274 | const_pointer data() const { return const_pointer(begin()); } |
275 | |
276 | reference operator[](size_type idx) { |
277 | assert(idx < size()); |
278 | return begin()[idx]; |
279 | } |
280 | const_reference operator[](size_type idx) const { |
281 | assert(idx < size()); |
282 | return begin()[idx]; |
283 | } |
284 | |
285 | reference front() { |
286 | assert(!empty()); |
287 | return begin()[0]; |
288 | } |
289 | const_reference front() const { |
290 | assert(!empty()); |
291 | return begin()[0]; |
292 | } |
293 | |
294 | reference back() { |
295 | assert(!empty()); |
296 | return end()[-1]; |
297 | } |
298 | const_reference back() const { |
299 | assert(!empty()); |
300 | return end()[-1]; |
301 | } |
302 | }; |
303 | |
304 | /// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put |
305 | /// method implementations that are designed to work with non-trivial T's. |
306 | /// |
307 | /// We approximate is_trivially_copyable with trivial move/copy construction and |
308 | /// trivial destruction. While the standard doesn't specify that you're allowed |
309 | /// to copy these types with memcpy, there is no way for the type to observe this. |
310 | /// This catches the important case of std::pair<POD, POD>, which is not |
311 | /// trivially assignable. |
312 | template <typename T, bool = (is_trivially_copy_constructible<T>::value) && |
313 | (is_trivially_move_constructible<T>::value) && |
314 | std::is_trivially_destructible<T>::value> |
315 | class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> { |
316 | friend class SmallVectorTemplateCommon<T>; |
317 | |
318 | protected: |
319 | static constexpr bool TakesParamByValue = false; |
320 | using ValueParamT = const T &; |
321 | |
322 | SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {} |
323 | |
324 | static void destroy_range(T *S, T *E) { |
325 | while (S != E) { |
326 | --E; |
327 | E->~T(); |
328 | } |
329 | } |
330 | |
331 | /// Move the range [I, E) into the uninitialized memory starting with "Dest", |
332 | /// constructing elements as needed. |
333 | template<typename It1, typename It2> |
334 | static void uninitialized_move(It1 I, It1 E, It2 Dest) { |
335 | std::uninitialized_copy(std::make_move_iterator(I), |
336 | std::make_move_iterator(E), Dest); |
337 | } |
338 | |
339 | /// Copy the range [I, E) onto the uninitialized memory starting with "Dest", |
340 | /// constructing elements as needed. |
341 | template<typename It1, typename It2> |
342 | static void uninitialized_copy(It1 I, It1 E, It2 Dest) { |
343 | std::uninitialized_copy(I, E, Dest); |
344 | } |
345 | |
346 | /// Grow the allocated memory (without initializing new elements), doubling |
347 | /// the size of the allocated memory. Guarantees space for at least one more |
348 | /// element, or MinSize more elements if specified. |
349 | void grow(size_t MinSize = 0); |
350 | |
351 | /// Create a new allocation big enough for \p MinSize and pass back its size |
352 | /// in \p NewCapacity. This is the first section of \a grow(). |
353 | T *mallocForGrow(size_t MinSize, size_t &NewCapacity) { |
354 | return static_cast<T *>( |
355 | SmallVectorBase<SmallVectorSizeType<T>>::mallocForGrow( |
356 | MinSize, sizeof(T), NewCapacity)); |
357 | } |
358 | |
359 | /// Move existing elements over to the new allocation \p NewElts, the middle |
360 | /// section of \a grow(). |
361 | void moveElementsForGrow(T *NewElts); |
362 | |
363 | /// Transfer ownership of the allocation, finishing up \a grow(). |
364 | void takeAllocationForGrow(T *NewElts, size_t NewCapacity); |
365 | |
366 | /// Reserve enough space to add one element, and return the updated element |
367 | /// pointer in case it was a reference to the storage. |
368 | const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) { |
369 | return this->reserveForParamAndGetAddressImpl(this, Elt, N); |
370 | } |
371 | |
372 | /// Reserve enough space to add one element, and return the updated element |
373 | /// pointer in case it was a reference to the storage. |
374 | T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) { |
375 | return const_cast<T *>( |
376 | this->reserveForParamAndGetAddressImpl(this, Elt, N)); |
377 | } |
378 | |
379 | static T &&forward_value_param(T &&V) { return std::move(V); } |
380 | static const T &forward_value_param(const T &V) { return V; } |
381 | |
382 | void growAndAssign(size_t NumElts, const T &Elt) { |
383 | // Grow manually in case Elt is an internal reference. |
384 | size_t NewCapacity; |
385 | T *NewElts = mallocForGrow(NumElts, NewCapacity); |
386 | std::uninitialized_fill_n(NewElts, NumElts, Elt); |
387 | this->destroy_range(this->begin(), this->end()); |
388 | takeAllocationForGrow(NewElts, NewCapacity); |
389 | this->set_size(NumElts); |
390 | } |
391 | |
392 | template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) { |
393 | // Grow manually in case one of Args is an internal reference. |
394 | size_t NewCapacity; |
395 | T *NewElts = mallocForGrow(0, NewCapacity); |
396 | ::new ((void *)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...); |
397 | moveElementsForGrow(NewElts); |
398 | takeAllocationForGrow(NewElts, NewCapacity); |
399 | this->set_size(this->size() + 1); |
400 | return this->back(); |
401 | } |
402 | |
403 | public: |
404 | void push_back(const T &Elt) { |
405 | const T *EltPtr = reserveForParamAndGetAddress(Elt); |
406 | ::new ((void *)this->end()) T(*EltPtr); |
407 | this->set_size(this->size() + 1); |
408 | } |
409 | |
410 | void push_back(T &&Elt) { |
411 | T *EltPtr = reserveForParamAndGetAddress(Elt); |
412 | ::new ((void *)this->end()) T(::std::move(*EltPtr)); |
413 | this->set_size(this->size() + 1); |
414 | } |
415 | |
416 | void pop_back() { |
417 | this->set_size(this->size() - 1); |
418 | this->end()->~T(); |
419 | } |
420 | }; |
421 | |
422 | // Define this out-of-line to dissuade the C++ compiler from inlining it. |
423 | template <typename T, bool TriviallyCopyable> |
424 | void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) { |
425 | size_t NewCapacity; |
426 | T *NewElts = mallocForGrow(MinSize, NewCapacity); |
427 | moveElementsForGrow(NewElts); |
428 | takeAllocationForGrow(NewElts, NewCapacity); |
429 | } |
430 | |
431 | // Define this out-of-line to dissuade the C++ compiler from inlining it. |
432 | template <typename T, bool TriviallyCopyable> |
433 | void SmallVectorTemplateBase<T, TriviallyCopyable>::moveElementsForGrow( |
434 | T *NewElts) { |
435 | // Move the elements over. |
436 | this->uninitialized_move(this->begin(), this->end(), NewElts); |
437 | |
438 | // Destroy the original elements. |
439 | destroy_range(this->begin(), this->end()); |
440 | } |
441 | |
442 | // Define this out-of-line to dissuade the C++ compiler from inlining it. |
443 | template <typename T, bool TriviallyCopyable> |
444 | void SmallVectorTemplateBase<T, TriviallyCopyable>::takeAllocationForGrow( |
445 | T *NewElts, size_t NewCapacity) { |
446 | // If this wasn't grown from the inline copy, deallocate the old space. |
447 | if (!this->isSmall()) |
448 | free(this->begin()); |
449 | |
450 | this->BeginX = NewElts; |
451 | this->Capacity = NewCapacity; |
452 | } |
453 | |
454 | /// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put |
455 | /// method implementations that are designed to work with trivially copyable |
456 | /// T's. This allows using memcpy in place of copy/move construction and |
457 | /// skipping destruction. |
458 | template <typename T> |
459 | class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> { |
460 | friend class SmallVectorTemplateCommon<T>; |
461 | |
462 | protected: |
463 | /// True if it's cheap enough to take parameters by value. Doing so avoids |
464 | /// overhead related to mitigations for reference invalidation. |
465 | static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void *); |
466 | |
467 | /// Either const T& or T, depending on whether it's cheap enough to take |
468 | /// parameters by value. |
469 | using ValueParamT = |
470 | typename std::conditional<TakesParamByValue, T, const T &>::type; |
471 | |
472 | SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {} |
473 | |
474 | // No need to do a destroy loop for POD's. |
475 | static void destroy_range(T *, T *) {} |
476 | |
477 | /// Move the range [I, E) onto the uninitialized memory |
478 | /// starting with "Dest", constructing elements into it as needed. |
479 | template<typename It1, typename It2> |
480 | static void uninitialized_move(It1 I, It1 E, It2 Dest) { |
481 | // Just do a copy. |
482 | uninitialized_copy(I, E, Dest); |
483 | } |
484 | |
485 | /// Copy the range [I, E) onto the uninitialized memory |
486 | /// starting with "Dest", constructing elements into it as needed. |
487 | template<typename It1, typename It2> |
488 | static void uninitialized_copy(It1 I, It1 E, It2 Dest) { |
489 | // Arbitrary iterator types; just use the basic implementation. |
490 | std::uninitialized_copy(I, E, Dest); |
491 | } |
492 | |
493 | /// Copy the range [I, E) onto the uninitialized memory |
494 | /// starting with "Dest", constructing elements into it as needed. |
495 | template <typename T1, typename T2> |
496 | static void uninitialized_copy( |
497 | T1 *I, T1 *E, T2 *Dest, |
498 | std::enable_if_t<std::is_same<typename std::remove_const<T1>::type, |
499 | T2>::value> * = nullptr) { |
500 | // Use memcpy for PODs iterated by pointers (which includes SmallVector |
501 | // iterators): std::uninitialized_copy optimizes to memmove, but we can |
502 | // use memcpy here. Note that I and E are iterators and thus might be |
503 | // invalid for memcpy if they are equal. |
504 | if (I != E) |
505 | memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T)); |
506 | } |
507 | |
508 | /// Double the size of the allocated memory, guaranteeing space for at |
509 | /// least one more element or MinSize if specified. |
510 | void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); } |
511 | |
512 | /// Reserve enough space to add one element, and return the updated element |
513 | /// pointer in case it was a reference to the storage. |
514 | const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) { |
515 | return this->reserveForParamAndGetAddressImpl(this, Elt, N); |
516 | } |
517 | |
518 | /// Reserve enough space to add one element, and return the updated element |
519 | /// pointer in case it was a reference to the storage. |
520 | T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) { |
521 | return const_cast<T *>( |
522 | this->reserveForParamAndGetAddressImpl(this, Elt, N)); |
523 | } |
524 | |
525 | /// Copy \p V or return a reference, depending on \a ValueParamT. |
526 | static ValueParamT forward_value_param(ValueParamT V) { return V; } |
527 | |
528 | void growAndAssign(size_t NumElts, T Elt) { |
529 | // Elt has been copied in case it's an internal reference, side-stepping |
530 | // reference invalidation problems without losing the realloc optimization. |
531 | this->set_size(0); |
532 | this->grow(NumElts); |
533 | std::uninitialized_fill_n(this->begin(), NumElts, Elt); |
534 | this->set_size(NumElts); |
535 | } |
536 | |
537 | template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) { |
538 | // Use push_back with a copy in case Args has an internal reference, |
539 | // side-stepping reference invalidation problems without losing the realloc |
540 | // optimization. |
541 | push_back(T(std::forward<ArgTypes>(Args)...)); |
542 | return this->back(); |
543 | } |
544 | |
545 | public: |
546 | void push_back(ValueParamT Elt) { |
547 | const T *EltPtr = reserveForParamAndGetAddress(Elt); |
548 | memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T)); |
549 | this->set_size(this->size() + 1); |
550 | } |
551 | |
552 | void pop_back() { this->set_size(this->size() - 1); } |
553 | }; |
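For such T the hot paths reduce to raw byte operations; a brief sketch of what that buys:

    llvm::SmallVector<int, 4> V;
    V.push_back(1); // memcpy of sizeof(int) bytes; int is passed by value (TakesParamByValue)
    V.pop_back();   // just a size decrement, no destructor loop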
554 | |
555 | /// This class consists of common code factored out of the SmallVector class to |
556 | /// reduce code duplication based on the SmallVector 'N' template parameter. |
557 | template <typename T> |
558 | class SmallVectorImpl : public SmallVectorTemplateBase<T> { |
559 | using SuperClass = SmallVectorTemplateBase<T>; |
560 | |
561 | public: |
562 | using iterator = typename SuperClass::iterator; |
563 | using const_iterator = typename SuperClass::const_iterator; |
564 | using reference = typename SuperClass::reference; |
565 | using size_type = typename SuperClass::size_type; |
566 | |
567 | protected: |
568 | using SmallVectorTemplateBase<T>::TakesParamByValue; |
569 | using ValueParamT = typename SuperClass::ValueParamT; |
570 | |
571 | // Default ctor - Initialize to empty. |
572 | explicit SmallVectorImpl(unsigned N) |
573 | : SmallVectorTemplateBase<T>(N) {} |
574 | |
575 | public: |
576 | SmallVectorImpl(const SmallVectorImpl &) = delete; |
577 | |
578 | ~SmallVectorImpl() { |
579 | // Subclass has already destructed this vector's elements. |
580 | // If this wasn't grown from the inline copy, deallocate the old space. |
581 | if (!this->isSmall()) |
582 | free(this->begin()); |
583 | } |
584 | |
585 | void clear() { |
586 | this->destroy_range(this->begin(), this->end()); |
587 | this->Size = 0; |
588 | } |
589 | |
590 | private: |
591 | template <bool ForOverwrite> void resizeImpl(size_type N) { |
592 | if (N < this->size()) { |
593 | this->pop_back_n(this->size() - N); |
594 | } else if (N > this->size()) { |
595 | this->reserve(N); |
596 | for (auto I = this->end(), E = this->begin() + N; I != E; ++I) |
597 | if (ForOverwrite) |
598 | new (&*I) T; |
599 | else |
600 | new (&*I) T(); |
601 | this->set_size(N); |
602 | } |
603 | } |
604 | |
605 | public: |
606 | void resize(size_type N) { resizeImpl<false>(N); } |
607 | |
608 | /// Like resize, but since \ref T is POD, the new values won't be initialized. |
609 | void resize_for_overwrite(size_type N) { resizeImpl<true>(N); } |
610 | |
611 | void resize(size_type N, ValueParamT NV) { |
612 | if (N == this->size()) |
613 | return; |
614 | |
615 | if (N < this->size()) { |
616 | this->pop_back_n(this->size() - N); |
617 | return; |
618 | } |
619 | |
620 | // N > this->size(). Defer to append. |
621 | this->append(N - this->size(), NV); |
622 | } |
623 | |
624 | void reserve(size_type N) { |
625 | if (this->capacity() < N) |
626 | this->grow(N); |
627 | } |
628 | |
629 | void pop_back_n(size_type NumItems) { |
630 | assert(this->size() >= NumItems); |
631 | this->destroy_range(this->end() - NumItems, this->end()); |
632 | this->set_size(this->size() - NumItems); |
633 | } |
634 | |
635 | LLVM_NODISCARD T pop_back_val() { |
636 | T Result = ::std::move(this->back()); |
637 | this->pop_back(); |
638 | return Result; |
639 | } |
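pop_back_val() supports the common worklist idiom, moving the element out before shrinking; a minimal sketch:

    llvm::SmallVector<int, 8> Worklist = {1, 2, 3};
    while (!Worklist.empty()) {
      int Item = Worklist.pop_back_val(); // move out, then pop
      if (Item < 10)
        Worklist.push_back(Item * 4);     // pushing more work is fine here
    }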
640 | |
641 | void swap(SmallVectorImpl &RHS); |
642 | |
643 | /// Add the specified range to the end of the SmallVector. |
644 | template <typename in_iter, |
645 | typename = std::enable_if_t<std::is_convertible< |
646 | typename std::iterator_traits<in_iter>::iterator_category, |
647 | std::input_iterator_tag>::value>> |
648 | void append(in_iter in_start, in_iter in_end) { |
649 | this->assertSafeToAddRange(in_start, in_end); |
650 | size_type NumInputs = std::distance(in_start, in_end); |
651 | this->reserve(this->size() + NumInputs); |
652 | this->uninitialized_copy(in_start, in_end, this->end()); |
653 | this->set_size(this->size() + NumInputs); |
654 | } |
655 | |
656 | /// Append \p NumInputs copies of \p Elt to the end. |
657 | void append(size_type NumInputs, ValueParamT Elt) { |
658 | const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs); |
659 | std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr); |
660 | this->set_size(this->size() + NumInputs); |
661 | } |
662 | |
663 | void append(std::initializer_list<T> IL) { |
664 | append(IL.begin(), IL.end()); |
665 | } |
666 | |
667 | void append(const SmallVectorImpl &RHS) { append(RHS.begin(), RHS.end()); } |
668 | |
669 | void assign(size_type NumElts, ValueParamT Elt) { |
670 | // Note that Elt could be an internal reference. |
671 | if (NumElts > this->capacity()) { |
672 | this->growAndAssign(NumElts, Elt); |
673 | return; |
674 | } |
675 | |
676 | // Assign over existing elements. |
677 | std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt); |
678 | if (NumElts > this->size()) |
679 | std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt); |
680 | else if (NumElts < this->size()) |
681 | this->destroy_range(this->begin() + NumElts, this->end()); |
682 | this->set_size(NumElts); |
683 | } |
684 | |
685 | // FIXME: Consider assigning over existing elements, rather than clearing & |
686 | // re-initializing them - for all assign(...) variants. |
687 | |
688 | template <typename in_iter, |
689 | typename = std::enable_if_t<std::is_convertible< |
690 | typename std::iterator_traits<in_iter>::iterator_category, |
691 | std::input_iterator_tag>::value>> |
692 | void assign(in_iter in_start, in_iter in_end) { |
693 | this->assertSafeToReferenceAfterClear(in_start, in_end); |
694 | clear(); |
695 | append(in_start, in_end); |
696 | } |
697 | |
698 | void assign(std::initializer_list<T> IL) { |
699 | clear(); |
700 | append(IL); |
701 | } |
702 | |
703 | void assign(const SmallVectorImpl &RHS) { assign(RHS.begin(), RHS.end()); } |
704 | |
705 | iterator erase(const_iterator CI) { |
706 | // Just cast away constness because this is a non-const member function. |
707 | iterator I = const_cast<iterator>(CI); |
708 | |
709 | assert(this->isReferenceToStorage(CI) && "Iterator to erase is out of bounds."); |
710 | |
711 | iterator N = I; |
712 | // Shift all elts down one. |
713 | std::move(I+1, this->end(), I); |
714 | // Drop the last elt. |
715 | this->pop_back(); |
716 | return N; |
717 | } |
718 | |
719 | iterator erase(const_iterator CS, const_iterator CE) { |
720 | // Just cast away constness because this is a non-const member function. |
721 | iterator S = const_cast<iterator>(CS); |
722 | iterator E = const_cast<iterator>(CE); |
723 | |
724 | assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds."); |
725 | |
726 | iterator N = S; |
727 | // Shift all elts down. |
728 | iterator I = std::move(E, this->end(), S); |
729 | // Drop the last elts. |
730 | this->destroy_range(I, this->end()); |
731 | this->set_size(I - this->begin()); |
732 | return N; |
733 | } |
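Since both overloads return an iterator to the element following the erased range, the familiar erase-while-iterating pattern from std::vector carries over; a sketch:

    llvm::SmallVector<int, 8> V = {1, 2, 3, 4};
    for (auto *I = V.begin(); I != V.end();) {
      if (*I % 2 == 0)
        I = V.erase(I); // tail shifts down; I now names the next element
      else
        ++I;
    }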
734 | |
735 | private: |
736 | template <class ArgType> iterator insert_one_impl(iterator I, ArgType &&Elt) { |
737 | // Callers ensure that ArgType is derived from T. |
738 | static_assert( |
739 | std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>, |
740 | T>::value, |
741 | "ArgType must be derived from T!"); |
742 | |
743 | if (I == this->end()) { // Important special case for empty vector. |
744 | this->push_back(::std::forward<ArgType>(Elt)); |
745 | return this->end()-1; |
746 | } |
747 | |
748 | assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds."); |
749 | |
750 | // Grow if necessary. |
751 | size_t Index = I - this->begin(); |
752 | std::remove_reference_t<ArgType> *EltPtr = |
753 | this->reserveForParamAndGetAddress(Elt); |
754 | I = this->begin() + Index; |
755 | |
756 | ::new ((void*) this->end()) T(::std::move(this->back())); |
757 | // Push everything else over. |
758 | std::move_backward(I, this->end()-1, this->end()); |
759 | this->set_size(this->size() + 1); |
760 | |
761 | // If we just moved the element we're inserting, be sure to update |
762 | // the reference (never happens if TakesParamByValue). |
763 | static_assert(!TakesParamByValue || std::is_same<ArgType, T>::value, |
764 | "ArgType must be 'T' when taking by value!"); |
765 | if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end())) |
766 | ++EltPtr; |
767 | |
768 | *I = ::std::forward<ArgType>(*EltPtr); |
769 | return I; |
770 | } |
771 | |
772 | public: |
773 | iterator insert(iterator I, T &&Elt) { |
774 | return insert_one_impl(I, this->forward_value_param(std::move(Elt))); |
775 | } |
776 | |
777 | iterator insert(iterator I, const T &Elt) { |
778 | return insert_one_impl(I, this->forward_value_param(Elt)); |
779 | } |
780 | |
781 | iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) { |
782 | // Convert iterator to elt# to avoid invalidating iterator when we reserve() |
783 | size_t InsertElt = I - this->begin(); |
784 | |
785 | if (I == this->end()) { // Important special case for empty vector. |
786 | append(NumToInsert, Elt); |
787 | return this->begin()+InsertElt; |
788 | } |
789 | |
790 | assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds."); |
791 | |
792 | // Ensure there is enough space, and get the (maybe updated) address of |
793 | // Elt. |
794 | const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert); |
795 | |
796 | // Uninvalidate the iterator. |
797 | I = this->begin()+InsertElt; |
798 | |
799 | // If there are more elements between the insertion point and the end of the |
800 | // range than there are being inserted, we can use a simple approach to |
801 | // insertion. Since we already reserved space, we know that this won't |
802 | // reallocate the vector. |
803 | if (size_t(this->end()-I) >= NumToInsert) { |
804 | T *OldEnd = this->end(); |
805 | append(std::move_iterator<iterator>(this->end() - NumToInsert), |
806 | std::move_iterator<iterator>(this->end())); |
807 | |
808 | // Copy the existing elements that get replaced. |
809 | std::move_backward(I, OldEnd-NumToInsert, OldEnd); |
810 | |
811 | // If we just moved the element we're inserting, be sure to update |
812 | // the reference (never happens if TakesParamByValue). |
813 | if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end()) |
814 | EltPtr += NumToInsert; |
815 | |
816 | std::fill_n(I, NumToInsert, *EltPtr); |
817 | return I; |
818 | } |
819 | |
820 | // Otherwise, we're inserting more elements than exist already, and we're |
821 | // not inserting at the end. |
822 | |
823 | // Move over the elements that we're about to overwrite. |
824 | T *OldEnd = this->end(); |
825 | this->set_size(this->size() + NumToInsert); |
826 | size_t NumOverwritten = OldEnd-I; |
827 | this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten); |
828 | |
829 | // If we just moved the element we're inserting, be sure to update |
830 | // the reference (never happens if TakesParamByValue). |
831 | if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end()) |
832 | EltPtr += NumToInsert; |
833 | |
834 | // Replace the overwritten part. |
835 | std::fill_n(I, NumOverwritten, *EltPtr); |
836 | |
837 | // Insert the non-overwritten middle part. |
838 | std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr); |
839 | return I; |
840 | } |
841 | |
842 | template <typename ItTy, |
843 | typename = std::enable_if_t<std::is_convertible< |
844 | typename std::iterator_traits<ItTy>::iterator_category, |
845 | std::input_iterator_tag>::value>> |
846 | iterator insert(iterator I, ItTy From, ItTy To) { |
847 | // Convert iterator to elt# to avoid invalidating iterator when we reserve() |
848 | size_t InsertElt = I - this->begin(); |
849 | |
850 | if (I == this->end()) { // Important special case for empty vector. |
851 | append(From, To); |
852 | return this->begin()+InsertElt; |
853 | } |
854 | |
855 | assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds."); |
856 | |
857 | // Check that the reserve that follows doesn't invalidate the iterators. |
858 | this->assertSafeToAddRange(From, To); |
859 | |
860 | size_t NumToInsert = std::distance(From, To); |
861 | |
862 | // Ensure there is enough space. |
863 | reserve(this->size() + NumToInsert); |
864 | |
865 | // Uninvalidate the iterator. |
866 | I = this->begin()+InsertElt; |
867 | |
868 | // If there are more elements between the insertion point and the end of the |
869 | // range than there are being inserted, we can use a simple approach to |
870 | // insertion. Since we already reserved space, we know that this won't |
871 | // reallocate the vector. |
872 | if (size_t(this->end()-I) >= NumToInsert) { |
873 | T *OldEnd = this->end(); |
874 | append(std::move_iterator<iterator>(this->end() - NumToInsert), |
875 | std::move_iterator<iterator>(this->end())); |
876 | |
877 | // Copy the existing elements that get replaced. |
878 | std::move_backward(I, OldEnd-NumToInsert, OldEnd); |
879 | |
880 | std::copy(From, To, I); |
881 | return I; |
882 | } |
883 | |
884 | // Otherwise, we're inserting more elements than exist already, and we're |
885 | // not inserting at the end. |
886 | |
887 | // Move over the elements that we're about to overwrite. |
888 | T *OldEnd = this->end(); |
889 | this->set_size(this->size() + NumToInsert); |
890 | size_t NumOverwritten = OldEnd-I; |
891 | this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten); |
892 | |
893 | // Replace the overwritten part. |
894 | for (T *J = I; NumOverwritten > 0; --NumOverwritten) { |
895 | *J = *From; |
896 | ++J; ++From; |
897 | } |
898 | |
899 | // Insert the non-overwritten middle part. |
900 | this->uninitialized_copy(From, To, OldEnd); |
901 | return I; |
902 | } |
903 | |
904 | void insert(iterator I, std::initializer_list<T> IL) { |
905 | insert(I, IL.begin(), IL.end()); |
906 | } |
907 | |
908 | template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) { |
909 | if (LLVM_UNLIKELY(this->size() >= this->capacity())) |
910 | return this->growAndEmplaceBack(std::forward<ArgTypes>(Args)...); |
911 | |
912 | ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...); |
913 | this->set_size(this->size() + 1); |
914 | return this->back(); |
915 | } |
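emplace_back constructs directly in the uninitialized slot at end(), falling back to growAndEmplaceBack when at capacity (which also handles arguments that reference the vector's own storage). A sketch with a hypothetical element type:

    struct Point { int X, Y; Point(int X, int Y) : X(X), Y(Y) {} }; // illustrative
    llvm::SmallVector<Point, 4> Pts;
    Pts.emplace_back(1, 2); // capacity suffices here, so Point is built in place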
916 | |
917 | SmallVectorImpl &operator=(const SmallVectorImpl &RHS); |
918 | |
919 | SmallVectorImpl &operator=(SmallVectorImpl &&RHS); |
920 | |
921 | bool operator==(const SmallVectorImpl &RHS) const { |
922 | if (this->size() != RHS.size()) return false; |
923 | return std::equal(this->begin(), this->end(), RHS.begin()); |
924 | } |
925 | bool operator!=(const SmallVectorImpl &RHS) const { |
926 | return !(*this == RHS); |
927 | } |
928 | |
929 | bool operator<(const SmallVectorImpl &RHS) const { |
930 | return std::lexicographical_compare(this->begin(), this->end(), |
931 | RHS.begin(), RHS.end()); |
932 | } |
933 | }; |
934 | |
935 | template <typename T> |
936 | void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) { |
937 | if (this == &RHS) return; |
938 | |
939 | // We can only avoid copying elements if neither vector is small. |
940 | if (!this->isSmall() && !RHS.isSmall()) { |
941 | std::swap(this->BeginX, RHS.BeginX); |
942 | std::swap(this->Size, RHS.Size); |
943 | std::swap(this->Capacity, RHS.Capacity); |
944 | return; |
945 | } |
946 | this->reserve(RHS.size()); |
947 | RHS.reserve(this->size()); |
948 | |
949 | // Swap the shared elements. |
950 | size_t NumShared = this->size(); |
951 | if (NumShared > RHS.size()) NumShared = RHS.size(); |
952 | for (size_type i = 0; i != NumShared; ++i) |
953 | std::swap((*this)[i], RHS[i]); |
954 | |
955 | // Copy over the extra elts. |
956 | if (this->size() > RHS.size()) { |
957 | size_t EltDiff = this->size() - RHS.size(); |
958 | this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end()); |
959 | RHS.set_size(RHS.size() + EltDiff); |
960 | this->destroy_range(this->begin()+NumShared, this->end()); |
961 | this->set_size(NumShared); |
962 | } else if (RHS.size() > this->size()) { |
963 | size_t EltDiff = RHS.size() - this->size(); |
964 | this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end()); |
965 | this->set_size(this->size() + EltDiff); |
966 | this->destroy_range(RHS.begin()+NumShared, RHS.end()); |
967 | RHS.set_size(NumShared); |
968 | } |
969 | } |
970 | |
971 | template <typename T> |
972 | SmallVectorImpl<T> &SmallVectorImpl<T>:: |
973 | operator=(const SmallVectorImpl<T> &RHS) { |
974 | // Avoid self-assignment. |
975 | if (this == &RHS) return *this; |
976 | |
977 | // If we already have sufficient space, assign the common elements, then |
978 | // destroy any excess. |
979 | size_t RHSSize = RHS.size(); |
980 | size_t CurSize = this->size(); |
981 | if (CurSize >= RHSSize) { |
982 | // Assign common elements. |
983 | iterator NewEnd; |
984 | if (RHSSize) |
985 | NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin()); |
986 | else |
987 | NewEnd = this->begin(); |
988 | |
989 | // Destroy excess elements. |
990 | this->destroy_range(NewEnd, this->end()); |
991 | |
992 | // Trim. |
993 | this->set_size(RHSSize); |
994 | return *this; |
995 | } |
996 | |
997 | // If we have to grow to have enough elements, destroy the current elements. |
998 | // This allows us to avoid copying them during the grow. |
999 | // FIXME: don't do this if they're efficiently moveable. |
1000 | if (this->capacity() < RHSSize) { |
1001 | // Destroy current elements. |
1002 | this->clear(); |
1003 | CurSize = 0; |
1004 | this->grow(RHSSize); |
1005 | } else if (CurSize) { |
1006 | // Otherwise, use assignment for the already-constructed elements. |
1007 | std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin()); |
1008 | } |
1009 | |
1010 | // Copy construct the new elements in place. |
1011 | this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(), |
1012 | this->begin()+CurSize); |
1013 | |
1014 | // Set end. |
1015 | this->set_size(RHSSize); |
1016 | return *this; |
1017 | } |
1018 | |
1019 | template <typename T> |
1020 | SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) { |
1021 | // Avoid self-assignment. |
1022 | if (this == &RHS) return *this; |
1023 | |
1024 | // If the RHS isn't small, clear this vector and then steal its buffer. |
1025 | if (!RHS.isSmall()) { |
1026 | this->destroy_range(this->begin(), this->end()); |
1027 | if (!this->isSmall()) free(this->begin()); |
1028 | this->BeginX = RHS.BeginX; |
1029 | this->Size = RHS.Size; |
1030 | this->Capacity = RHS.Capacity; |
1031 | RHS.resetToSmall(); |
1032 | return *this; |
1033 | } |
1034 | |
1035 | // If we already have sufficient space, assign the common elements, then |
1036 | // destroy any excess. |
1037 | size_t RHSSize = RHS.size(); |
1038 | size_t CurSize = this->size(); |
1039 | if (CurSize >= RHSSize) { |
1040 | // Assign common elements. |
1041 | iterator NewEnd = this->begin(); |
1042 | if (RHSSize) |
1043 | NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd); |
1044 | |
1045 | // Destroy excess elements and trim the bounds. |
1046 | this->destroy_range(NewEnd, this->end()); |
1047 | this->set_size(RHSSize); |
1048 | |
1049 | // Clear the RHS. |
1050 | RHS.clear(); |
1051 | |
1052 | return *this; |
1053 | } |
1054 | |
1055 | // If we have to grow to have enough elements, destroy the current elements. |
1056 | // This allows us to avoid copying them during the grow. |
1057 | // FIXME: this may not actually make any sense if we can efficiently move |
1058 | // elements. |
1059 | if (this->capacity() < RHSSize) { |
1060 | // Destroy current elements. |
1061 | this->clear(); |
1062 | CurSize = 0; |
1063 | this->grow(RHSSize); |
1064 | } else if (CurSize) { |
1065 | // Otherwise, use assignment for the already-constructed elements. |
1066 | std::move(RHS.begin(), RHS.begin()+CurSize, this->begin()); |
1067 | } |
1068 | |
1069 | // Move-construct the new elements in place. |
1070 | this->uninitialized_move(RHS.begin()+CurSize, RHS.end(), |
1071 | this->begin()+CurSize); |
1072 | |
1073 | // Set end. |
1074 | this->set_size(RHSSize); |
1075 | |
1076 | RHS.clear(); |
1077 | return *this; |
1078 | } |
1079 | |
1080 | /// Storage for the SmallVector elements. This is specialized for the N=0 case |
1081 | /// to avoid allocating unnecessary storage. |
1082 | template <typename T, unsigned N> |
1083 | struct SmallVectorStorage { |
1084 | alignas(T) char InlineElts[N * sizeof(T)]; |
1085 | }; |
1086 | |
1087 | /// We need the storage to be properly aligned even for small-size of 0 so that |
1088 | /// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is |
1089 | /// well-defined. |
1090 | template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {}; |
1091 | |
1092 | /// Forward declaration of SmallVector so that |
1093 | /// calculateSmallVectorDefaultInlinedElements can reference |
1094 | /// `sizeof(SmallVector<T, 0>)`. |
1095 | template <typename T, unsigned N> class LLVM_GSL_OWNER SmallVector; |
1096 | |
1097 | /// Helper class for calculating the default number of inline elements for |
1098 | /// `SmallVector<T>`. |
1099 | /// |
1100 | /// This should be migrated to a constexpr function when our minimum |
1101 | /// compiler support is enough for multi-statement constexpr functions. |
1102 | template <typename T> struct CalculateSmallVectorDefaultInlinedElements { |
1103 | // Parameter controlling the default number of inlined elements |
1104 | // for `SmallVector<T>`. |
1105 | // |
1106 | // The default number of inlined elements ensures that |
1107 | // 1. There is at least one inlined element. |
1108 | // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless |
1109 | // it contradicts 1. |
1110 | static constexpr size_t kPreferredSmallVectorSizeof = 64; |
1111 | |
1112 | // static_assert that sizeof(T) is not "too big". |
1113 | // |
1114 | // Because our policy guarantees at least one inlined element, it is possible |
1115 | // for an arbitrarily large inlined element to allocate an arbitrarily large |
1116 | // amount of inline storage. We generally consider it an antipattern for a |
1117 | // SmallVector to allocate an excessive amount of inline storage, so we want |
1118 | // to call attention to these cases and make sure that users are making an |
1119 | // intentional decision if they request a lot of inline storage. |
1120 | // |
1121 | // We want this assertion to trigger in pathological cases, but otherwise |
1122 | // not be too easy to hit. To accomplish that, the cutoff is actually somewhat |
1123 | // larger than kPreferredSmallVectorSizeof (otherwise, |
1124 | // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that |
1125 | // pattern seems useful in practice). |
1126 | // |
1127 | // One wrinkle is that this assertion is in theory non-portable, since |
1128 | // sizeof(T) is in general platform-dependent. However, we don't expect this |
1129 | // to be much of an issue, because most LLVM development happens on 64-bit |
1130 | // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for |
1131 | // 32-bit hosts, dodging the issue. The reverse situation, where development |
1132 | // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a |
1133 | // 64-bit host, is expected to be very rare. |
1134 | static_assert( |
1135 | sizeof(T) <= 256, |
1136 | "You are trying to use a default number of inlined elements for " |
1137 | "`SmallVector<T>` but `sizeof(T)` is really big! Please use an " |
1138 | "explicit number of inlined elements with `SmallVector<T, N>` to make " |
1139 | "sure you really want that much inline storage."); |
1140 | |
1141 | // Discount the size of the header itself when calculating the maximum inline |
1142 | // bytes. |
1143 | static constexpr size_t PreferredInlineBytes = |
1144 | kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>); |
1145 | static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T); |
1146 | static constexpr size_t value = |
1147 | NumElementsThatFit == 0 ? 1 : NumElementsThatFit; |
1148 | }; |
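As an illustration of the arithmetic (numbers assume a common 64-bit host): the header of SmallVector<void *, 0> is 16 bytes, leaving 48 preferred inline bytes, so SmallVector<void *> defaults to 6 inline elements:

    static_assert(sizeof(llvm::SmallVector<void *>) <= 64,
                  "default N keeps the whole object within the preferred 64 bytes");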
1149 | |
1150 | /// This is a 'vector' (really, a variable-sized array), optimized |
1151 | /// for the case when the array is small. It contains some number of elements |
1152 | /// in-place, which allows it to avoid heap allocation when the actual number of |
1153 | /// elements is below that threshold. This allows normal "small" cases to be |
1154 | /// fast without losing generality for large inputs. |
1155 | /// |
1156 | /// \note |
1157 | /// In the absence of a well-motivated choice for the number of inlined |
1158 | /// elements \p N, it is recommended to use \c SmallVector<T> (that is, |
1159 | /// omitting the \p N). This will choose a default number of inlined elements |
1160 | /// reasonable for allocation on the stack (for example, trying to keep \c |
1161 | /// sizeof(SmallVector<T>) around 64 bytes). |
1162 | /// |
1163 | /// \warning This does not attempt to be exception safe. |
1164 | /// |
1165 | /// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h |
1166 | template <typename T, |
1167 | unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value> |
1168 | class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>, |
1169 | SmallVectorStorage<T, N> { |
1170 | public: |
1171 | SmallVector() : SmallVectorImpl<T>(N) {} |
1172 | |
1173 | ~SmallVector() { |
1174 | // Destroy the constructed elements in the vector. |
1175 | this->destroy_range(this->begin(), this->end()); |
1176 | } |
1177 | |
1178 | explicit SmallVector(size_t Size, const T &Value = T()) |
1179 | : SmallVectorImpl<T>(N) { |
1180 | this->assign(Size, Value); |
1181 | } |
1182 | |
1183 | template <typename ItTy, |
1184 | typename = std::enable_if_t<std::is_convertible< |
1185 | typename std::iterator_traits<ItTy>::iterator_category, |
1186 | std::input_iterator_tag>::value>> |
1187 | SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) { |
1188 | this->append(S, E); |
1189 | } |
1190 | |
1191 | template <typename RangeTy> |
1192 | explicit SmallVector(const iterator_range<RangeTy> &R) |
1193 | : SmallVectorImpl<T>(N) { |
1194 | this->append(R.begin(), R.end()); |
1195 | } |
1196 | |
1197 | SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) { |
1198 | this->assign(IL); |
1199 | } |
1200 | |
1201 | SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) { |
1202 | if (!RHS.empty()) |
1203 | SmallVectorImpl<T>::operator=(RHS); |
1204 | } |
1205 | |
1206 | SmallVector &operator=(const SmallVector &RHS) { |
1207 | SmallVectorImpl<T>::operator=(RHS); |
1208 | return *this; |
1209 | } |
1210 | |
1211 | SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) { |
1212 | if (!RHS.empty()) |
1213 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1214 | } |
1215 | |
1216 | SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) { |
1217 | if (!RHS.empty()) |
1218 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1219 | } |
1220 | |
1221 | SmallVector &operator=(SmallVector &&RHS) { |
1222 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1223 | return *this; |
1224 | } |
1225 | |
1226 | SmallVector &operator=(SmallVectorImpl<T> &&RHS) { |
1227 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1228 | return *this; |
1229 | } |
1230 | |
1231 | SmallVector &operator=(std::initializer_list<T> IL) { |
1232 | this->assign(IL); |
1233 | return *this; |
1234 | } |
1235 | }; |
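A short usage sketch of the resulting container (illustrative):

    llvm::SmallVector<unsigned, 4> Vals = {3, 1, 2};
    Vals.push_back(4);  // still inline: size() == 4 == N
    Vals.append(3, 0);  // exceeds N; storage moves to the heap
    llvm::SmallVector<unsigned, 4> Copy = Vals; // deep copy via operator=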
1236 | |
1237 | template <typename T, unsigned N> |
1238 | inline size_t capacity_in_bytes(const SmallVector<T, N> &X) { |
1239 | return X.capacity_in_bytes(); |
1240 | } |
1241 | |
1242 | /// Given a range of type R, iterate the entire range and return a |
1243 | /// SmallVector with elements of the vector. This is useful, for example, |
1244 | /// when you want to iterate a range and then sort the results. |
1245 | template <unsigned Size, typename R> |
1246 | SmallVector<typename std::remove_const<typename std::remove_reference< |
1247 | decltype(*std::begin(std::declval<R &>()))>::type>::type, |
1248 | Size> |
1249 | to_vector(R &&Range) { |
1250 | return {std::begin(Range), std::end(Range)}; |
1251 | } |
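For example, materializing a range so the results can be sorted (assumes <algorithm>):

    int Raw[] = {3, 1, 2};
    auto V = llvm::to_vector<4>(Raw); // yields SmallVector<int, 4>
    std::sort(V.begin(), V.end());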
1252 | |
1253 | } // end namespace llvm |
1254 | |
1255 | namespace std { |
1256 | |
1257 | /// Implement std::swap in terms of SmallVector swap. |
1258 | template<typename T> |
1259 | inline void |
1260 | swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) { |
1261 | LHS.swap(RHS); |
1262 | } |
1263 | |
1264 | /// Implement std::swap in terms of SmallVector swap. |
1265 | template<typename T, unsigned N> |
1266 | inline void |
1267 | swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) { |
1268 | LHS.swap(RHS); |
1269 | } |
1270 | |
1271 | } // end namespace std |
1272 | |
1273 | #endif // LLVM_ADT_SMALLVECTOR_H |