File: | src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR/IRBuilder.h |
Warning: | line 1648, column 28 Called C++ object pointer is null |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | /// \file | |||
9 | /// | |||
10 | /// This file implements the OpenMPIRBuilder class, which is used as a | |||
11 | /// convenient way to create LLVM instructions for OpenMP directives. | |||
12 | /// | |||
13 | //===----------------------------------------------------------------------===// | |||
14 | ||||
15 | #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" | |||
16 | ||||
17 | #include "llvm/ADT/StringRef.h" | |||
18 | #include "llvm/ADT/Triple.h" | |||
19 | #include "llvm/IR/CFG.h" | |||
20 | #include "llvm/IR/DebugInfo.h" | |||
21 | #include "llvm/IR/IRBuilder.h" | |||
22 | #include "llvm/IR/MDBuilder.h" | |||
23 | #include "llvm/IR/Value.h" | |||
24 | #include "llvm/Support/CommandLine.h" | |||
25 | #include "llvm/Support/Error.h" | |||
26 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" | |||
27 | #include "llvm/Transforms/Utils/CodeExtractor.h" | |||
28 | ||||
29 | #include <sstream> | |||
30 | ||||
31 | #define DEBUG_TYPE"openmp-ir-builder" "openmp-ir-builder" | |||
32 | ||||
33 | using namespace llvm; | |||
34 | using namespace omp; | |||
35 | ||||
36 | static cl::opt<bool> | |||
37 | OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden, | |||
38 | cl::desc("Use optimistic attributes describing " | |||
39 | "'as-if' properties of runtime calls."), | |||
40 | cl::init(false)); | |||
41 | ||||
42 | void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) { | |||
43 | LLVMContext &Ctx = Fn.getContext(); | |||
44 | ||||
45 | // Get the function's current attributes. | |||
46 | auto Attrs = Fn.getAttributes(); | |||
47 | auto FnAttrs = Attrs.getFnAttributes(); | |||
48 | auto RetAttrs = Attrs.getRetAttributes(); | |||
49 | SmallVector<AttributeSet, 4> ArgAttrs; | |||
50 | for (size_t ArgNo = 0; ArgNo < Fn.arg_size(); ++ArgNo) | |||
51 | ArgAttrs.emplace_back(Attrs.getParamAttributes(ArgNo)); | |||
52 | ||||
53 | #define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet; | |||
54 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | |||
55 | ||||
56 | // Add attributes to the function declaration. | |||
57 | switch (FnID) { | |||
58 | #define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets) \ | |||
59 | case Enum: \ | |||
60 | FnAttrs = FnAttrs.addAttributes(Ctx, FnAttrSet); \ | |||
61 | RetAttrs = RetAttrs.addAttributes(Ctx, RetAttrSet); \ | |||
62 | for (size_t ArgNo = 0; ArgNo < ArgAttrSets.size(); ++ArgNo) \ | |||
63 | ArgAttrs[ArgNo] = \ | |||
64 | ArgAttrs[ArgNo].addAttributes(Ctx, ArgAttrSets[ArgNo]); \ | |||
65 | Fn.setAttributes(AttributeList::get(Ctx, FnAttrs, RetAttrs, ArgAttrs)); \ | |||
66 | break; | |||
67 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | |||
68 | default: | |||
69 | // Attributes are optional. | |||
70 | break; | |||
71 | } | |||
72 | } | |||
73 | ||||
74 | FunctionCallee | |||
75 | OpenMPIRBuilder::getOrCreateRuntimeFunction(Module &M, RuntimeFunction FnID) { | |||
76 | FunctionType *FnTy = nullptr; | |||
77 | Function *Fn = nullptr; | |||
78 | ||||
79 | // Try to find the declation in the module first. | |||
80 | switch (FnID) { | |||
81 | #define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...) \ | |||
82 | case Enum: \ | |||
83 | FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__}, \ | |||
84 | IsVarArg); \ | |||
85 | Fn = M.getFunction(Str); \ | |||
86 | break; | |||
87 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | |||
88 | } | |||
89 | ||||
90 | if (!Fn) { | |||
91 | // Create a new declaration if we need one. | |||
92 | switch (FnID) { | |||
93 | #define OMP_RTL(Enum, Str, ...) \ | |||
94 | case Enum: \ | |||
95 | Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M); \ | |||
96 | break; | |||
97 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | |||
98 | } | |||
99 | ||||
100 | // Add information if the runtime function takes a callback function | |||
101 | if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) { | |||
102 | if (!Fn->hasMetadata(LLVMContext::MD_callback)) { | |||
103 | LLVMContext &Ctx = Fn->getContext(); | |||
104 | MDBuilder MDB(Ctx); | |||
105 | // Annotate the callback behavior of the runtime function: | |||
106 | // - The callback callee is argument number 2 (microtask). | |||
107 | // - The first two arguments of the callback callee are unknown (-1). | |||
108 | // - All variadic arguments to the runtime function are passed to the | |||
109 | // callback callee. | |||
110 | Fn->addMetadata( | |||
111 | LLVMContext::MD_callback, | |||
112 | *MDNode::get(Ctx, {MDB.createCallbackEncoding( | |||
113 | 2, {-1, -1}, /* VarArgsArePassed */ true)})); | |||
114 | } | |||
115 | } | |||
116 | ||||
117 | LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName()do { } while (false) | |||
118 | << " with type " << *Fn->getFunctionType() << "\n")do { } while (false); | |||
119 | addAttributes(FnID, *Fn); | |||
120 | ||||
121 | } else { | |||
122 | LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName()do { } while (false) | |||
123 | << " with type " << *Fn->getFunctionType() << "\n")do { } while (false); | |||
124 | } | |||
125 | ||||
126 | assert(Fn && "Failed to create OpenMP runtime function")((void)0); | |||
127 | ||||
128 | // Cast the function to the expected type if necessary | |||
129 | Constant *C = ConstantExpr::getBitCast(Fn, FnTy->getPointerTo()); | |||
130 | return {FnTy, C}; | |||
131 | } | |||
132 | ||||
133 | Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) { | |||
134 | FunctionCallee RTLFn = getOrCreateRuntimeFunction(M, FnID); | |||
135 | auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee()); | |||
136 | assert(Fn && "Failed to create OpenMP runtime function pointer")((void)0); | |||
137 | return Fn; | |||
138 | } | |||
139 | ||||
140 | void OpenMPIRBuilder::initialize() { initializeTypes(M); } | |||
141 | ||||
142 | void OpenMPIRBuilder::finalize(Function *Fn, bool AllowExtractorSinking) { | |||
143 | SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet; | |||
144 | SmallVector<BasicBlock *, 32> Blocks; | |||
145 | SmallVector<OutlineInfo, 16> DeferredOutlines; | |||
146 | for (OutlineInfo &OI : OutlineInfos) { | |||
147 | // Skip functions that have not finalized yet; may happen with nested | |||
148 | // function generation. | |||
149 | if (Fn && OI.getFunction() != Fn) { | |||
150 | DeferredOutlines.push_back(OI); | |||
151 | continue; | |||
152 | } | |||
153 | ||||
154 | ParallelRegionBlockSet.clear(); | |||
155 | Blocks.clear(); | |||
156 | OI.collectBlocks(ParallelRegionBlockSet, Blocks); | |||
157 | ||||
158 | Function *OuterFn = OI.getFunction(); | |||
159 | CodeExtractorAnalysisCache CEAC(*OuterFn); | |||
160 | CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr, | |||
161 | /* AggregateArgs */ false, | |||
162 | /* BlockFrequencyInfo */ nullptr, | |||
163 | /* BranchProbabilityInfo */ nullptr, | |||
164 | /* AssumptionCache */ nullptr, | |||
165 | /* AllowVarArgs */ true, | |||
166 | /* AllowAlloca */ true, | |||
167 | /* Suffix */ ".omp_par"); | |||
168 | ||||
169 | LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n")do { } while (false); | |||
170 | LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName()do { } while (false) | |||
171 | << " Exit: " << OI.ExitBB->getName() << "\n")do { } while (false); | |||
172 | assert(Extractor.isEligible() &&((void)0) | |||
173 | "Expected OpenMP outlining to be possible!")((void)0); | |||
174 | ||||
175 | Function *OutlinedFn = Extractor.extractCodeRegion(CEAC); | |||
176 | ||||
177 | LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n")do { } while (false); | |||
178 | LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n")do { } while (false); | |||
179 | assert(OutlinedFn->getReturnType()->isVoidTy() &&((void)0) | |||
180 | "OpenMP outlined functions should not return a value!")((void)0); | |||
181 | ||||
182 | // For compability with the clang CG we move the outlined function after the | |||
183 | // one with the parallel region. | |||
184 | OutlinedFn->removeFromParent(); | |||
185 | M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn); | |||
186 | ||||
187 | // Remove the artificial entry introduced by the extractor right away, we | |||
188 | // made our own entry block after all. | |||
189 | { | |||
190 | BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock(); | |||
191 | assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB)((void)0); | |||
192 | assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry)((void)0); | |||
193 | if (AllowExtractorSinking) { | |||
194 | // Move instructions from the to-be-deleted ArtificialEntry to the entry | |||
195 | // basic block of the parallel region. CodeExtractor may have sunk | |||
196 | // allocas/bitcasts for values that are solely used in the outlined | |||
197 | // region and do not escape. | |||
198 | assert(!ArtificialEntry.empty() &&((void)0) | |||
199 | "Expected instructions to sink in the outlined region")((void)0); | |||
200 | for (BasicBlock::iterator It = ArtificialEntry.begin(), | |||
201 | End = ArtificialEntry.end(); | |||
202 | It != End;) { | |||
203 | Instruction &I = *It; | |||
204 | It++; | |||
205 | ||||
206 | if (I.isTerminator()) | |||
207 | continue; | |||
208 | ||||
209 | I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt()); | |||
210 | } | |||
211 | } | |||
212 | OI.EntryBB->moveBefore(&ArtificialEntry); | |||
213 | ArtificialEntry.eraseFromParent(); | |||
214 | } | |||
215 | assert(&OutlinedFn->getEntryBlock() == OI.EntryBB)((void)0); | |||
216 | assert(OutlinedFn && OutlinedFn->getNumUses() == 1)((void)0); | |||
217 | ||||
218 | // Run a user callback, e.g. to add attributes. | |||
219 | if (OI.PostOutlineCB) | |||
220 | OI.PostOutlineCB(*OutlinedFn); | |||
221 | } | |||
222 | ||||
223 | // Remove work items that have been completed. | |||
224 | OutlineInfos = std::move(DeferredOutlines); | |||
225 | } | |||
226 | ||||
227 | OpenMPIRBuilder::~OpenMPIRBuilder() { | |||
228 | assert(OutlineInfos.empty() && "There must be no outstanding outlinings")((void)0); | |||
229 | } | |||
230 | ||||
231 | Value *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr, | |||
232 | IdentFlag LocFlags, | |||
233 | unsigned Reserve2Flags) { | |||
234 | // Enable "C-mode". | |||
235 | LocFlags |= OMP_IDENT_FLAG_KMPC; | |||
236 | ||||
237 | Value *&Ident = | |||
238 | IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}]; | |||
239 | if (!Ident) { | |||
240 | Constant *I32Null = ConstantInt::getNullValue(Int32); | |||
241 | Constant *IdentData[] = { | |||
242 | I32Null, ConstantInt::get(Int32, uint32_t(LocFlags)), | |||
243 | ConstantInt::get(Int32, Reserve2Flags), I32Null, SrcLocStr}; | |||
244 | Constant *Initializer = ConstantStruct::get( | |||
245 | cast<StructType>(IdentPtr->getPointerElementType()), IdentData); | |||
246 | ||||
247 | // Look for existing encoding of the location + flags, not needed but | |||
248 | // minimizes the difference to the existing solution while we transition. | |||
249 | for (GlobalVariable &GV : M.getGlobalList()) | |||
250 | if (GV.getType() == IdentPtr && GV.hasInitializer()) | |||
251 | if (GV.getInitializer() == Initializer) | |||
252 | return Ident = &GV; | |||
253 | ||||
254 | auto *GV = new GlobalVariable(M, IdentPtr->getPointerElementType(), | |||
255 | /* isConstant = */ true, | |||
256 | GlobalValue::PrivateLinkage, Initializer); | |||
257 | GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); | |||
258 | GV->setAlignment(Align(8)); | |||
259 | Ident = GV; | |||
260 | } | |||
261 | return Builder.CreatePointerCast(Ident, IdentPtr); | |||
262 | } | |||
263 | ||||
264 | Type *OpenMPIRBuilder::getLanemaskType() { | |||
265 | LLVMContext &Ctx = M.getContext(); | |||
266 | Triple triple(M.getTargetTriple()); | |||
267 | ||||
268 | // This test is adequate until deviceRTL has finer grained lane widths | |||
269 | return triple.isAMDGCN() ? Type::getInt64Ty(Ctx) : Type::getInt32Ty(Ctx); | |||
270 | } | |||
271 | ||||
272 | Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr) { | |||
273 | Constant *&SrcLocStr = SrcLocStrMap[LocStr]; | |||
274 | if (!SrcLocStr) { | |||
275 | Constant *Initializer = | |||
276 | ConstantDataArray::getString(M.getContext(), LocStr); | |||
277 | ||||
278 | // Look for existing encoding of the location, not needed but minimizes the | |||
279 | // difference to the existing solution while we transition. | |||
280 | for (GlobalVariable &GV : M.getGlobalList()) | |||
281 | if (GV.isConstant() && GV.hasInitializer() && | |||
282 | GV.getInitializer() == Initializer) | |||
283 | return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr); | |||
284 | ||||
285 | SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "", | |||
286 | /* AddressSpace */ 0, &M); | |||
287 | } | |||
288 | return SrcLocStr; | |||
289 | } | |||
290 | ||||
291 | Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName, | |||
292 | StringRef FileName, | |||
293 | unsigned Line, | |||
294 | unsigned Column) { | |||
295 | SmallString<128> Buffer; | |||
296 | Buffer.push_back(';'); | |||
297 | Buffer.append(FileName); | |||
298 | Buffer.push_back(';'); | |||
299 | Buffer.append(FunctionName); | |||
300 | Buffer.push_back(';'); | |||
301 | Buffer.append(std::to_string(Line)); | |||
302 | Buffer.push_back(';'); | |||
303 | Buffer.append(std::to_string(Column)); | |||
304 | Buffer.push_back(';'); | |||
305 | Buffer.push_back(';'); | |||
306 | return getOrCreateSrcLocStr(Buffer.str()); | |||
307 | } | |||
308 | ||||
309 | Constant *OpenMPIRBuilder::getOrCreateDefaultSrcLocStr() { | |||
310 | return getOrCreateSrcLocStr(";unknown;unknown;0;0;;"); | |||
311 | } | |||
312 | ||||
313 | Constant * | |||
314 | OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc) { | |||
315 | DILocation *DIL = Loc.DL.get(); | |||
316 | if (!DIL) | |||
317 | return getOrCreateDefaultSrcLocStr(); | |||
318 | StringRef FileName = M.getName(); | |||
319 | if (DIFile *DIF = DIL->getFile()) | |||
320 | if (Optional<StringRef> Source = DIF->getSource()) | |||
321 | FileName = *Source; | |||
322 | StringRef Function = DIL->getScope()->getSubprogram()->getName(); | |||
323 | Function = | |||
324 | !Function.empty() ? Function : Loc.IP.getBlock()->getParent()->getName(); | |||
325 | return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(), | |||
326 | DIL->getColumn()); | |||
327 | } | |||
328 | ||||
329 | Value *OpenMPIRBuilder::getOrCreateThreadID(Value *Ident) { | |||
330 | return Builder.CreateCall( | |||
331 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num), Ident, | |||
332 | "omp_global_thread_num"); | |||
333 | } | |||
334 | ||||
335 | OpenMPIRBuilder::InsertPointTy | |||
336 | OpenMPIRBuilder::createBarrier(const LocationDescription &Loc, Directive DK, | |||
337 | bool ForceSimpleCall, bool CheckCancelFlag) { | |||
338 | if (!updateToLocation(Loc)) | |||
339 | return Loc.IP; | |||
340 | return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag); | |||
341 | } | |||
342 | ||||
343 | OpenMPIRBuilder::InsertPointTy | |||
344 | OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind, | |||
345 | bool ForceSimpleCall, bool CheckCancelFlag) { | |||
346 | // Build call __kmpc_cancel_barrier(loc, thread_id) or | |||
347 | // __kmpc_barrier(loc, thread_id); | |||
348 | ||||
349 | IdentFlag BarrierLocFlags; | |||
350 | switch (Kind) { | |||
351 | case OMPD_for: | |||
352 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR; | |||
353 | break; | |||
354 | case OMPD_sections: | |||
355 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS; | |||
356 | break; | |||
357 | case OMPD_single: | |||
358 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE; | |||
359 | break; | |||
360 | case OMPD_barrier: | |||
361 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL; | |||
362 | break; | |||
363 | default: | |||
364 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL; | |||
365 | break; | |||
366 | } | |||
367 | ||||
368 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
369 | Value *Args[] = {getOrCreateIdent(SrcLocStr, BarrierLocFlags), | |||
370 | getOrCreateThreadID(getOrCreateIdent(SrcLocStr))}; | |||
371 | ||||
372 | // If we are in a cancellable parallel region, barriers are cancellation | |||
373 | // points. | |||
374 | // TODO: Check why we would force simple calls or to ignore the cancel flag. | |||
375 | bool UseCancelBarrier = | |||
376 | !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel); | |||
377 | ||||
378 | Value *Result = | |||
379 | Builder.CreateCall(getOrCreateRuntimeFunctionPtr( | |||
380 | UseCancelBarrier ? OMPRTL___kmpc_cancel_barrier | |||
381 | : OMPRTL___kmpc_barrier), | |||
382 | Args); | |||
383 | ||||
384 | if (UseCancelBarrier && CheckCancelFlag) | |||
385 | emitCancelationCheckImpl(Result, OMPD_parallel); | |||
386 | ||||
387 | return Builder.saveIP(); | |||
388 | } | |||
389 | ||||
390 | OpenMPIRBuilder::InsertPointTy | |||
391 | OpenMPIRBuilder::createCancel(const LocationDescription &Loc, | |||
392 | Value *IfCondition, | |||
393 | omp::Directive CanceledDirective) { | |||
394 | if (!updateToLocation(Loc)) | |||
395 | return Loc.IP; | |||
396 | ||||
397 | // LLVM utilities like blocks with terminators. | |||
398 | auto *UI = Builder.CreateUnreachable(); | |||
399 | ||||
400 | Instruction *ThenTI = UI, *ElseTI = nullptr; | |||
401 | if (IfCondition) | |||
402 | SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI); | |||
403 | Builder.SetInsertPoint(ThenTI); | |||
404 | ||||
405 | Value *CancelKind = nullptr; | |||
406 | switch (CanceledDirective) { | |||
407 | #define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value) \ | |||
408 | case DirectiveEnum: \ | |||
409 | CancelKind = Builder.getInt32(Value); \ | |||
410 | break; | |||
411 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | |||
412 | default: | |||
413 | llvm_unreachable("Unknown cancel kind!")__builtin_unreachable(); | |||
414 | } | |||
415 | ||||
416 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
417 | Value *Ident = getOrCreateIdent(SrcLocStr); | |||
418 | Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind}; | |||
419 | Value *Result = Builder.CreateCall( | |||
420 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args); | |||
421 | auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) { | |||
422 | if (CanceledDirective == OMPD_parallel) { | |||
423 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
424 | Builder.restoreIP(IP); | |||
425 | createBarrier(LocationDescription(Builder.saveIP(), Loc.DL), | |||
426 | omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false, | |||
427 | /* CheckCancelFlag */ false); | |||
428 | } | |||
429 | }; | |||
430 | ||||
431 | // The actual cancel logic is shared with others, e.g., cancel_barriers. | |||
432 | emitCancelationCheckImpl(Result, CanceledDirective, ExitCB); | |||
433 | ||||
434 | // Update the insertion point and remove the terminator we introduced. | |||
435 | Builder.SetInsertPoint(UI->getParent()); | |||
436 | UI->eraseFromParent(); | |||
437 | ||||
438 | return Builder.saveIP(); | |||
439 | } | |||
440 | ||||
441 | void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag, | |||
442 | omp::Directive CanceledDirective, | |||
443 | FinalizeCallbackTy ExitCB) { | |||
444 | assert(isLastFinalizationInfoCancellable(CanceledDirective) &&((void)0) | |||
445 | "Unexpected cancellation!")((void)0); | |||
446 | ||||
447 | // For a cancel barrier we create two new blocks. | |||
448 | BasicBlock *BB = Builder.GetInsertBlock(); | |||
449 | BasicBlock *NonCancellationBlock; | |||
450 | if (Builder.GetInsertPoint() == BB->end()) { | |||
451 | // TODO: This branch will not be needed once we moved to the | |||
452 | // OpenMPIRBuilder codegen completely. | |||
453 | NonCancellationBlock = BasicBlock::Create( | |||
454 | BB->getContext(), BB->getName() + ".cont", BB->getParent()); | |||
455 | } else { | |||
456 | NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint()); | |||
457 | BB->getTerminator()->eraseFromParent(); | |||
458 | Builder.SetInsertPoint(BB); | |||
459 | } | |||
460 | BasicBlock *CancellationBlock = BasicBlock::Create( | |||
461 | BB->getContext(), BB->getName() + ".cncl", BB->getParent()); | |||
462 | ||||
463 | // Jump to them based on the return value. | |||
464 | Value *Cmp = Builder.CreateIsNull(CancelFlag); | |||
465 | Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock, | |||
466 | /* TODO weight */ nullptr, nullptr); | |||
467 | ||||
468 | // From the cancellation block we finalize all variables and go to the | |||
469 | // post finalization block that is known to the FiniCB callback. | |||
470 | Builder.SetInsertPoint(CancellationBlock); | |||
471 | if (ExitCB) | |||
472 | ExitCB(Builder.saveIP()); | |||
473 | auto &FI = FinalizationStack.back(); | |||
474 | FI.FiniCB(Builder.saveIP()); | |||
475 | ||||
476 | // The continuation block is where code generation continues. | |||
477 | Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin()); | |||
478 | } | |||
479 | ||||
480 | IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel( | |||
481 | const LocationDescription &Loc, InsertPointTy OuterAllocaIP, | |||
482 | BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, | |||
483 | FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, | |||
484 | omp::ProcBindKind ProcBind, bool IsCancellable) { | |||
485 | if (!updateToLocation(Loc)) | |||
486 | return Loc.IP; | |||
487 | ||||
488 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
489 | Value *Ident = getOrCreateIdent(SrcLocStr); | |||
490 | Value *ThreadID = getOrCreateThreadID(Ident); | |||
491 | ||||
492 | if (NumThreads) { | |||
493 | // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads) | |||
494 | Value *Args[] = { | |||
495 | Ident, ThreadID, | |||
496 | Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)}; | |||
497 | Builder.CreateCall( | |||
498 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args); | |||
499 | } | |||
500 | ||||
501 | if (ProcBind != OMP_PROC_BIND_default) { | |||
502 | // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind) | |||
503 | Value *Args[] = { | |||
504 | Ident, ThreadID, | |||
505 | ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)}; | |||
506 | Builder.CreateCall( | |||
507 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args); | |||
508 | } | |||
509 | ||||
510 | BasicBlock *InsertBB = Builder.GetInsertBlock(); | |||
511 | Function *OuterFn = InsertBB->getParent(); | |||
512 | ||||
513 | // Save the outer alloca block because the insertion iterator may get | |||
514 | // invalidated and we still need this later. | |||
515 | BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock(); | |||
516 | ||||
517 | // Vector to remember instructions we used only during the modeling but which | |||
518 | // we want to delete at the end. | |||
519 | SmallVector<Instruction *, 4> ToBeDeleted; | |||
520 | ||||
521 | // Change the location to the outer alloca insertion point to create and | |||
522 | // initialize the allocas we pass into the parallel region. | |||
523 | Builder.restoreIP(OuterAllocaIP); | |||
524 | AllocaInst *TIDAddr = Builder.CreateAlloca(Int32, nullptr, "tid.addr"); | |||
525 | AllocaInst *ZeroAddr = Builder.CreateAlloca(Int32, nullptr, "zero.addr"); | |||
526 | ||||
527 | // If there is an if condition we actually use the TIDAddr and ZeroAddr in the | |||
528 | // program, otherwise we only need them for modeling purposes to get the | |||
529 | // associated arguments in the outlined function. In the former case, | |||
530 | // initialize the allocas properly, in the latter case, delete them later. | |||
531 | if (IfCondition) { | |||
532 | Builder.CreateStore(Constant::getNullValue(Int32), TIDAddr); | |||
533 | Builder.CreateStore(Constant::getNullValue(Int32), ZeroAddr); | |||
534 | } else { | |||
535 | ToBeDeleted.push_back(TIDAddr); | |||
536 | ToBeDeleted.push_back(ZeroAddr); | |||
537 | } | |||
538 | ||||
539 | // Create an artificial insertion point that will also ensure the blocks we | |||
540 | // are about to split are not degenerated. | |||
541 | auto *UI = new UnreachableInst(Builder.getContext(), InsertBB); | |||
542 | ||||
543 | Instruction *ThenTI = UI, *ElseTI = nullptr; | |||
544 | if (IfCondition) | |||
545 | SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI); | |||
546 | ||||
547 | BasicBlock *ThenBB = ThenTI->getParent(); | |||
548 | BasicBlock *PRegEntryBB = ThenBB->splitBasicBlock(ThenTI, "omp.par.entry"); | |||
549 | BasicBlock *PRegBodyBB = | |||
550 | PRegEntryBB->splitBasicBlock(ThenTI, "omp.par.region"); | |||
551 | BasicBlock *PRegPreFiniBB = | |||
552 | PRegBodyBB->splitBasicBlock(ThenTI, "omp.par.pre_finalize"); | |||
553 | BasicBlock *PRegExitBB = | |||
554 | PRegPreFiniBB->splitBasicBlock(ThenTI, "omp.par.exit"); | |||
555 | ||||
556 | auto FiniCBWrapper = [&](InsertPointTy IP) { | |||
557 | // Hide "open-ended" blocks from the given FiniCB by setting the right jump | |||
558 | // target to the region exit block. | |||
559 | if (IP.getBlock()->end() == IP.getPoint()) { | |||
560 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
561 | Builder.restoreIP(IP); | |||
562 | Instruction *I = Builder.CreateBr(PRegExitBB); | |||
563 | IP = InsertPointTy(I->getParent(), I->getIterator()); | |||
564 | } | |||
565 | assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 &&((void)0) | |||
566 | IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB &&((void)0) | |||
567 | "Unexpected insertion point for finalization call!")((void)0); | |||
568 | return FiniCB(IP); | |||
569 | }; | |||
570 | ||||
571 | FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable}); | |||
572 | ||||
573 | // Generate the privatization allocas in the block that will become the entry | |||
574 | // of the outlined function. | |||
575 | Builder.SetInsertPoint(PRegEntryBB->getTerminator()); | |||
576 | InsertPointTy InnerAllocaIP = Builder.saveIP(); | |||
577 | ||||
578 | AllocaInst *PrivTIDAddr = | |||
579 | Builder.CreateAlloca(Int32, nullptr, "tid.addr.local"); | |||
580 | Instruction *PrivTID = Builder.CreateLoad(Int32, PrivTIDAddr, "tid"); | |||
581 | ||||
582 | // Add some fake uses for OpenMP provided arguments. | |||
583 | ToBeDeleted.push_back(Builder.CreateLoad(Int32, TIDAddr, "tid.addr.use")); | |||
584 | Instruction *ZeroAddrUse = Builder.CreateLoad(Int32, ZeroAddr, | |||
585 | "zero.addr.use"); | |||
586 | ToBeDeleted.push_back(ZeroAddrUse); | |||
587 | ||||
588 | // ThenBB | |||
589 | // | | |||
590 | // V | |||
591 | // PRegionEntryBB <- Privatization allocas are placed here. | |||
592 | // | | |||
593 | // V | |||
594 | // PRegionBodyBB <- BodeGen is invoked here. | |||
595 | // | | |||
596 | // V | |||
597 | // PRegPreFiniBB <- The block we will start finalization from. | |||
598 | // | | |||
599 | // V | |||
600 | // PRegionExitBB <- A common exit to simplify block collection. | |||
601 | // | |||
602 | ||||
603 | LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n")do { } while (false); | |||
604 | ||||
605 | // Let the caller create the body. | |||
606 | assert(BodyGenCB && "Expected body generation callback!")((void)0); | |||
607 | InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin()); | |||
608 | BodyGenCB(InnerAllocaIP, CodeGenIP, *PRegPreFiniBB); | |||
609 | ||||
610 | LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n")do { } while (false); | |||
611 | ||||
612 | FunctionCallee RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call); | |||
613 | if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) { | |||
614 | if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) { | |||
615 | llvm::LLVMContext &Ctx = F->getContext(); | |||
616 | MDBuilder MDB(Ctx); | |||
617 | // Annotate the callback behavior of the __kmpc_fork_call: | |||
618 | // - The callback callee is argument number 2 (microtask). | |||
619 | // - The first two arguments of the callback callee are unknown (-1). | |||
620 | // - All variadic arguments to the __kmpc_fork_call are passed to the | |||
621 | // callback callee. | |||
622 | F->addMetadata( | |||
623 | llvm::LLVMContext::MD_callback, | |||
624 | *llvm::MDNode::get( | |||
625 | Ctx, {MDB.createCallbackEncoding(2, {-1, -1}, | |||
626 | /* VarArgsArePassed */ true)})); | |||
627 | } | |||
628 | } | |||
629 | ||||
630 | OutlineInfo OI; | |||
631 | OI.PostOutlineCB = [=](Function &OutlinedFn) { | |||
632 | // Add some known attributes. | |||
633 | OutlinedFn.addParamAttr(0, Attribute::NoAlias); | |||
634 | OutlinedFn.addParamAttr(1, Attribute::NoAlias); | |||
635 | OutlinedFn.addFnAttr(Attribute::NoUnwind); | |||
636 | OutlinedFn.addFnAttr(Attribute::NoRecurse); | |||
637 | ||||
638 | assert(OutlinedFn.arg_size() >= 2 &&((void)0) | |||
639 | "Expected at least tid and bounded tid as arguments")((void)0); | |||
640 | unsigned NumCapturedVars = | |||
641 | OutlinedFn.arg_size() - /* tid & bounded tid */ 2; | |||
642 | ||||
643 | CallInst *CI = cast<CallInst>(OutlinedFn.user_back()); | |||
644 | CI->getParent()->setName("omp_parallel"); | |||
645 | Builder.SetInsertPoint(CI); | |||
646 | ||||
647 | // Build call __kmpc_fork_call(Ident, n, microtask, var1, .., varn); | |||
648 | Value *ForkCallArgs[] = { | |||
649 | Ident, Builder.getInt32(NumCapturedVars), | |||
650 | Builder.CreateBitCast(&OutlinedFn, ParallelTaskPtr)}; | |||
651 | ||||
652 | SmallVector<Value *, 16> RealArgs; | |||
653 | RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs)); | |||
654 | RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end()); | |||
655 | ||||
656 | Builder.CreateCall(RTLFn, RealArgs); | |||
657 | ||||
658 | LLVM_DEBUG(dbgs() << "With fork_call placed: "do { } while (false) | |||
659 | << *Builder.GetInsertBlock()->getParent() << "\n")do { } while (false); | |||
660 | ||||
661 | InsertPointTy ExitIP(PRegExitBB, PRegExitBB->end()); | |||
662 | ||||
663 | // Initialize the local TID stack location with the argument value. | |||
664 | Builder.SetInsertPoint(PrivTID); | |||
665 | Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin(); | |||
666 | Builder.CreateStore(Builder.CreateLoad(Int32, OutlinedAI), PrivTIDAddr); | |||
667 | ||||
668 | // If no "if" clause was present we do not need the call created during | |||
669 | // outlining, otherwise we reuse it in the serialized parallel region. | |||
670 | if (!ElseTI) { | |||
671 | CI->eraseFromParent(); | |||
672 | } else { | |||
673 | ||||
674 | // If an "if" clause was present we are now generating the serialized | |||
675 | // version into the "else" branch. | |||
676 | Builder.SetInsertPoint(ElseTI); | |||
677 | ||||
678 | // Build calls __kmpc_serialized_parallel(&Ident, GTid); | |||
679 | Value *SerializedParallelCallArgs[] = {Ident, ThreadID}; | |||
680 | Builder.CreateCall( | |||
681 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_serialized_parallel), | |||
682 | SerializedParallelCallArgs); | |||
683 | ||||
684 | // OutlinedFn(>id, &zero, CapturedStruct); | |||
685 | CI->removeFromParent(); | |||
686 | Builder.Insert(CI); | |||
687 | ||||
688 | // __kmpc_end_serialized_parallel(&Ident, GTid); | |||
689 | Value *EndArgs[] = {Ident, ThreadID}; | |||
690 | Builder.CreateCall( | |||
691 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_serialized_parallel), | |||
692 | EndArgs); | |||
693 | ||||
694 | LLVM_DEBUG(dbgs() << "With serialized parallel region: "do { } while (false) | |||
695 | << *Builder.GetInsertBlock()->getParent() << "\n")do { } while (false); | |||
696 | } | |||
697 | ||||
698 | for (Instruction *I : ToBeDeleted) | |||
699 | I->eraseFromParent(); | |||
700 | }; | |||
701 | ||||
702 | // Adjust the finalization stack, verify the adjustment, and call the | |||
703 | // finalize function a last time to finalize values between the pre-fini | |||
704 | // block and the exit block if we left the parallel "the normal way". | |||
705 | auto FiniInfo = FinalizationStack.pop_back_val(); | |||
706 | (void)FiniInfo; | |||
707 | assert(FiniInfo.DK == OMPD_parallel &&((void)0) | |||
708 | "Unexpected finalization stack state!")((void)0); | |||
709 | ||||
710 | Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator(); | |||
711 | ||||
712 | InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator()); | |||
713 | FiniCB(PreFiniIP); | |||
714 | ||||
715 | OI.EntryBB = PRegEntryBB; | |||
716 | OI.ExitBB = PRegExitBB; | |||
717 | ||||
718 | SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet; | |||
719 | SmallVector<BasicBlock *, 32> Blocks; | |||
720 | OI.collectBlocks(ParallelRegionBlockSet, Blocks); | |||
721 | ||||
722 | // Ensure a single exit node for the outlined region by creating one. | |||
723 | // We might have multiple incoming edges to the exit now due to finalizations, | |||
724 | // e.g., cancel calls that cause the control flow to leave the region. | |||
725 | BasicBlock *PRegOutlinedExitBB = PRegExitBB; | |||
726 | PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt()); | |||
727 | PRegOutlinedExitBB->setName("omp.par.outlined.exit"); | |||
728 | Blocks.push_back(PRegOutlinedExitBB); | |||
729 | ||||
730 | CodeExtractorAnalysisCache CEAC(*OuterFn); | |||
731 | CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr, | |||
732 | /* AggregateArgs */ false, | |||
733 | /* BlockFrequencyInfo */ nullptr, | |||
734 | /* BranchProbabilityInfo */ nullptr, | |||
735 | /* AssumptionCache */ nullptr, | |||
736 | /* AllowVarArgs */ true, | |||
737 | /* AllowAlloca */ true, | |||
738 | /* Suffix */ ".omp_par"); | |||
739 | ||||
740 | // Find inputs to, outputs from the code region. | |||
741 | BasicBlock *CommonExit = nullptr; | |||
742 | SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands; | |||
743 | Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit); | |||
744 | Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands); | |||
745 | ||||
746 | LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n")do { } while (false); | |||
747 | ||||
748 | FunctionCallee TIDRTLFn = | |||
749 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num); | |||
750 | ||||
751 | auto PrivHelper = [&](Value &V) { | |||
752 | if (&V == TIDAddr || &V == ZeroAddr) | |||
753 | return; | |||
754 | ||||
755 | SetVector<Use *> Uses; | |||
756 | for (Use &U : V.uses()) | |||
757 | if (auto *UserI = dyn_cast<Instruction>(U.getUser())) | |||
758 | if (ParallelRegionBlockSet.count(UserI->getParent())) | |||
759 | Uses.insert(&U); | |||
760 | ||||
761 | // __kmpc_fork_call expects extra arguments as pointers. If the input | |||
762 | // already has a pointer type, everything is fine. Otherwise, store the | |||
763 | // value onto stack and load it back inside the to-be-outlined region. This | |||
764 | // will ensure only the pointer will be passed to the function. | |||
765 | // FIXME: if there are more than 15 trailing arguments, they must be | |||
766 | // additionally packed in a struct. | |||
767 | Value *Inner = &V; | |||
768 | if (!V.getType()->isPointerTy()) { | |||
769 | IRBuilder<>::InsertPointGuard Guard(Builder); | |||
770 | LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n")do { } while (false); | |||
771 | ||||
772 | Builder.restoreIP(OuterAllocaIP); | |||
773 | Value *Ptr = | |||
774 | Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded"); | |||
775 | ||||
776 | // Store to stack at end of the block that currently branches to the entry | |||
777 | // block of the to-be-outlined region. | |||
778 | Builder.SetInsertPoint(InsertBB, | |||
779 | InsertBB->getTerminator()->getIterator()); | |||
780 | Builder.CreateStore(&V, Ptr); | |||
781 | ||||
782 | // Load back next to allocations in the to-be-outlined region. | |||
783 | Builder.restoreIP(InnerAllocaIP); | |||
784 | Inner = Builder.CreateLoad(V.getType(), Ptr); | |||
785 | } | |||
786 | ||||
787 | Value *ReplacementValue = nullptr; | |||
788 | CallInst *CI = dyn_cast<CallInst>(&V); | |||
789 | if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) { | |||
790 | ReplacementValue = PrivTID; | |||
791 | } else { | |||
792 | Builder.restoreIP( | |||
793 | PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue)); | |||
794 | assert(ReplacementValue &&((void)0) | |||
795 | "Expected copy/create callback to set replacement value!")((void)0); | |||
796 | if (ReplacementValue == &V) | |||
797 | return; | |||
798 | } | |||
799 | ||||
800 | for (Use *UPtr : Uses) | |||
801 | UPtr->set(ReplacementValue); | |||
802 | }; | |||
803 | ||||
804 | // Reset the inner alloca insertion as it will be used for loading the values | |||
805 | // wrapped into pointers before passing them into the to-be-outlined region. | |||
806 | // Configure it to insert immediately after the fake use of zero address so | |||
807 | // that they are available in the generated body and so that the | |||
808 | // OpenMP-related values (thread ID and zero address pointers) remain leading | |||
809 | // in the argument list. | |||
810 | InnerAllocaIP = IRBuilder<>::InsertPoint( | |||
811 | ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator()); | |||
812 | ||||
813 | // Reset the outer alloca insertion point to the entry of the relevant block | |||
814 | // in case it was invalidated. | |||
815 | OuterAllocaIP = IRBuilder<>::InsertPoint( | |||
816 | OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt()); | |||
817 | ||||
818 | for (Value *Input : Inputs) { | |||
819 | LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n")do { } while (false); | |||
820 | PrivHelper(*Input); | |||
821 | } | |||
822 | LLVM_DEBUG({do { } while (false) | |||
823 | for (Value *Output : Outputs)do { } while (false) | |||
824 | LLVM_DEBUG(dbgs() << "Captured output: " << *Output << "\n");do { } while (false) | |||
825 | })do { } while (false); | |||
826 | assert(Outputs.empty() &&((void)0) | |||
827 | "OpenMP outlining should not produce live-out values!")((void)0); | |||
828 | ||||
829 | LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n")do { } while (false); | |||
830 | LLVM_DEBUG({do { } while (false) | |||
831 | for (auto *BB : Blocks)do { } while (false) | |||
832 | dbgs() << " PBR: " << BB->getName() << "\n";do { } while (false) | |||
833 | })do { } while (false); | |||
834 | ||||
835 | // Register the outlined info. | |||
836 | addOutlineInfo(std::move(OI)); | |||
837 | ||||
838 | InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end()); | |||
839 | UI->eraseFromParent(); | |||
840 | ||||
841 | return AfterIP; | |||
842 | } | |||
843 | ||||
844 | void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) { | |||
845 | // Build call void __kmpc_flush(ident_t *loc) | |||
846 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
847 | Value *Args[] = {getOrCreateIdent(SrcLocStr)}; | |||
848 | ||||
849 | Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args); | |||
850 | } | |||
851 | ||||
852 | void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) { | |||
853 | if (!updateToLocation(Loc)) | |||
854 | return; | |||
855 | emitFlush(Loc); | |||
856 | } | |||
857 | ||||
858 | void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) { | |||
859 | // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 | |||
860 | // global_tid); | |||
861 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
862 | Value *Ident = getOrCreateIdent(SrcLocStr); | |||
863 | Value *Args[] = {Ident, getOrCreateThreadID(Ident)}; | |||
864 | ||||
865 | // Ignore return result until untied tasks are supported. | |||
866 | Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait), | |||
867 | Args); | |||
868 | } | |||
869 | ||||
870 | void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) { | |||
871 | if (!updateToLocation(Loc)) | |||
872 | return; | |||
873 | emitTaskwaitImpl(Loc); | |||
874 | } | |||
875 | ||||
876 | void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) { | |||
877 | // Build call __kmpc_omp_taskyield(loc, thread_id, 0); | |||
878 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
879 | Value *Ident = getOrCreateIdent(SrcLocStr); | |||
880 | Constant *I32Null = ConstantInt::getNullValue(Int32); | |||
881 | Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null}; | |||
882 | ||||
883 | Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield), | |||
884 | Args); | |||
885 | } | |||
886 | ||||
887 | void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) { | |||
888 | if (!updateToLocation(Loc)) | |||
889 | return; | |||
890 | emitTaskyieldImpl(Loc); | |||
891 | } | |||
892 | ||||
893 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSections( | |||
894 | const LocationDescription &Loc, InsertPointTy AllocaIP, | |||
895 | ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB, | |||
896 | FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait) { | |||
897 | if (!updateToLocation(Loc)) | |||
898 | return Loc.IP; | |||
899 | ||||
900 | auto FiniCBWrapper = [&](InsertPointTy IP) { | |||
901 | if (IP.getBlock()->end() != IP.getPoint()) | |||
902 | return FiniCB(IP); | |||
903 | // This must be done otherwise any nested constructs using FinalizeOMPRegion | |||
904 | // will fail because that function requires the Finalization Basic Block to | |||
905 | // have a terminator, which is already removed by EmitOMPRegionBody. | |||
906 | // IP is currently at cancelation block. | |||
907 | // We need to backtrack to the condition block to fetch | |||
908 | // the exit block and create a branch from cancelation | |||
909 | // to exit block. | |||
910 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
911 | Builder.restoreIP(IP); | |||
912 | auto *CaseBB = IP.getBlock()->getSinglePredecessor(); | |||
913 | auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor(); | |||
914 | auto *ExitBB = CondBB->getTerminator()->getSuccessor(1); | |||
915 | Instruction *I = Builder.CreateBr(ExitBB); | |||
916 | IP = InsertPointTy(I->getParent(), I->getIterator()); | |||
917 | return FiniCB(IP); | |||
918 | }; | |||
919 | ||||
920 | FinalizationStack.push_back({FiniCBWrapper, OMPD_sections, IsCancellable}); | |||
921 | ||||
922 | // Each section is emitted as a switch case | |||
923 | // Each finalization callback is handled from clang.EmitOMPSectionDirective() | |||
924 | // -> OMP.createSection() which generates the IR for each section | |||
925 | // Iterate through all sections and emit a switch construct: | |||
926 | // switch (IV) { | |||
927 | // case 0: | |||
928 | // <SectionStmt[0]>; | |||
929 | // break; | |||
930 | // ... | |||
931 | // case <NumSection> - 1: | |||
932 | // <SectionStmt[<NumSection> - 1]>; | |||
933 | // break; | |||
934 | // } | |||
935 | // ... | |||
936 | // section_loop.after: | |||
937 | // <FiniCB>; | |||
938 | auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) { | |||
939 | auto *CurFn = CodeGenIP.getBlock()->getParent(); | |||
940 | auto *ForIncBB = CodeGenIP.getBlock()->getSingleSuccessor(); | |||
941 | auto *ForExitBB = CodeGenIP.getBlock() | |||
942 | ->getSinglePredecessor() | |||
943 | ->getTerminator() | |||
944 | ->getSuccessor(1); | |||
945 | SwitchInst *SwitchStmt = Builder.CreateSwitch(IndVar, ForIncBB); | |||
946 | Builder.restoreIP(CodeGenIP); | |||
947 | unsigned CaseNumber = 0; | |||
948 | for (auto SectionCB : SectionCBs) { | |||
949 | auto *CaseBB = BasicBlock::Create(M.getContext(), | |||
950 | "omp_section_loop.body.case", CurFn); | |||
951 | SwitchStmt->addCase(Builder.getInt32(CaseNumber), CaseBB); | |||
952 | Builder.SetInsertPoint(CaseBB); | |||
953 | SectionCB(InsertPointTy(), Builder.saveIP(), *ForExitBB); | |||
954 | CaseNumber++; | |||
955 | } | |||
956 | // remove the existing terminator from body BB since there can be no | |||
957 | // terminators after switch/case | |||
958 | CodeGenIP.getBlock()->getTerminator()->eraseFromParent(); | |||
959 | }; | |||
960 | // Loop body ends here | |||
961 | // LowerBound, UpperBound, and STride for createCanonicalLoop | |||
962 | Type *I32Ty = Type::getInt32Ty(M.getContext()); | |||
963 | Value *LB = ConstantInt::get(I32Ty, 0); | |||
964 | Value *UB = ConstantInt::get(I32Ty, SectionCBs.size()); | |||
965 | Value *ST = ConstantInt::get(I32Ty, 1); | |||
966 | llvm::CanonicalLoopInfo *LoopInfo = createCanonicalLoop( | |||
967 | Loc, LoopBodyGenCB, LB, UB, ST, true, false, AllocaIP, "section_loop"); | |||
968 | LoopInfo = createStaticWorkshareLoop(Loc, LoopInfo, AllocaIP, true); | |||
969 | BasicBlock *LoopAfterBB = LoopInfo->getAfter(); | |||
970 | Instruction *SplitPos = LoopAfterBB->getTerminator(); | |||
971 | if (!isa_and_nonnull<BranchInst>(SplitPos)) | |||
972 | SplitPos = new UnreachableInst(Builder.getContext(), LoopAfterBB); | |||
973 | // ExitBB after LoopAfterBB because LoopAfterBB is used for FinalizationCB, | |||
974 | // which requires a BB with branch | |||
975 | BasicBlock *ExitBB = | |||
976 | LoopAfterBB->splitBasicBlock(SplitPos, "omp_sections.end"); | |||
977 | SplitPos->eraseFromParent(); | |||
978 | ||||
979 | // Apply the finalization callback in LoopAfterBB | |||
980 | auto FiniInfo = FinalizationStack.pop_back_val(); | |||
981 | assert(FiniInfo.DK == OMPD_sections &&((void)0) | |||
982 | "Unexpected finalization stack state!")((void)0); | |||
983 | Builder.SetInsertPoint(LoopAfterBB->getTerminator()); | |||
984 | FiniInfo.FiniCB(Builder.saveIP()); | |||
985 | Builder.SetInsertPoint(ExitBB); | |||
986 | ||||
987 | return Builder.saveIP(); | |||
988 | } | |||
989 | ||||
990 | OpenMPIRBuilder::InsertPointTy | |||
991 | OpenMPIRBuilder::createSection(const LocationDescription &Loc, | |||
992 | BodyGenCallbackTy BodyGenCB, | |||
993 | FinalizeCallbackTy FiniCB) { | |||
994 | if (!updateToLocation(Loc)) | |||
995 | return Loc.IP; | |||
996 | ||||
997 | auto FiniCBWrapper = [&](InsertPointTy IP) { | |||
998 | if (IP.getBlock()->end() != IP.getPoint()) | |||
999 | return FiniCB(IP); | |||
1000 | // This must be done otherwise any nested constructs using FinalizeOMPRegion | |||
1001 | // will fail because that function requires the Finalization Basic Block to | |||
1002 | // have a terminator, which is already removed by EmitOMPRegionBody. | |||
1003 | // IP is currently at cancelation block. | |||
1004 | // We need to backtrack to the condition block to fetch | |||
1005 | // the exit block and create a branch from cancelation | |||
1006 | // to exit block. | |||
1007 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
1008 | Builder.restoreIP(IP); | |||
1009 | auto *CaseBB = Loc.IP.getBlock(); | |||
1010 | auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor(); | |||
1011 | auto *ExitBB = CondBB->getTerminator()->getSuccessor(1); | |||
1012 | Instruction *I = Builder.CreateBr(ExitBB); | |||
1013 | IP = InsertPointTy(I->getParent(), I->getIterator()); | |||
1014 | return FiniCB(IP); | |||
1015 | }; | |||
1016 | ||||
1017 | Directive OMPD = Directive::OMPD_sections; | |||
1018 | // Since we are using Finalization Callback here, HasFinalize | |||
1019 | // and IsCancellable have to be true | |||
1020 | return EmitOMPInlinedRegion(OMPD, nullptr, nullptr, BodyGenCB, FiniCBWrapper, | |||
1021 | /*Conditional*/ false, /*hasFinalize*/ true, | |||
1022 | /*IsCancellable*/ true); | |||
1023 | } | |||
1024 | ||||
1025 | OpenMPIRBuilder::InsertPointTy | |||
1026 | OpenMPIRBuilder::createMaster(const LocationDescription &Loc, | |||
1027 | BodyGenCallbackTy BodyGenCB, | |||
1028 | FinalizeCallbackTy FiniCB) { | |||
1029 | ||||
1030 | if (!updateToLocation(Loc)) | |||
1031 | return Loc.IP; | |||
1032 | ||||
1033 | Directive OMPD = Directive::OMPD_master; | |||
1034 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
1035 | Value *Ident = getOrCreateIdent(SrcLocStr); | |||
1036 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
1037 | Value *Args[] = {Ident, ThreadId}; | |||
1038 | ||||
1039 | Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master); | |||
1040 | Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args); | |||
1041 | ||||
1042 | Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master); | |||
1043 | Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); | |||
1044 | ||||
1045 | return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, | |||
1046 | /*Conditional*/ true, /*hasFinalize*/ true); | |||
1047 | } | |||
1048 | ||||
1049 | OpenMPIRBuilder::InsertPointTy | |||
1050 | OpenMPIRBuilder::createMasked(const LocationDescription &Loc, | |||
1051 | BodyGenCallbackTy BodyGenCB, | |||
1052 | FinalizeCallbackTy FiniCB, Value *Filter) { | |||
1053 | if (!updateToLocation(Loc)) | |||
1054 | return Loc.IP; | |||
1055 | ||||
1056 | Directive OMPD = Directive::OMPD_masked; | |||
1057 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
1058 | Value *Ident = getOrCreateIdent(SrcLocStr); | |||
1059 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
1060 | Value *Args[] = {Ident, ThreadId, Filter}; | |||
1061 | Value *ArgsEnd[] = {Ident, ThreadId}; | |||
1062 | ||||
1063 | Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_masked); | |||
1064 | Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args); | |||
1065 | ||||
1066 | Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_masked); | |||
1067 | Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, ArgsEnd); | |||
1068 | ||||
1069 | return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, | |||
1070 | /*Conditional*/ true, /*hasFinalize*/ true); | |||
1071 | } | |||
1072 | ||||
1073 | CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton( | |||
1074 | DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore, | |||
1075 | BasicBlock *PostInsertBefore, const Twine &Name) { | |||
1076 | Module *M = F->getParent(); | |||
1077 | LLVMContext &Ctx = M->getContext(); | |||
1078 | Type *IndVarTy = TripCount->getType(); | |||
1079 | ||||
1080 | // Create the basic block structure. | |||
1081 | BasicBlock *Preheader = | |||
1082 | BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore); | |||
1083 | BasicBlock *Header = | |||
1084 | BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore); | |||
1085 | BasicBlock *Cond = | |||
1086 | BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore); | |||
1087 | BasicBlock *Body = | |||
1088 | BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore); | |||
1089 | BasicBlock *Latch = | |||
1090 | BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore); | |||
1091 | BasicBlock *Exit = | |||
1092 | BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore); | |||
1093 | BasicBlock *After = | |||
1094 | BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore); | |||
1095 | ||||
1096 | // Use specified DebugLoc for new instructions. | |||
1097 | Builder.SetCurrentDebugLocation(DL); | |||
1098 | ||||
1099 | Builder.SetInsertPoint(Preheader); | |||
1100 | Builder.CreateBr(Header); | |||
1101 | ||||
1102 | Builder.SetInsertPoint(Header); | |||
1103 | PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv"); | |||
1104 | IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader); | |||
1105 | Builder.CreateBr(Cond); | |||
1106 | ||||
1107 | Builder.SetInsertPoint(Cond); | |||
1108 | Value *Cmp = | |||
1109 | Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp"); | |||
1110 | Builder.CreateCondBr(Cmp, Body, Exit); | |||
1111 | ||||
1112 | Builder.SetInsertPoint(Body); | |||
1113 | Builder.CreateBr(Latch); | |||
1114 | ||||
1115 | Builder.SetInsertPoint(Latch); | |||
1116 | Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1), | |||
1117 | "omp_" + Name + ".next", /*HasNUW=*/true); | |||
1118 | Builder.CreateBr(Header); | |||
1119 | IndVarPHI->addIncoming(Next, Latch); | |||
1120 | ||||
1121 | Builder.SetInsertPoint(Exit); | |||
1122 | Builder.CreateBr(After); | |||
1123 | ||||
1124 | // Remember and return the canonical control flow. | |||
1125 | LoopInfos.emplace_front(); | |||
1126 | CanonicalLoopInfo *CL = &LoopInfos.front(); | |||
1127 | ||||
1128 | CL->Preheader = Preheader; | |||
1129 | CL->Header = Header; | |||
1130 | CL->Cond = Cond; | |||
1131 | CL->Body = Body; | |||
1132 | CL->Latch = Latch; | |||
1133 | CL->Exit = Exit; | |||
1134 | CL->After = After; | |||
1135 | ||||
1136 | CL->IsValid = true; | |||
1137 | ||||
1138 | #ifndef NDEBUG1 | |||
1139 | CL->assertOK(); | |||
1140 | #endif | |||
1141 | return CL; | |||
1142 | } | |||
1143 | ||||
1144 | CanonicalLoopInfo * | |||
1145 | OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc, | |||
1146 | LoopBodyGenCallbackTy BodyGenCB, | |||
1147 | Value *TripCount, const Twine &Name) { | |||
1148 | BasicBlock *BB = Loc.IP.getBlock(); | |||
1149 | BasicBlock *NextBB = BB->getNextNode(); | |||
1150 | ||||
1151 | CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(), | |||
1152 | NextBB, NextBB, Name); | |||
1153 | BasicBlock *After = CL->getAfter(); | |||
1154 | ||||
1155 | // If location is not set, don't connect the loop. | |||
1156 | if (updateToLocation(Loc)) { | |||
1157 | // Split the loop at the insertion point: Branch to the preheader and move | |||
1158 | // every following instruction to after the loop (the After BB). Also, the | |||
1159 | // new successor is the loop's after block. | |||
1160 | Builder.CreateBr(CL->Preheader); | |||
1161 | After->getInstList().splice(After->begin(), BB->getInstList(), | |||
1162 | Builder.GetInsertPoint(), BB->end()); | |||
1163 | After->replaceSuccessorsPhiUsesWith(BB, After); | |||
1164 | } | |||
1165 | ||||
1166 | // Emit the body content. We do it after connecting the loop to the CFG to | |||
1167 | // avoid that the callback encounters degenerate BBs. | |||
1168 | BodyGenCB(CL->getBodyIP(), CL->getIndVar()); | |||
1169 | ||||
1170 | #ifndef NDEBUG1 | |||
1171 | CL->assertOK(); | |||
1172 | #endif | |||
1173 | return CL; | |||
1174 | } | |||
1175 | ||||
1176 | CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop( | |||
1177 | const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, | |||
1178 | Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, | |||
1179 | InsertPointTy ComputeIP, const Twine &Name) { | |||
1180 | ||||
1181 | // Consider the following difficulties (assuming 8-bit signed integers): | |||
1182 | // * Adding \p Step to the loop counter which passes \p Stop may overflow: | |||
1183 | // DO I = 1, 100, 50 | |||
1184 | /// * A \p Step of INT_MIN cannot not be normalized to a positive direction: | |||
1185 | // DO I = 100, 0, -128 | |||
1186 | ||||
1187 | // Start, Stop and Step must be of the same integer type. | |||
1188 | auto *IndVarTy = cast<IntegerType>(Start->getType()); | |||
1189 | assert(IndVarTy == Stop->getType() && "Stop type mismatch")((void)0); | |||
1190 | assert(IndVarTy == Step->getType() && "Step type mismatch")((void)0); | |||
1191 | ||||
1192 | LocationDescription ComputeLoc = | |||
1193 | ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc; | |||
1194 | updateToLocation(ComputeLoc); | |||
1195 | ||||
1196 | ConstantInt *Zero = ConstantInt::get(IndVarTy, 0); | |||
1197 | ConstantInt *One = ConstantInt::get(IndVarTy, 1); | |||
1198 | ||||
1199 | // Like Step, but always positive. | |||
1200 | Value *Incr = Step; | |||
1201 | ||||
1202 | // Distance between Start and Stop; always positive. | |||
1203 | Value *Span; | |||
1204 | ||||
1205 | // Condition whether there are no iterations are executed at all, e.g. because | |||
1206 | // UB < LB. | |||
1207 | Value *ZeroCmp; | |||
1208 | ||||
1209 | if (IsSigned) { | |||
1210 | // Ensure that increment is positive. If not, negate and invert LB and UB. | |||
1211 | Value *IsNeg = Builder.CreateICmpSLT(Step, Zero); | |||
1212 | Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step); | |||
1213 | Value *LB = Builder.CreateSelect(IsNeg, Stop, Start); | |||
1214 | Value *UB = Builder.CreateSelect(IsNeg, Start, Stop); | |||
1215 | Span = Builder.CreateSub(UB, LB, "", false, true); | |||
1216 | ZeroCmp = Builder.CreateICmp( | |||
1217 | InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB); | |||
1218 | } else { | |||
1219 | Span = Builder.CreateSub(Stop, Start, "", true); | |||
1220 | ZeroCmp = Builder.CreateICmp( | |||
1221 | InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start); | |||
1222 | } | |||
1223 | ||||
1224 | Value *CountIfLooping; | |||
1225 | if (InclusiveStop) { | |||
1226 | CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One); | |||
1227 | } else { | |||
1228 | // Avoid incrementing past stop since it could overflow. | |||
1229 | Value *CountIfTwo = Builder.CreateAdd( | |||
1230 | Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One); | |||
1231 | Value *OneCmp = Builder.CreateICmp( | |||
1232 | InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Span, Incr); | |||
1233 | CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo); | |||
1234 | } | |||
1235 | Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping, | |||
1236 | "omp_" + Name + ".tripcount"); | |||
1237 | ||||
1238 | auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) { | |||
1239 | Builder.restoreIP(CodeGenIP); | |||
1240 | Value *Span = Builder.CreateMul(IV, Step); | |||
1241 | Value *IndVar = Builder.CreateAdd(Span, Start); | |||
1242 | BodyGenCB(Builder.saveIP(), IndVar); | |||
1243 | }; | |||
1244 | LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP(); | |||
1245 | return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name); | |||
1246 | } | |||
1247 | ||||
1248 | // Returns an LLVM function to call for initializing loop bounds using OpenMP | |||
1249 | // static scheduling depending on `type`. Only i32 and i64 are supported by the | |||
1250 | // runtime. Always interpret integers as unsigned similarly to | |||
1251 | // CanonicalLoopInfo. | |||
1252 | static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M, | |||
1253 | OpenMPIRBuilder &OMPBuilder) { | |||
1254 | unsigned Bitwidth = Ty->getIntegerBitWidth(); | |||
1255 | if (Bitwidth == 32) | |||
1256 | return OMPBuilder.getOrCreateRuntimeFunction( | |||
1257 | M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u); | |||
1258 | if (Bitwidth == 64) | |||
1259 | return OMPBuilder.getOrCreateRuntimeFunction( | |||
1260 | M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u); | |||
1261 | llvm_unreachable("unknown OpenMP loop iterator bitwidth")__builtin_unreachable(); | |||
1262 | } | |||
1263 | ||||
1264 | // Sets the number of loop iterations to the given value. This value must be | |||
1265 | // valid in the condition block (i.e., defined in the preheader) and is | |||
1266 | // interpreted as an unsigned integer. | |||
1267 | void setCanonicalLoopTripCount(CanonicalLoopInfo *CLI, Value *TripCount) { | |||
1268 | Instruction *CmpI = &CLI->getCond()->front(); | |||
1269 | assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount")((void)0); | |||
1270 | CmpI->setOperand(1, TripCount); | |||
1271 | CLI->assertOK(); | |||
1272 | } | |||
1273 | ||||
1274 | CanonicalLoopInfo *OpenMPIRBuilder::createStaticWorkshareLoop( | |||
1275 | const LocationDescription &Loc, CanonicalLoopInfo *CLI, | |||
1276 | InsertPointTy AllocaIP, bool NeedsBarrier, Value *Chunk) { | |||
1277 | // Set up the source location value for OpenMP runtime. | |||
1278 | if (!updateToLocation(Loc)) | |||
1279 | return nullptr; | |||
1280 | ||||
1281 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
1282 | Value *SrcLoc = getOrCreateIdent(SrcLocStr); | |||
1283 | ||||
1284 | // Declare useful OpenMP runtime functions. | |||
1285 | Value *IV = CLI->getIndVar(); | |||
1286 | Type *IVTy = IV->getType(); | |||
1287 | FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this); | |||
1288 | FunctionCallee StaticFini = | |||
1289 | getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini); | |||
1290 | ||||
1291 | // Allocate space for computed loop bounds as expected by the "init" function. | |||
1292 | Builder.restoreIP(AllocaIP); | |||
1293 | Type *I32Type = Type::getInt32Ty(M.getContext()); | |||
1294 | Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter"); | |||
1295 | Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound"); | |||
1296 | Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound"); | |||
1297 | Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride"); | |||
1298 | ||||
1299 | // At the end of the preheader, prepare for calling the "init" function by | |||
1300 | // storing the current loop bounds into the allocated space. A canonical loop | |||
1301 | // always iterates from 0 to trip-count with step 1. Note that "init" expects | |||
1302 | // and produces an inclusive upper bound. | |||
1303 | Builder.SetInsertPoint(CLI->getPreheader()->getTerminator()); | |||
1304 | Constant *Zero = ConstantInt::get(IVTy, 0); | |||
1305 | Constant *One = ConstantInt::get(IVTy, 1); | |||
1306 | Builder.CreateStore(Zero, PLowerBound); | |||
1307 | Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One); | |||
1308 | Builder.CreateStore(UpperBound, PUpperBound); | |||
1309 | Builder.CreateStore(One, PStride); | |||
1310 | ||||
1311 | if (!Chunk) | |||
1312 | Chunk = One; | |||
1313 | ||||
1314 | Value *ThreadNum = getOrCreateThreadID(SrcLoc); | |||
1315 | ||||
1316 | Constant *SchedulingType = | |||
1317 | ConstantInt::get(I32Type, static_cast<int>(OMPScheduleType::Static)); | |||
1318 | ||||
1319 | // Call the "init" function and update the trip count of the loop with the | |||
1320 | // value it produced. | |||
1321 | Builder.CreateCall(StaticInit, | |||
1322 | {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound, | |||
1323 | PUpperBound, PStride, One, Chunk}); | |||
1324 | Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound); | |||
1325 | Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound); | |||
1326 | Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound); | |||
1327 | Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One); | |||
1328 | setCanonicalLoopTripCount(CLI, TripCount); | |||
1329 | ||||
1330 | // Update all uses of the induction variable except the one in the condition | |||
1331 | // block that compares it with the actual upper bound, and the increment in | |||
1332 | // the latch block. | |||
1333 | // TODO: this can eventually move to CanonicalLoopInfo or to a new | |||
1334 | // CanonicalLoopInfoUpdater interface. | |||
1335 | Builder.SetInsertPoint(CLI->getBody(), CLI->getBody()->getFirstInsertionPt()); | |||
1336 | Value *UpdatedIV = Builder.CreateAdd(IV, LowerBound); | |||
1337 | IV->replaceUsesWithIf(UpdatedIV, [&](Use &U) { | |||
1338 | auto *Instr = dyn_cast<Instruction>(U.getUser()); | |||
1339 | return !Instr || | |||
1340 | (Instr->getParent() != CLI->getCond() && | |||
1341 | Instr->getParent() != CLI->getLatch() && Instr != UpdatedIV); | |||
1342 | }); | |||
1343 | ||||
1344 | // In the "exit" block, call the "fini" function. | |||
1345 | Builder.SetInsertPoint(CLI->getExit(), | |||
1346 | CLI->getExit()->getTerminator()->getIterator()); | |||
1347 | Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum}); | |||
1348 | ||||
1349 | // Add the barrier if requested. | |||
1350 | if (NeedsBarrier) | |||
1351 | createBarrier(LocationDescription(Builder.saveIP(), Loc.DL), | |||
1352 | omp::Directive::OMPD_for, /* ForceSimpleCall */ false, | |||
1353 | /* CheckCancelFlag */ false); | |||
1354 | ||||
1355 | CLI->assertOK(); | |||
1356 | return CLI; | |||
1357 | } | |||
1358 | ||||
1359 | CanonicalLoopInfo *OpenMPIRBuilder::createWorkshareLoop( | |||
1360 | const LocationDescription &Loc, CanonicalLoopInfo *CLI, | |||
1361 | InsertPointTy AllocaIP, bool NeedsBarrier) { | |||
1362 | // Currently only supports static schedules. | |||
1363 | return createStaticWorkshareLoop(Loc, CLI, AllocaIP, NeedsBarrier); | |||
1364 | } | |||
1365 | ||||
1366 | /// Returns an LLVM function to call for initializing loop bounds using OpenMP | |||
1367 | /// dynamic scheduling depending on `type`. Only i32 and i64 are supported by | |||
1368 | /// the runtime. Always interpret integers as unsigned similarly to | |||
1369 | /// CanonicalLoopInfo. | |||
1370 | static FunctionCallee | |||
1371 | getKmpcForDynamicInitForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) { | |||
1372 | unsigned Bitwidth = Ty->getIntegerBitWidth(); | |||
1373 | if (Bitwidth == 32) | |||
1374 | return OMPBuilder.getOrCreateRuntimeFunction( | |||
1375 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_4u); | |||
1376 | if (Bitwidth == 64) | |||
1377 | return OMPBuilder.getOrCreateRuntimeFunction( | |||
1378 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_8u); | |||
1379 | llvm_unreachable("unknown OpenMP loop iterator bitwidth")__builtin_unreachable(); | |||
1380 | } | |||
1381 | ||||
1382 | /// Returns an LLVM function to call for updating the next loop using OpenMP | |||
1383 | /// dynamic scheduling depending on `type`. Only i32 and i64 are supported by | |||
1384 | /// the runtime. Always interpret integers as unsigned similarly to | |||
1385 | /// CanonicalLoopInfo. | |||
1386 | static FunctionCallee | |||
1387 | getKmpcForDynamicNextForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) { | |||
1388 | unsigned Bitwidth = Ty->getIntegerBitWidth(); | |||
1389 | if (Bitwidth == 32) | |||
1390 | return OMPBuilder.getOrCreateRuntimeFunction( | |||
1391 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_4u); | |||
1392 | if (Bitwidth == 64) | |||
1393 | return OMPBuilder.getOrCreateRuntimeFunction( | |||
1394 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_8u); | |||
1395 | llvm_unreachable("unknown OpenMP loop iterator bitwidth")__builtin_unreachable(); | |||
1396 | } | |||
1397 | ||||
1398 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createDynamicWorkshareLoop( | |||
1399 | const LocationDescription &Loc, CanonicalLoopInfo *CLI, | |||
1400 | InsertPointTy AllocaIP, OMPScheduleType SchedType, bool NeedsBarrier, | |||
1401 | Value *Chunk) { | |||
1402 | // Set up the source location value for OpenMP runtime. | |||
1403 | Builder.SetCurrentDebugLocation(Loc.DL); | |||
1404 | ||||
1405 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
1406 | Value *SrcLoc = getOrCreateIdent(SrcLocStr); | |||
1407 | ||||
1408 | // Declare useful OpenMP runtime functions. | |||
1409 | Value *IV = CLI->getIndVar(); | |||
1410 | Type *IVTy = IV->getType(); | |||
1411 | FunctionCallee DynamicInit = getKmpcForDynamicInitForType(IVTy, M, *this); | |||
1412 | FunctionCallee DynamicNext = getKmpcForDynamicNextForType(IVTy, M, *this); | |||
1413 | ||||
1414 | // Allocate space for computed loop bounds as expected by the "init" function. | |||
1415 | Builder.restoreIP(AllocaIP); | |||
| ||||
1416 | Type *I32Type = Type::getInt32Ty(M.getContext()); | |||
1417 | Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter"); | |||
1418 | Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound"); | |||
1419 | Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound"); | |||
1420 | Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride"); | |||
1421 | ||||
1422 | // At the end of the preheader, prepare for calling the "init" function by | |||
1423 | // storing the current loop bounds into the allocated space. A canonical loop | |||
1424 | // always iterates from 0 to trip-count with step 1. Note that "init" expects | |||
1425 | // and produces an inclusive upper bound. | |||
1426 | BasicBlock *PreHeader = CLI->getPreheader(); | |||
1427 | Builder.SetInsertPoint(PreHeader->getTerminator()); | |||
1428 | Constant *One = ConstantInt::get(IVTy, 1); | |||
1429 | Builder.CreateStore(One, PLowerBound); | |||
1430 | Value *UpperBound = CLI->getTripCount(); | |||
1431 | Builder.CreateStore(UpperBound, PUpperBound); | |||
1432 | Builder.CreateStore(One, PStride); | |||
1433 | ||||
1434 | BasicBlock *Header = CLI->getHeader(); | |||
1435 | BasicBlock *Exit = CLI->getExit(); | |||
1436 | BasicBlock *Cond = CLI->getCond(); | |||
1437 | InsertPointTy AfterIP = CLI->getAfterIP(); | |||
1438 | ||||
1439 | // The CLI will be "broken" in the code below, as the loop is no longer | |||
1440 | // a valid canonical loop. | |||
1441 | ||||
1442 | if (!Chunk) | |||
1443 | Chunk = One; | |||
1444 | ||||
1445 | Value *ThreadNum = getOrCreateThreadID(SrcLoc); | |||
1446 | ||||
1447 | Constant *SchedulingType = | |||
1448 | ConstantInt::get(I32Type, static_cast<int>(SchedType)); | |||
1449 | ||||
1450 | // Call the "init" function. | |||
1451 | Builder.CreateCall(DynamicInit, | |||
1452 | {SrcLoc, ThreadNum, SchedulingType, /* LowerBound */ One, | |||
1453 | UpperBound, /* step */ One, Chunk}); | |||
1454 | ||||
1455 | // An outer loop around the existing one. | |||
1456 | BasicBlock *OuterCond = BasicBlock::Create( | |||
1457 | PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond", | |||
1458 | PreHeader->getParent()); | |||
1459 | // This needs to be 32-bit always, so can't use the IVTy Zero above. | |||
1460 | Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt()); | |||
1461 | Value *Res = | |||
1462 | Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter, | |||
1463 | PLowerBound, PUpperBound, PStride}); | |||
1464 | Constant *Zero32 = ConstantInt::get(I32Type, 0); | |||
1465 | Value *MoreWork = Builder.CreateCmp(CmpInst::ICMP_NE, Res, Zero32); | |||
1466 | Value *LowerBound = | |||
1467 | Builder.CreateSub(Builder.CreateLoad(IVTy, PLowerBound), One, "lb"); | |||
1468 | Builder.CreateCondBr(MoreWork, Header, Exit); | |||
1469 | ||||
1470 | // Change PHI-node in loop header to use outer cond rather than preheader, | |||
1471 | // and set IV to the LowerBound. | |||
1472 | Instruction *Phi = &Header->front(); | |||
1473 | auto *PI = cast<PHINode>(Phi); | |||
1474 | PI->setIncomingBlock(0, OuterCond); | |||
1475 | PI->setIncomingValue(0, LowerBound); | |||
1476 | ||||
1477 | // Then set the pre-header to jump to the OuterCond | |||
1478 | Instruction *Term = PreHeader->getTerminator(); | |||
1479 | auto *Br = cast<BranchInst>(Term); | |||
1480 | Br->setSuccessor(0, OuterCond); | |||
1481 | ||||
1482 | // Modify the inner condition: | |||
1483 | // * Use the UpperBound returned from the DynamicNext call. | |||
1484 | // * jump to the loop outer loop when done with one of the inner loops. | |||
1485 | Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt()); | |||
1486 | UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub"); | |||
1487 | Instruction *Comp = &*Builder.GetInsertPoint(); | |||
1488 | auto *CI = cast<CmpInst>(Comp); | |||
1489 | CI->setOperand(1, UpperBound); | |||
1490 | // Redirect the inner exit to branch to outer condition. | |||
1491 | Instruction *Branch = &Cond->back(); | |||
1492 | auto *BI = cast<BranchInst>(Branch); | |||
1493 | assert(BI->getSuccessor(1) == Exit)((void)0); | |||
1494 | BI->setSuccessor(1, OuterCond); | |||
1495 | ||||
1496 | // Add the barrier if requested. | |||
1497 | if (NeedsBarrier) { | |||
1498 | Builder.SetInsertPoint(&Exit->back()); | |||
1499 | createBarrier(LocationDescription(Builder.saveIP(), Loc.DL), | |||
1500 | omp::Directive::OMPD_for, /* ForceSimpleCall */ false, | |||
1501 | /* CheckCancelFlag */ false); | |||
1502 | } | |||
1503 | ||||
1504 | return AfterIP; | |||
1505 | } | |||
1506 | ||||
1507 | /// Make \p Source branch to \p Target. | |||
1508 | /// | |||
1509 | /// Handles two situations: | |||
1510 | /// * \p Source already has an unconditional branch. | |||
1511 | /// * \p Source is a degenerate block (no terminator because the BB is | |||
1512 | /// the current head of the IR construction). | |||
1513 | static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) { | |||
1514 | if (Instruction *Term = Source->getTerminator()) { | |||
1515 | auto *Br = cast<BranchInst>(Term); | |||
1516 | assert(!Br->isConditional() &&((void)0) | |||
1517 | "BB's terminator must be an unconditional branch (or degenerate)")((void)0); | |||
1518 | BasicBlock *Succ = Br->getSuccessor(0); | |||
1519 | Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true); | |||
1520 | Br->setSuccessor(0, Target); | |||
1521 | return; | |||
1522 | } | |||
1523 | ||||
1524 | auto *NewBr = BranchInst::Create(Target, Source); | |||
1525 | NewBr->setDebugLoc(DL); | |||
1526 | } | |||
1527 | ||||
1528 | /// Redirect all edges that branch to \p OldTarget to \p NewTarget. That is, | |||
1529 | /// after this \p OldTarget will be orphaned. | |||
1530 | static void redirectAllPredecessorsTo(BasicBlock *OldTarget, | |||
1531 | BasicBlock *NewTarget, DebugLoc DL) { | |||
1532 | for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget))) | |||
1533 | redirectTo(Pred, NewTarget, DL); | |||
1534 | } | |||
1535 | ||||
1536 | /// Determine which blocks in \p BBs are reachable from outside and remove the | |||
1537 | /// ones that are not reachable from the function. | |||
1538 | static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) { | |||
1539 | SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()}; | |||
1540 | auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) { | |||
1541 | for (Use &U : BB->uses()) { | |||
1542 | auto *UseInst = dyn_cast<Instruction>(U.getUser()); | |||
1543 | if (!UseInst) | |||
1544 | continue; | |||
1545 | if (BBsToErase.count(UseInst->getParent())) | |||
1546 | continue; | |||
1547 | return true; | |||
1548 | } | |||
1549 | return false; | |||
1550 | }; | |||
1551 | ||||
1552 | while (true) { | |||
1553 | bool Changed = false; | |||
1554 | for (BasicBlock *BB : make_early_inc_range(BBsToErase)) { | |||
1555 | if (HasRemainingUses(BB)) { | |||
1556 | BBsToErase.erase(BB); | |||
1557 | Changed = true; | |||
1558 | } | |||
1559 | } | |||
1560 | if (!Changed) | |||
1561 | break; | |||
1562 | } | |||
1563 | ||||
1564 | SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end()); | |||
1565 | DeleteDeadBlocks(BBVec); | |||
1566 | } | |||
1567 | ||||
1568 | CanonicalLoopInfo * | |||
1569 | OpenMPIRBuilder::collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, | |||
1570 | InsertPointTy ComputeIP) { | |||
1571 | assert(Loops.size() >= 1 && "At least one loop required")((void)0); | |||
1572 | size_t NumLoops = Loops.size(); | |||
1573 | ||||
1574 | // Nothing to do if there is already just one loop. | |||
1575 | if (NumLoops == 1) | |||
1576 | return Loops.front(); | |||
1577 | ||||
1578 | CanonicalLoopInfo *Outermost = Loops.front(); | |||
1579 | CanonicalLoopInfo *Innermost = Loops.back(); | |||
1580 | BasicBlock *OrigPreheader = Outermost->getPreheader(); | |||
1581 | BasicBlock *OrigAfter = Outermost->getAfter(); | |||
1582 | Function *F = OrigPreheader->getParent(); | |||
1583 | ||||
1584 | // Setup the IRBuilder for inserting the trip count computation. | |||
1585 | Builder.SetCurrentDebugLocation(DL); | |||
1586 | if (ComputeIP.isSet()) | |||
1587 | Builder.restoreIP(ComputeIP); | |||
1588 | else | |||
1589 | Builder.restoreIP(Outermost->getPreheaderIP()); | |||
1590 | ||||
1591 | // Derive the collapsed' loop trip count. | |||
1592 | // TODO: Find common/largest indvar type. | |||
1593 | Value *CollapsedTripCount = nullptr; | |||
1594 | for (CanonicalLoopInfo *L : Loops) { | |||
1595 | Value *OrigTripCount = L->getTripCount(); | |||
1596 | if (!CollapsedTripCount) { | |||
1597 | CollapsedTripCount = OrigTripCount; | |||
1598 | continue; | |||
1599 | } | |||
1600 | ||||
1601 | // TODO: Enable UndefinedSanitizer to diagnose an overflow here. | |||
1602 | CollapsedTripCount = Builder.CreateMul(CollapsedTripCount, OrigTripCount, | |||
1603 | {}, /*HasNUW=*/true); | |||
1604 | } | |||
1605 | ||||
1606 | // Create the collapsed loop control flow. | |||
1607 | CanonicalLoopInfo *Result = | |||
1608 | createLoopSkeleton(DL, CollapsedTripCount, F, | |||
1609 | OrigPreheader->getNextNode(), OrigAfter, "collapsed"); | |||
1610 | ||||
1611 | // Build the collapsed loop body code. | |||
1612 | // Start with deriving the input loop induction variables from the collapsed | |||
1613 | // one, using a divmod scheme. To preserve the original loops' order, the | |||
1614 | // innermost loop use the least significant bits. | |||
1615 | Builder.restoreIP(Result->getBodyIP()); | |||
1616 | ||||
1617 | Value *Leftover = Result->getIndVar(); | |||
1618 | SmallVector<Value *> NewIndVars; | |||
1619 | NewIndVars.set_size(NumLoops); | |||
1620 | for (int i = NumLoops - 1; i >= 1; --i) { | |||
1621 | Value *OrigTripCount = Loops[i]->getTripCount(); | |||
1622 | ||||
1623 | Value *NewIndVar = Builder.CreateURem(Leftover, OrigTripCount); | |||
1624 | NewIndVars[i] = NewIndVar; | |||
1625 | ||||
1626 | Leftover = Builder.CreateUDiv(Leftover, OrigTripCount); | |||
1627 | } | |||
1628 | // Outermost loop gets all the remaining bits. | |||
1629 | NewIndVars[0] = Leftover; | |||
1630 | ||||
1631 | // Construct the loop body control flow. | |||
1632 | // We progressively construct the branch structure following in direction of | |||
1633 | // the control flow, from the leading in-between code, the loop nest body, the | |||
1634 | // trailing in-between code, and rejoining the collapsed loop's latch. | |||
1635 | // ContinueBlock and ContinuePred keep track of the source(s) of next edge. If | |||
1636 | // the ContinueBlock is set, continue with that block. If ContinuePred, use | |||
1637 | // its predecessors as sources. | |||
1638 | BasicBlock *ContinueBlock = Result->getBody(); | |||
1639 | BasicBlock *ContinuePred = nullptr; | |||
1640 | auto ContinueWith = [&ContinueBlock, &ContinuePred, DL](BasicBlock *Dest, | |||
1641 | BasicBlock *NextSrc) { | |||
1642 | if (ContinueBlock) | |||
1643 | redirectTo(ContinueBlock, Dest, DL); | |||
1644 | else | |||
1645 | redirectAllPredecessorsTo(ContinuePred, Dest, DL); | |||
1646 | ||||
1647 | ContinueBlock = nullptr; | |||
1648 | ContinuePred = NextSrc; | |||
1649 | }; | |||
1650 | ||||
1651 | // The code before the nested loop of each level. | |||
1652 | // Because we are sinking it into the nest, it will be executed more often | |||
1653 | // that the original loop. More sophisticated schemes could keep track of what | |||
1654 | // the in-between code is and instantiate it only once per thread. | |||
1655 | for (size_t i = 0; i < NumLoops - 1; ++i) | |||
1656 | ContinueWith(Loops[i]->getBody(), Loops[i + 1]->getHeader()); | |||
1657 | ||||
1658 | // Connect the loop nest body. | |||
1659 | ContinueWith(Innermost->getBody(), Innermost->getLatch()); | |||
1660 | ||||
1661 | // The code after the nested loop at each level. | |||
1662 | for (size_t i = NumLoops - 1; i > 0; --i) | |||
1663 | ContinueWith(Loops[i]->getAfter(), Loops[i - 1]->getLatch()); | |||
1664 | ||||
1665 | // Connect the finished loop to the collapsed loop latch. | |||
1666 | ContinueWith(Result->getLatch(), nullptr); | |||
1667 | ||||
1668 | // Replace the input loops with the new collapsed loop. | |||
1669 | redirectTo(Outermost->getPreheader(), Result->getPreheader(), DL); | |||
1670 | redirectTo(Result->getAfter(), Outermost->getAfter(), DL); | |||
1671 | ||||
1672 | // Replace the input loop indvars with the derived ones. | |||
1673 | for (size_t i = 0; i < NumLoops; ++i) | |||
1674 | Loops[i]->getIndVar()->replaceAllUsesWith(NewIndVars[i]); | |||
1675 | ||||
1676 | // Remove unused parts of the input loops. | |||
1677 | SmallVector<BasicBlock *, 12> OldControlBBs; | |||
1678 | OldControlBBs.reserve(6 * Loops.size()); | |||
1679 | for (CanonicalLoopInfo *Loop : Loops) | |||
1680 | Loop->collectControlBlocks(OldControlBBs); | |||
1681 | removeUnusedBlocksFromParent(OldControlBBs); | |||
1682 | ||||
1683 | #ifndef NDEBUG1 | |||
1684 | Result->assertOK(); | |||
1685 | #endif | |||
1686 | return Result; | |||
1687 | } | |||
1688 | ||||
1689 | std::vector<CanonicalLoopInfo *> | |||
1690 | OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, | |||
1691 | ArrayRef<Value *> TileSizes) { | |||
1692 | assert(TileSizes.size() == Loops.size() &&((void)0) | |||
1693 | "Must pass as many tile sizes as there are loops")((void)0); | |||
1694 | int NumLoops = Loops.size(); | |||
1695 | assert(NumLoops >= 1 && "At least one loop to tile required")((void)0); | |||
1696 | ||||
1697 | CanonicalLoopInfo *OutermostLoop = Loops.front(); | |||
1698 | CanonicalLoopInfo *InnermostLoop = Loops.back(); | |||
1699 | Function *F = OutermostLoop->getBody()->getParent(); | |||
1700 | BasicBlock *InnerEnter = InnermostLoop->getBody(); | |||
1701 | BasicBlock *InnerLatch = InnermostLoop->getLatch(); | |||
1702 | ||||
1703 | // Collect original trip counts and induction variable to be accessible by | |||
1704 | // index. Also, the structure of the original loops is not preserved during | |||
1705 | // the construction of the tiled loops, so do it before we scavenge the BBs of | |||
1706 | // any original CanonicalLoopInfo. | |||
1707 | SmallVector<Value *, 4> OrigTripCounts, OrigIndVars; | |||
1708 | for (CanonicalLoopInfo *L : Loops) { | |||
1709 | OrigTripCounts.push_back(L->getTripCount()); | |||
1710 | OrigIndVars.push_back(L->getIndVar()); | |||
1711 | } | |||
1712 | ||||
1713 | // Collect the code between loop headers. These may contain SSA definitions | |||
1714 | // that are used in the loop nest body. To be usable with in the innermost | |||
1715 | // body, these BasicBlocks will be sunk into the loop nest body. That is, | |||
1716 | // these instructions may be executed more often than before the tiling. | |||
1717 | // TODO: It would be sufficient to only sink them into body of the | |||
1718 | // corresponding tile loop. | |||
1719 | SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode; | |||
1720 | for (int i = 0; i < NumLoops - 1; ++i) { | |||
1721 | CanonicalLoopInfo *Surrounding = Loops[i]; | |||
1722 | CanonicalLoopInfo *Nested = Loops[i + 1]; | |||
1723 | ||||
1724 | BasicBlock *EnterBB = Surrounding->getBody(); | |||
1725 | BasicBlock *ExitBB = Nested->getHeader(); | |||
1726 | InbetweenCode.emplace_back(EnterBB, ExitBB); | |||
1727 | } | |||
1728 | ||||
1729 | // Compute the trip counts of the floor loops. | |||
1730 | Builder.SetCurrentDebugLocation(DL); | |||
1731 | Builder.restoreIP(OutermostLoop->getPreheaderIP()); | |||
1732 | SmallVector<Value *, 4> FloorCount, FloorRems; | |||
1733 | for (int i = 0; i < NumLoops; ++i) { | |||
1734 | Value *TileSize = TileSizes[i]; | |||
1735 | Value *OrigTripCount = OrigTripCounts[i]; | |||
1736 | Type *IVType = OrigTripCount->getType(); | |||
1737 | ||||
1738 | Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize); | |||
1739 | Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize); | |||
1740 | ||||
1741 | // 0 if tripcount divides the tilesize, 1 otherwise. | |||
1742 | // 1 means we need an additional iteration for a partial tile. | |||
1743 | // | |||
1744 | // Unfortunately we cannot just use the roundup-formula | |||
1745 | // (tripcount + tilesize - 1)/tilesize | |||
1746 | // because the summation might overflow. We do not want introduce undefined | |||
1747 | // behavior when the untiled loop nest did not. | |||
1748 | Value *FloorTripOverflow = | |||
1749 | Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0)); | |||
1750 | ||||
1751 | FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType); | |||
1752 | FloorTripCount = | |||
1753 | Builder.CreateAdd(FloorTripCount, FloorTripOverflow, | |||
1754 | "omp_floor" + Twine(i) + ".tripcount", true); | |||
1755 | ||||
1756 | // Remember some values for later use. | |||
1757 | FloorCount.push_back(FloorTripCount); | |||
1758 | FloorRems.push_back(FloorTripRem); | |||
1759 | } | |||
1760 | ||||
1761 | // Generate the new loop nest, from the outermost to the innermost. | |||
1762 | std::vector<CanonicalLoopInfo *> Result; | |||
1763 | Result.reserve(NumLoops * 2); | |||
1764 | ||||
1765 | // The basic block of the surrounding loop that enters the nest generated | |||
1766 | // loop. | |||
1767 | BasicBlock *Enter = OutermostLoop->getPreheader(); | |||
1768 | ||||
1769 | // The basic block of the surrounding loop where the inner code should | |||
1770 | // continue. | |||
1771 | BasicBlock *Continue = OutermostLoop->getAfter(); | |||
1772 | ||||
1773 | // Where the next loop basic block should be inserted. | |||
1774 | BasicBlock *OutroInsertBefore = InnermostLoop->getExit(); | |||
1775 | ||||
1776 | auto EmbeddNewLoop = | |||
1777 | [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore]( | |||
1778 | Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * { | |||
1779 | CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton( | |||
1780 | DL, TripCount, F, InnerEnter, OutroInsertBefore, Name); | |||
1781 | redirectTo(Enter, EmbeddedLoop->getPreheader(), DL); | |||
1782 | redirectTo(EmbeddedLoop->getAfter(), Continue, DL); | |||
1783 | ||||
1784 | // Setup the position where the next embedded loop connects to this loop. | |||
1785 | Enter = EmbeddedLoop->getBody(); | |||
1786 | Continue = EmbeddedLoop->getLatch(); | |||
1787 | OutroInsertBefore = EmbeddedLoop->getLatch(); | |||
1788 | return EmbeddedLoop; | |||
1789 | }; | |||
1790 | ||||
1791 | auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts, | |||
1792 | const Twine &NameBase) { | |||
1793 | for (auto P : enumerate(TripCounts)) { | |||
1794 | CanonicalLoopInfo *EmbeddedLoop = | |||
1795 | EmbeddNewLoop(P.value(), NameBase + Twine(P.index())); | |||
1796 | Result.push_back(EmbeddedLoop); | |||
1797 | } | |||
1798 | }; | |||
1799 | ||||
1800 | EmbeddNewLoops(FloorCount, "floor"); | |||
1801 | ||||
1802 | // Within the innermost floor loop, emit the code that computes the tile | |||
1803 | // sizes. | |||
1804 | Builder.SetInsertPoint(Enter->getTerminator()); | |||
1805 | SmallVector<Value *, 4> TileCounts; | |||
1806 | for (int i = 0; i < NumLoops; ++i) { | |||
1807 | CanonicalLoopInfo *FloorLoop = Result[i]; | |||
1808 | Value *TileSize = TileSizes[i]; | |||
1809 | ||||
1810 | Value *FloorIsEpilogue = | |||
1811 | Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]); | |||
1812 | Value *TileTripCount = | |||
1813 | Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize); | |||
1814 | ||||
1815 | TileCounts.push_back(TileTripCount); | |||
1816 | } | |||
1817 | ||||
1818 | // Create the tile loops. | |||
1819 | EmbeddNewLoops(TileCounts, "tile"); | |||
1820 | ||||
1821 | // Insert the inbetween code into the body. | |||
1822 | BasicBlock *BodyEnter = Enter; | |||
1823 | BasicBlock *BodyEntered = nullptr; | |||
1824 | for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) { | |||
1825 | BasicBlock *EnterBB = P.first; | |||
1826 | BasicBlock *ExitBB = P.second; | |||
1827 | ||||
1828 | if (BodyEnter) | |||
1829 | redirectTo(BodyEnter, EnterBB, DL); | |||
1830 | else | |||
1831 | redirectAllPredecessorsTo(BodyEntered, EnterBB, DL); | |||
1832 | ||||
1833 | BodyEnter = nullptr; | |||
1834 | BodyEntered = ExitBB; | |||
1835 | } | |||
1836 | ||||
1837 | // Append the original loop nest body into the generated loop nest body. | |||
1838 | if (BodyEnter) | |||
1839 | redirectTo(BodyEnter, InnerEnter, DL); | |||
1840 | else | |||
1841 | redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL); | |||
1842 | redirectAllPredecessorsTo(InnerLatch, Continue, DL); | |||
1843 | ||||
1844 | // Replace the original induction variable with an induction variable computed | |||
1845 | // from the tile and floor induction variables. | |||
1846 | Builder.restoreIP(Result.back()->getBodyIP()); | |||
1847 | for (int i = 0; i < NumLoops; ++i) { | |||
1848 | CanonicalLoopInfo *FloorLoop = Result[i]; | |||
1849 | CanonicalLoopInfo *TileLoop = Result[NumLoops + i]; | |||
1850 | Value *OrigIndVar = OrigIndVars[i]; | |||
1851 | Value *Size = TileSizes[i]; | |||
1852 | ||||
1853 | Value *Scale = | |||
1854 | Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true); | |||
1855 | Value *Shift = | |||
1856 | Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true); | |||
1857 | OrigIndVar->replaceAllUsesWith(Shift); | |||
1858 | } | |||
1859 | ||||
1860 | // Remove unused parts of the original loops. | |||
1861 | SmallVector<BasicBlock *, 12> OldControlBBs; | |||
1862 | OldControlBBs.reserve(6 * Loops.size()); | |||
1863 | for (CanonicalLoopInfo *Loop : Loops) | |||
1864 | Loop->collectControlBlocks(OldControlBBs); | |||
1865 | removeUnusedBlocksFromParent(OldControlBBs); | |||
1866 | ||||
1867 | #ifndef NDEBUG1 | |||
1868 | for (CanonicalLoopInfo *GenL : Result) | |||
1869 | GenL->assertOK(); | |||
1870 | #endif | |||
1871 | return Result; | |||
1872 | } | |||
1873 | ||||
1874 | OpenMPIRBuilder::InsertPointTy | |||
1875 | OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc, | |||
1876 | llvm::Value *BufSize, llvm::Value *CpyBuf, | |||
1877 | llvm::Value *CpyFn, llvm::Value *DidIt) { | |||
1878 | if (!updateToLocation(Loc)) | |||
1879 | return Loc.IP; | |||
1880 | ||||
1881 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
1882 | Value *Ident = getOrCreateIdent(SrcLocStr); | |||
1883 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
1884 | ||||
1885 | llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt); | |||
1886 | ||||
1887 | Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD}; | |||
1888 | ||||
1889 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate); | |||
1890 | Builder.CreateCall(Fn, Args); | |||
1891 | ||||
1892 | return Builder.saveIP(); | |||
1893 | } | |||
1894 | ||||
1895 | OpenMPIRBuilder::InsertPointTy | |||
1896 | OpenMPIRBuilder::createSingle(const LocationDescription &Loc, | |||
1897 | BodyGenCallbackTy BodyGenCB, | |||
1898 | FinalizeCallbackTy FiniCB, llvm::Value *DidIt) { | |||
1899 | ||||
1900 | if (!updateToLocation(Loc)) | |||
1901 | return Loc.IP; | |||
1902 | ||||
1903 | // If needed (i.e. not null), initialize `DidIt` with 0 | |||
1904 | if (DidIt) { | |||
1905 | Builder.CreateStore(Builder.getInt32(0), DidIt); | |||
1906 | } | |||
1907 | ||||
1908 | Directive OMPD = Directive::OMPD_single; | |||
1909 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
1910 | Value *Ident = getOrCreateIdent(SrcLocStr); | |||
1911 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
1912 | Value *Args[] = {Ident, ThreadId}; | |||
1913 | ||||
1914 | Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single); | |||
1915 | Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args); | |||
1916 | ||||
1917 | Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single); | |||
1918 | Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); | |||
1919 | ||||
1920 | // generates the following: | |||
1921 | // if (__kmpc_single()) { | |||
1922 | // .... single region ... | |||
1923 | // __kmpc_end_single | |||
1924 | // } | |||
1925 | ||||
1926 | return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, | |||
1927 | /*Conditional*/ true, /*hasFinalize*/ true); | |||
1928 | } | |||
1929 | ||||
1930 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical( | |||
1931 | const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, | |||
1932 | FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) { | |||
1933 | ||||
1934 | if (!updateToLocation(Loc)) | |||
1935 | return Loc.IP; | |||
1936 | ||||
1937 | Directive OMPD = Directive::OMPD_critical; | |||
1938 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
1939 | Value *Ident = getOrCreateIdent(SrcLocStr); | |||
1940 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
1941 | Value *LockVar = getOMPCriticalRegionLock(CriticalName); | |||
1942 | Value *Args[] = {Ident, ThreadId, LockVar}; | |||
1943 | ||||
1944 | SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args)); | |||
1945 | Function *RTFn = nullptr; | |||
1946 | if (HintInst) { | |||
1947 | // Add Hint to entry Args and create call | |||
1948 | EnterArgs.push_back(HintInst); | |||
1949 | RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint); | |||
1950 | } else { | |||
1951 | RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical); | |||
1952 | } | |||
1953 | Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs); | |||
1954 | ||||
1955 | Function *ExitRTLFn = | |||
1956 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical); | |||
1957 | Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); | |||
1958 | ||||
1959 | return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, | |||
1960 | /*Conditional*/ false, /*hasFinalize*/ true); | |||
1961 | } | |||
1962 | ||||
1963 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion( | |||
1964 | Directive OMPD, Instruction *EntryCall, Instruction *ExitCall, | |||
1965 | BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional, | |||
1966 | bool HasFinalize, bool IsCancellable) { | |||
1967 | ||||
1968 | if (HasFinalize) | |||
1969 | FinalizationStack.push_back({FiniCB, OMPD, IsCancellable}); | |||
1970 | ||||
1971 | // Create inlined region's entry and body blocks, in preparation | |||
1972 | // for conditional creation | |||
1973 | BasicBlock *EntryBB = Builder.GetInsertBlock(); | |||
1974 | Instruction *SplitPos = EntryBB->getTerminator(); | |||
1975 | if (!isa_and_nonnull<BranchInst>(SplitPos)) | |||
1976 | SplitPos = new UnreachableInst(Builder.getContext(), EntryBB); | |||
1977 | BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end"); | |||
1978 | BasicBlock *FiniBB = | |||
1979 | EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize"); | |||
1980 | ||||
1981 | Builder.SetInsertPoint(EntryBB->getTerminator()); | |||
1982 | emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional); | |||
1983 | ||||
1984 | // generate body | |||
1985 | BodyGenCB(/* AllocaIP */ InsertPointTy(), | |||
1986 | /* CodeGenIP */ Builder.saveIP(), *FiniBB); | |||
1987 | ||||
1988 | // If we didn't emit a branch to FiniBB during body generation, it means | |||
1989 | // FiniBB is unreachable (e.g. while(1);). stop generating all the | |||
1990 | // unreachable blocks, and remove anything we are not going to use. | |||
1991 | auto SkipEmittingRegion = FiniBB->hasNPredecessors(0); | |||
1992 | if (SkipEmittingRegion) { | |||
1993 | FiniBB->eraseFromParent(); | |||
1994 | ExitCall->eraseFromParent(); | |||
1995 | // Discard finalization if we have it. | |||
1996 | if (HasFinalize) { | |||
1997 | assert(!FinalizationStack.empty() &&((void)0) | |||
1998 | "Unexpected finalization stack state!")((void)0); | |||
1999 | FinalizationStack.pop_back(); | |||
2000 | } | |||
2001 | } else { | |||
2002 | // emit exit call and do any needed finalization. | |||
2003 | auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt()); | |||
2004 | assert(FiniBB->getTerminator()->getNumSuccessors() == 1 &&((void)0) | |||
2005 | FiniBB->getTerminator()->getSuccessor(0) == ExitBB &&((void)0) | |||
2006 | "Unexpected control flow graph state!!")((void)0); | |||
2007 | emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize); | |||
2008 | assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB &&((void)0) | |||
2009 | "Unexpected Control Flow State!")((void)0); | |||
2010 | MergeBlockIntoPredecessor(FiniBB); | |||
2011 | } | |||
2012 | ||||
2013 | // If we are skipping the region of a non conditional, remove the exit | |||
2014 | // block, and clear the builder's insertion point. | |||
2015 | assert(SplitPos->getParent() == ExitBB &&((void)0) | |||
2016 | "Unexpected Insertion point location!")((void)0); | |||
2017 | if (!Conditional && SkipEmittingRegion) { | |||
2018 | ExitBB->eraseFromParent(); | |||
2019 | Builder.ClearInsertionPoint(); | |||
2020 | } else { | |||
2021 | auto merged = MergeBlockIntoPredecessor(ExitBB); | |||
2022 | BasicBlock *ExitPredBB = SplitPos->getParent(); | |||
2023 | auto InsertBB = merged ? ExitPredBB : ExitBB; | |||
2024 | if (!isa_and_nonnull<BranchInst>(SplitPos)) | |||
2025 | SplitPos->eraseFromParent(); | |||
2026 | Builder.SetInsertPoint(InsertBB); | |||
2027 | } | |||
2028 | ||||
2029 | return Builder.saveIP(); | |||
2030 | } | |||
2031 | ||||
2032 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry( | |||
2033 | Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) { | |||
2034 | // if nothing to do, Return current insertion point. | |||
2035 | if (!Conditional || !EntryCall) | |||
2036 | return Builder.saveIP(); | |||
2037 | ||||
2038 | BasicBlock *EntryBB = Builder.GetInsertBlock(); | |||
2039 | Value *CallBool = Builder.CreateIsNotNull(EntryCall); | |||
2040 | auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body"); | |||
2041 | auto *UI = new UnreachableInst(Builder.getContext(), ThenBB); | |||
2042 | ||||
2043 | // Emit thenBB and set the Builder's insertion point there for | |||
2044 | // body generation next. Place the block after the current block. | |||
2045 | Function *CurFn = EntryBB->getParent(); | |||
2046 | CurFn->getBasicBlockList().insertAfter(EntryBB->getIterator(), ThenBB); | |||
2047 | ||||
2048 | // Move Entry branch to end of ThenBB, and replace with conditional | |||
2049 | // branch (If-stmt) | |||
2050 | Instruction *EntryBBTI = EntryBB->getTerminator(); | |||
2051 | Builder.CreateCondBr(CallBool, ThenBB, ExitBB); | |||
2052 | EntryBBTI->removeFromParent(); | |||
2053 | Builder.SetInsertPoint(UI); | |||
2054 | Builder.Insert(EntryBBTI); | |||
2055 | UI->eraseFromParent(); | |||
2056 | Builder.SetInsertPoint(ThenBB->getTerminator()); | |||
2057 | ||||
2058 | // return an insertion point to ExitBB. | |||
2059 | return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt()); | |||
2060 | } | |||
2061 | ||||
2062 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit( | |||
2063 | omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall, | |||
2064 | bool HasFinalize) { | |||
2065 | ||||
2066 | Builder.restoreIP(FinIP); | |||
2067 | ||||
2068 | // If there is finalization to do, emit it before the exit call | |||
2069 | if (HasFinalize) { | |||
2070 | assert(!FinalizationStack.empty() &&((void)0) | |||
2071 | "Unexpected finalization stack state!")((void)0); | |||
2072 | ||||
2073 | FinalizationInfo Fi = FinalizationStack.pop_back_val(); | |||
2074 | assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!")((void)0); | |||
2075 | ||||
2076 | Fi.FiniCB(FinIP); | |||
2077 | ||||
2078 | BasicBlock *FiniBB = FinIP.getBlock(); | |||
2079 | Instruction *FiniBBTI = FiniBB->getTerminator(); | |||
2080 | ||||
2081 | // set Builder IP for call creation | |||
2082 | Builder.SetInsertPoint(FiniBBTI); | |||
2083 | } | |||
2084 | ||||
2085 | if (!ExitCall) | |||
2086 | return Builder.saveIP(); | |||
2087 | ||||
2088 | // place the Exitcall as last instruction before Finalization block terminator | |||
2089 | ExitCall->removeFromParent(); | |||
2090 | Builder.Insert(ExitCall); | |||
2091 | ||||
2092 | return IRBuilder<>::InsertPoint(ExitCall->getParent(), | |||
2093 | ExitCall->getIterator()); | |||
2094 | } | |||
2095 | ||||
2096 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCopyinClauseBlocks( | |||
2097 | InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr, | |||
2098 | llvm::IntegerType *IntPtrTy, bool BranchtoEnd) { | |||
2099 | if (!IP.isSet()) | |||
2100 | return IP; | |||
2101 | ||||
2102 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
2103 | ||||
2104 | // creates the following CFG structure | |||
2105 | // OMP_Entry : (MasterAddr != PrivateAddr)? | |||
2106 | // F T | |||
2107 | // | \ | |||
2108 | // | copin.not.master | |||
2109 | // | / | |||
2110 | // v / | |||
2111 | // copyin.not.master.end | |||
2112 | // | | |||
2113 | // v | |||
2114 | // OMP.Entry.Next | |||
2115 | ||||
2116 | BasicBlock *OMP_Entry = IP.getBlock(); | |||
2117 | Function *CurFn = OMP_Entry->getParent(); | |||
2118 | BasicBlock *CopyBegin = | |||
2119 | BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn); | |||
2120 | BasicBlock *CopyEnd = nullptr; | |||
2121 | ||||
2122 | // If entry block is terminated, split to preserve the branch to following | |||
2123 | // basic block (i.e. OMP.Entry.Next), otherwise, leave everything as is. | |||
2124 | if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) { | |||
2125 | CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(), | |||
2126 | "copyin.not.master.end"); | |||
2127 | OMP_Entry->getTerminator()->eraseFromParent(); | |||
2128 | } else { | |||
2129 | CopyEnd = | |||
2130 | BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn); | |||
2131 | } | |||
2132 | ||||
2133 | Builder.SetInsertPoint(OMP_Entry); | |||
2134 | Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy); | |||
2135 | Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy); | |||
2136 | Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr); | |||
2137 | Builder.CreateCondBr(cmp, CopyBegin, CopyEnd); | |||
2138 | ||||
2139 | Builder.SetInsertPoint(CopyBegin); | |||
2140 | if (BranchtoEnd) | |||
2141 | Builder.SetInsertPoint(Builder.CreateBr(CopyEnd)); | |||
2142 | ||||
2143 | return Builder.saveIP(); | |||
2144 | } | |||
2145 | ||||
2146 | CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc, | |||
2147 | Value *Size, Value *Allocator, | |||
2148 | std::string Name) { | |||
2149 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
2150 | Builder.restoreIP(Loc.IP); | |||
2151 | ||||
2152 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
2153 | Value *Ident = getOrCreateIdent(SrcLocStr); | |||
2154 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
2155 | Value *Args[] = {ThreadId, Size, Allocator}; | |||
2156 | ||||
2157 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc); | |||
2158 | ||||
2159 | return Builder.CreateCall(Fn, Args, Name); | |||
2160 | } | |||
2161 | ||||
2162 | CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc, | |||
2163 | Value *Addr, Value *Allocator, | |||
2164 | std::string Name) { | |||
2165 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
2166 | Builder.restoreIP(Loc.IP); | |||
2167 | ||||
2168 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
2169 | Value *Ident = getOrCreateIdent(SrcLocStr); | |||
2170 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
2171 | Value *Args[] = {ThreadId, Addr, Allocator}; | |||
2172 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free); | |||
2173 | return Builder.CreateCall(Fn, Args, Name); | |||
2174 | } | |||
2175 | ||||
2176 | CallInst *OpenMPIRBuilder::createCachedThreadPrivate( | |||
2177 | const LocationDescription &Loc, llvm::Value *Pointer, | |||
2178 | llvm::ConstantInt *Size, const llvm::Twine &Name) { | |||
2179 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
2180 | Builder.restoreIP(Loc.IP); | |||
2181 | ||||
2182 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
2183 | Value *Ident = getOrCreateIdent(SrcLocStr); | |||
2184 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
2185 | Constant *ThreadPrivateCache = | |||
2186 | getOrCreateOMPInternalVariable(Int8PtrPtr, Name); | |||
2187 | llvm::Value *Args[] = {Ident, ThreadId, Pointer, Size, ThreadPrivateCache}; | |||
2188 | ||||
2189 | Function *Fn = | |||
2190 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_threadprivate_cached); | |||
2191 | ||||
2192 | return Builder.CreateCall(Fn, Args); | |||
2193 | } | |||
2194 | ||||
2195 | OpenMPIRBuilder::InsertPointTy | |||
2196 | OpenMPIRBuilder::createTargetInit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime) { | |||
2197 | if (!updateToLocation(Loc)) | |||
2198 | return Loc.IP; | |||
2199 | ||||
2200 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
2201 | Value *Ident = getOrCreateIdent(SrcLocStr); | |||
2202 | ConstantInt *IsSPMDVal = ConstantInt::getBool(Int32->getContext(), IsSPMD); | |||
2203 | ConstantInt *UseGenericStateMachine = | |||
2204 | ConstantInt::getBool(Int32->getContext(), !IsSPMD); | |||
2205 | ConstantInt *RequiresFullRuntimeVal = ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime); | |||
2206 | ||||
2207 | Function *Fn = getOrCreateRuntimeFunctionPtr( | |||
2208 | omp::RuntimeFunction::OMPRTL___kmpc_target_init); | |||
2209 | ||||
2210 | CallInst *ThreadKind = | |||
2211 | Builder.CreateCall(Fn, {Ident, IsSPMDVal, UseGenericStateMachine, RequiresFullRuntimeVal}); | |||
2212 | ||||
2213 | Value *ExecUserCode = Builder.CreateICmpEQ( | |||
2214 | ThreadKind, ConstantInt::get(ThreadKind->getType(), -1), "exec_user_code"); | |||
2215 | ||||
2216 | // ThreadKind = __kmpc_target_init(...) | |||
2217 | // if (ThreadKind == -1) | |||
2218 | // user_code | |||
2219 | // else | |||
2220 | // return; | |||
2221 | ||||
2222 | auto *UI = Builder.CreateUnreachable(); | |||
2223 | BasicBlock *CheckBB = UI->getParent(); | |||
2224 | BasicBlock *UserCodeEntryBB = CheckBB->splitBasicBlock(UI, "user_code.entry"); | |||
2225 | ||||
2226 | BasicBlock *WorkerExitBB = BasicBlock::Create( | |||
2227 | CheckBB->getContext(), "worker.exit", CheckBB->getParent()); | |||
2228 | Builder.SetInsertPoint(WorkerExitBB); | |||
2229 | Builder.CreateRetVoid(); | |||
2230 | ||||
2231 | auto *CheckBBTI = CheckBB->getTerminator(); | |||
2232 | Builder.SetInsertPoint(CheckBBTI); | |||
2233 | Builder.CreateCondBr(ExecUserCode, UI->getParent(), WorkerExitBB); | |||
2234 | ||||
2235 | CheckBBTI->eraseFromParent(); | |||
2236 | UI->eraseFromParent(); | |||
2237 | ||||
2238 | // Continue in the "user_code" block, see diagram above and in | |||
2239 | // openmp/libomptarget/deviceRTLs/common/include/target.h . | |||
2240 | return InsertPointTy(UserCodeEntryBB, UserCodeEntryBB->getFirstInsertionPt()); | |||
2241 | } | |||
2242 | ||||
2243 | void OpenMPIRBuilder::createTargetDeinit(const LocationDescription &Loc, | |||
2244 | bool IsSPMD, bool RequiresFullRuntime) { | |||
2245 | if (!updateToLocation(Loc)) | |||
2246 | return; | |||
2247 | ||||
2248 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); | |||
2249 | Value *Ident = getOrCreateIdent(SrcLocStr); | |||
2250 | ConstantInt *IsSPMDVal = ConstantInt::getBool(Int32->getContext(), IsSPMD); | |||
2251 | ConstantInt *RequiresFullRuntimeVal = ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime); | |||
2252 | ||||
2253 | Function *Fn = getOrCreateRuntimeFunctionPtr( | |||
2254 | omp::RuntimeFunction::OMPRTL___kmpc_target_deinit); | |||
2255 | ||||
2256 | Builder.CreateCall(Fn, {Ident, IsSPMDVal, RequiresFullRuntimeVal}); | |||
2257 | } | |||
2258 | ||||
2259 | std::string OpenMPIRBuilder::getNameWithSeparators(ArrayRef<StringRef> Parts, | |||
2260 | StringRef FirstSeparator, | |||
2261 | StringRef Separator) { | |||
2262 | SmallString<128> Buffer; | |||
2263 | llvm::raw_svector_ostream OS(Buffer); | |||
2264 | StringRef Sep = FirstSeparator; | |||
2265 | for (StringRef Part : Parts) { | |||
2266 | OS << Sep << Part; | |||
2267 | Sep = Separator; | |||
2268 | } | |||
2269 | return OS.str().str(); | |||
2270 | } | |||
2271 | ||||
2272 | Constant *OpenMPIRBuilder::getOrCreateOMPInternalVariable( | |||
2273 | llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) { | |||
2274 | // TODO: Replace the twine arg with stringref to get rid of the conversion | |||
2275 | // logic. However This is taken from current implementation in clang as is. | |||
2276 | // Since this method is used in many places exclusively for OMP internal use | |||
2277 | // we will keep it as is for temporarily until we move all users to the | |||
2278 | // builder and then, if possible, fix it everywhere in one go. | |||
2279 | SmallString<256> Buffer; | |||
2280 | llvm::raw_svector_ostream Out(Buffer); | |||
2281 | Out << Name; | |||
2282 | StringRef RuntimeName = Out.str(); | |||
2283 | auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first; | |||
2284 | if (Elem.second) { | |||
2285 | assert(Elem.second->getType()->getPointerElementType() == Ty &&((void)0) | |||
2286 | "OMP internal variable has different type than requested")((void)0); | |||
2287 | } else { | |||
2288 | // TODO: investigate the appropriate linkage type used for the global | |||
2289 | // variable for possibly changing that to internal or private, or maybe | |||
2290 | // create different versions of the function for different OMP internal | |||
2291 | // variables. | |||
2292 | Elem.second = new llvm::GlobalVariable( | |||
2293 | M, Ty, /*IsConstant*/ false, llvm::GlobalValue::CommonLinkage, | |||
2294 | llvm::Constant::getNullValue(Ty), Elem.first(), | |||
2295 | /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal, | |||
2296 | AddressSpace); | |||
2297 | } | |||
2298 | ||||
2299 | return Elem.second; | |||
2300 | } | |||
2301 | ||||
2302 | Value *OpenMPIRBuilder::getOMPCriticalRegionLock(StringRef CriticalName) { | |||
2303 | std::string Prefix = Twine("gomp_critical_user_", CriticalName).str(); | |||
2304 | std::string Name = getNameWithSeparators({Prefix, "var"}, ".", "."); | |||
2305 | return getOrCreateOMPInternalVariable(KmpCriticalNameTy, Name); | |||
2306 | } | |||
2307 | ||||
2308 | GlobalVariable * | |||
2309 | OpenMPIRBuilder::createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings, | |||
2310 | std::string VarName) { | |||
2311 | llvm::Constant *MaptypesArrayInit = | |||
2312 | llvm::ConstantDataArray::get(M.getContext(), Mappings); | |||
2313 | auto *MaptypesArrayGlobal = new llvm::GlobalVariable( | |||
2314 | M, MaptypesArrayInit->getType(), | |||
2315 | /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MaptypesArrayInit, | |||
2316 | VarName); | |||
2317 | MaptypesArrayGlobal->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); | |||
2318 | return MaptypesArrayGlobal; | |||
2319 | } | |||
2320 | ||||
2321 | void OpenMPIRBuilder::createMapperAllocas(const LocationDescription &Loc, | |||
2322 | InsertPointTy AllocaIP, | |||
2323 | unsigned NumOperands, | |||
2324 | struct MapperAllocas &MapperAllocas) { | |||
2325 | if (!updateToLocation(Loc)) | |||
2326 | return; | |||
2327 | ||||
2328 | auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands); | |||
2329 | auto *ArrI64Ty = ArrayType::get(Int64, NumOperands); | |||
2330 | Builder.restoreIP(AllocaIP); | |||
2331 | AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI8PtrTy); | |||
2332 | AllocaInst *Args = Builder.CreateAlloca(ArrI8PtrTy); | |||
2333 | AllocaInst *ArgSizes = Builder.CreateAlloca(ArrI64Ty); | |||
2334 | Builder.restoreIP(Loc.IP); | |||
2335 | MapperAllocas.ArgsBase = ArgsBase; | |||
2336 | MapperAllocas.Args = Args; | |||
2337 | MapperAllocas.ArgSizes = ArgSizes; | |||
2338 | } | |||
2339 | ||||
2340 | void OpenMPIRBuilder::emitMapperCall(const LocationDescription &Loc, | |||
2341 | Function *MapperFunc, Value *SrcLocInfo, | |||
2342 | Value *MaptypesArg, Value *MapnamesArg, | |||
2343 | struct MapperAllocas &MapperAllocas, | |||
2344 | int64_t DeviceID, unsigned NumOperands) { | |||
2345 | if (!updateToLocation(Loc)) | |||
2346 | return; | |||
2347 | ||||
2348 | auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands); | |||
2349 | auto *ArrI64Ty = ArrayType::get(Int64, NumOperands); | |||
2350 | Value *ArgsBaseGEP = | |||
2351 | Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.ArgsBase, | |||
2352 | {Builder.getInt32(0), Builder.getInt32(0)}); | |||
2353 | Value *ArgsGEP = | |||
2354 | Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.Args, | |||
2355 | {Builder.getInt32(0), Builder.getInt32(0)}); | |||
2356 | Value *ArgSizesGEP = | |||
2357 | Builder.CreateInBoundsGEP(ArrI64Ty, MapperAllocas.ArgSizes, | |||
2358 | {Builder.getInt32(0), Builder.getInt32(0)}); | |||
2359 | Value *NullPtr = Constant::getNullValue(Int8Ptr->getPointerTo()); | |||
2360 | Builder.CreateCall(MapperFunc, | |||
2361 | {SrcLocInfo, Builder.getInt64(DeviceID), | |||
2362 | Builder.getInt32(NumOperands), ArgsBaseGEP, ArgsGEP, | |||
2363 | ArgSizesGEP, MaptypesArg, MapnamesArg, NullPtr}); | |||
2364 | } | |||
2365 | ||||
2366 | bool OpenMPIRBuilder::checkAndEmitFlushAfterAtomic( | |||
2367 | const LocationDescription &Loc, llvm::AtomicOrdering AO, AtomicKind AK) { | |||
2368 | assert(!(AO == AtomicOrdering::NotAtomic ||((void)0) | |||
2369 | AO == llvm::AtomicOrdering::Unordered) &&((void)0) | |||
2370 | "Unexpected Atomic Ordering.")((void)0); | |||
2371 | ||||
2372 | bool Flush = false; | |||
2373 | llvm::AtomicOrdering FlushAO = AtomicOrdering::Monotonic; | |||
2374 | ||||
2375 | switch (AK) { | |||
2376 | case Read: | |||
2377 | if (AO == AtomicOrdering::Acquire || AO == AtomicOrdering::AcquireRelease || | |||
2378 | AO == AtomicOrdering::SequentiallyConsistent) { | |||
2379 | FlushAO = AtomicOrdering::Acquire; | |||
2380 | Flush = true; | |||
2381 | } | |||
2382 | break; | |||
2383 | case Write: | |||
2384 | case Update: | |||
2385 | if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease || | |||
2386 | AO == AtomicOrdering::SequentiallyConsistent) { | |||
2387 | FlushAO = AtomicOrdering::Release; | |||
2388 | Flush = true; | |||
2389 | } | |||
2390 | break; | |||
2391 | case Capture: | |||
2392 | switch (AO) { | |||
2393 | case AtomicOrdering::Acquire: | |||
2394 | FlushAO = AtomicOrdering::Acquire; | |||
2395 | Flush = true; | |||
2396 | break; | |||
2397 | case AtomicOrdering::Release: | |||
2398 | FlushAO = AtomicOrdering::Release; | |||
2399 | Flush = true; | |||
2400 | break; | |||
2401 | case AtomicOrdering::AcquireRelease: | |||
2402 | case AtomicOrdering::SequentiallyConsistent: | |||
2403 | FlushAO = AtomicOrdering::AcquireRelease; | |||
2404 | Flush = true; | |||
2405 | break; | |||
2406 | default: | |||
2407 | // do nothing - leave silently. | |||
2408 | break; | |||
2409 | } | |||
2410 | } | |||
2411 | ||||
2412 | if (Flush) { | |||
2413 | // Currently Flush RT call still doesn't take memory_ordering, so for when | |||
2414 | // that happens, this tries to do the resolution of which atomic ordering | |||
2415 | // to use with but issue the flush call | |||
2416 | // TODO: pass `FlushAO` after memory ordering support is added | |||
2417 | (void)FlushAO; | |||
2418 | emitFlush(Loc); | |||
2419 | } | |||
2420 | ||||
2421 | // for AO == AtomicOrdering::Monotonic and all other case combinations | |||
2422 | // do nothing | |||
2423 | return Flush; | |||
2424 | } | |||
2425 | ||||
2426 | OpenMPIRBuilder::InsertPointTy | |||
2427 | OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc, | |||
2428 | AtomicOpValue &X, AtomicOpValue &V, | |||
2429 | AtomicOrdering AO) { | |||
2430 | if (!updateToLocation(Loc)) | |||
2431 | return Loc.IP; | |||
2432 | ||||
2433 | Type *XTy = X.Var->getType(); | |||
2434 | assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory")((void)0); | |||
2435 | Type *XElemTy = XTy->getPointerElementType(); | |||
2436 | assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||((void)0) | |||
2437 | XElemTy->isPointerTy()) &&((void)0) | |||
2438 | "OMP atomic read expected a scalar type")((void)0); | |||
2439 | ||||
2440 | Value *XRead = nullptr; | |||
2441 | ||||
2442 | if (XElemTy->isIntegerTy()) { | |||
2443 | LoadInst *XLD = | |||
2444 | Builder.CreateLoad(XElemTy, X.Var, X.IsVolatile, "omp.atomic.read"); | |||
2445 | XLD->setAtomic(AO); | |||
2446 | XRead = cast<Value>(XLD); | |||
2447 | } else { | |||
2448 | // We need to bitcast and perform atomic op as integer | |||
2449 | unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace(); | |||
2450 | IntegerType *IntCastTy = | |||
2451 | IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); | |||
2452 | Value *XBCast = Builder.CreateBitCast( | |||
2453 | X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.src.int.cast"); | |||
2454 | LoadInst *XLoad = | |||
2455 | Builder.CreateLoad(IntCastTy, XBCast, X.IsVolatile, "omp.atomic.load"); | |||
2456 | XLoad->setAtomic(AO); | |||
2457 | if (XElemTy->isFloatingPointTy()) { | |||
2458 | XRead = Builder.CreateBitCast(XLoad, XElemTy, "atomic.flt.cast"); | |||
2459 | } else { | |||
2460 | XRead = Builder.CreateIntToPtr(XLoad, XElemTy, "atomic.ptr.cast"); | |||
2461 | } | |||
2462 | } | |||
2463 | checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Read); | |||
2464 | Builder.CreateStore(XRead, V.Var, V.IsVolatile); | |||
2465 | return Builder.saveIP(); | |||
2466 | } | |||
2467 | ||||
2468 | OpenMPIRBuilder::InsertPointTy | |||
2469 | OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc, | |||
2470 | AtomicOpValue &X, Value *Expr, | |||
2471 | AtomicOrdering AO) { | |||
2472 | if (!updateToLocation(Loc)) | |||
2473 | return Loc.IP; | |||
2474 | ||||
2475 | Type *XTy = X.Var->getType(); | |||
2476 | assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory")((void)0); | |||
2477 | Type *XElemTy = XTy->getPointerElementType(); | |||
2478 | assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||((void)0) | |||
2479 | XElemTy->isPointerTy()) &&((void)0) | |||
2480 | "OMP atomic write expected a scalar type")((void)0); | |||
2481 | ||||
2482 | if (XElemTy->isIntegerTy()) { | |||
2483 | StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile); | |||
2484 | XSt->setAtomic(AO); | |||
2485 | } else { | |||
2486 | // We need to bitcast and perform atomic op as integers | |||
2487 | unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace(); | |||
2488 | IntegerType *IntCastTy = | |||
2489 | IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); | |||
2490 | Value *XBCast = Builder.CreateBitCast( | |||
2491 | X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.dst.int.cast"); | |||
2492 | Value *ExprCast = | |||
2493 | Builder.CreateBitCast(Expr, IntCastTy, "atomic.src.int.cast"); | |||
2494 | StoreInst *XSt = Builder.CreateStore(ExprCast, XBCast, X.IsVolatile); | |||
2495 | XSt->setAtomic(AO); | |||
2496 | } | |||
2497 | ||||
2498 | checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Write); | |||
2499 | return Builder.saveIP(); | |||
2500 | } | |||
2501 | ||||
2502 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicUpdate( | |||
2503 | const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X, | |||
2504 | Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, | |||
2505 | AtomicUpdateCallbackTy &UpdateOp, bool IsXLHSInRHSPart) { | |||
2506 | if (!updateToLocation(Loc)) | |||
2507 | return Loc.IP; | |||
2508 | ||||
2509 | LLVM_DEBUG({do { } while (false) | |||
2510 | Type *XTy = X.Var->getType();do { } while (false) | |||
2511 | assert(XTy->isPointerTy() &&do { } while (false) | |||
2512 | "OMP Atomic expects a pointer to target memory");do { } while (false) | |||
2513 | Type *XElemTy = XTy->getPointerElementType();do { } while (false) | |||
2514 | assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||do { } while (false) | |||
2515 | XElemTy->isPointerTy()) &&do { } while (false) | |||
2516 | "OMP atomic update expected a scalar type");do { } while (false) | |||
2517 | assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&do { } while (false) | |||
2518 | (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) &&do { } while (false) | |||
2519 | "OpenMP atomic does not support LT or GT operations");do { } while (false) | |||
2520 | })do { } while (false); | |||
2521 | ||||
2522 | emitAtomicUpdate(AllocIP, X.Var, Expr, AO, RMWOp, UpdateOp, X.IsVolatile, | |||
2523 | IsXLHSInRHSPart); | |||
2524 | checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update); | |||
2525 | return Builder.saveIP(); | |||
2526 | } | |||
2527 | ||||
2528 | Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2, | |||
2529 | AtomicRMWInst::BinOp RMWOp) { | |||
2530 | switch (RMWOp) { | |||
2531 | case AtomicRMWInst::Add: | |||
2532 | return Builder.CreateAdd(Src1, Src2); | |||
2533 | case AtomicRMWInst::Sub: | |||
2534 | return Builder.CreateSub(Src1, Src2); | |||
2535 | case AtomicRMWInst::And: | |||
2536 | return Builder.CreateAnd(Src1, Src2); | |||
2537 | case AtomicRMWInst::Nand: | |||
2538 | return Builder.CreateNeg(Builder.CreateAnd(Src1, Src2)); | |||
2539 | case AtomicRMWInst::Or: | |||
2540 | return Builder.CreateOr(Src1, Src2); | |||
2541 | case AtomicRMWInst::Xor: | |||
2542 | return Builder.CreateXor(Src1, Src2); | |||
2543 | case AtomicRMWInst::Xchg: | |||
2544 | case AtomicRMWInst::FAdd: | |||
2545 | case AtomicRMWInst::FSub: | |||
2546 | case AtomicRMWInst::BAD_BINOP: | |||
2547 | case AtomicRMWInst::Max: | |||
2548 | case AtomicRMWInst::Min: | |||
2549 | case AtomicRMWInst::UMax: | |||
2550 | case AtomicRMWInst::UMin: | |||
2551 | llvm_unreachable("Unsupported atomic update operation")__builtin_unreachable(); | |||
2552 | } | |||
2553 | llvm_unreachable("Unsupported atomic update operation")__builtin_unreachable(); | |||
2554 | } | |||
2555 | ||||
2556 | std::pair<Value *, Value *> | |||
2557 | OpenMPIRBuilder::emitAtomicUpdate(Instruction *AllocIP, Value *X, Value *Expr, | |||
2558 | AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, | |||
2559 | AtomicUpdateCallbackTy &UpdateOp, | |||
2560 | bool VolatileX, bool IsXLHSInRHSPart) { | |||
2561 | Type *XElemTy = X->getType()->getPointerElementType(); | |||
2562 | ||||
2563 | bool DoCmpExch = | |||
2564 | ((RMWOp == AtomicRMWInst::BAD_BINOP) || (RMWOp == AtomicRMWInst::FAdd)) || | |||
2565 | (RMWOp == AtomicRMWInst::FSub) || | |||
2566 | (RMWOp == AtomicRMWInst::Sub && !IsXLHSInRHSPart); | |||
2567 | ||||
2568 | std::pair<Value *, Value *> Res; | |||
2569 | if (XElemTy->isIntegerTy() && !DoCmpExch) { | |||
2570 | Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO); | |||
2571 | // not needed except in case of postfix captures. Generate anyway for | |||
2572 | // consistency with the else part. Will be removed with any DCE pass. | |||
2573 | Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp); | |||
2574 | } else { | |||
2575 | unsigned Addrspace = cast<PointerType>(X->getType())->getAddressSpace(); | |||
2576 | IntegerType *IntCastTy = | |||
2577 | IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); | |||
2578 | Value *XBCast = | |||
2579 | Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace)); | |||
2580 | LoadInst *OldVal = | |||
2581 | Builder.CreateLoad(IntCastTy, XBCast, X->getName() + ".atomic.load"); | |||
2582 | OldVal->setAtomic(AO); | |||
2583 | // CurBB | |||
2584 | // | /---\ | |||
2585 | // ContBB | | |||
2586 | // | \---/ | |||
2587 | // ExitBB | |||
2588 | BasicBlock *CurBB = Builder.GetInsertBlock(); | |||
2589 | Instruction *CurBBTI = CurBB->getTerminator(); | |||
2590 | CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable(); | |||
2591 | BasicBlock *ExitBB = | |||
2592 | CurBB->splitBasicBlock(CurBBTI, X->getName() + ".atomic.exit"); | |||
2593 | BasicBlock *ContBB = CurBB->splitBasicBlock(CurBB->getTerminator(), | |||
2594 | X->getName() + ".atomic.cont"); | |||
2595 | ContBB->getTerminator()->eraseFromParent(); | |||
2596 | Builder.SetInsertPoint(ContBB); | |||
2597 | llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(), 2); | |||
2598 | PHI->addIncoming(OldVal, CurBB); | |||
2599 | AllocaInst *NewAtomicAddr = Builder.CreateAlloca(XElemTy); | |||
2600 | NewAtomicAddr->setName(X->getName() + "x.new.val"); | |||
2601 | NewAtomicAddr->moveBefore(AllocIP); | |||
2602 | IntegerType *NewAtomicCastTy = | |||
2603 | IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); | |||
2604 | bool IsIntTy = XElemTy->isIntegerTy(); | |||
2605 | Value *NewAtomicIntAddr = | |||
2606 | (IsIntTy) | |||
2607 | ? NewAtomicAddr | |||
2608 | : Builder.CreateBitCast(NewAtomicAddr, | |||
2609 | NewAtomicCastTy->getPointerTo(Addrspace)); | |||
2610 | Value *OldExprVal = PHI; | |||
2611 | if (!IsIntTy) { | |||
2612 | if (XElemTy->isFloatingPointTy()) { | |||
2613 | OldExprVal = Builder.CreateBitCast(PHI, XElemTy, | |||
2614 | X->getName() + ".atomic.fltCast"); | |||
2615 | } else { | |||
2616 | OldExprVal = Builder.CreateIntToPtr(PHI, XElemTy, | |||
2617 | X->getName() + ".atomic.ptrCast"); | |||
2618 | } | |||
2619 | } | |||
2620 | ||||
2621 | Value *Upd = UpdateOp(OldExprVal, Builder); | |||
2622 | Builder.CreateStore(Upd, NewAtomicAddr); | |||
2623 | LoadInst *DesiredVal = Builder.CreateLoad(XElemTy, NewAtomicIntAddr); | |||
2624 | Value *XAddr = | |||
2625 | (IsIntTy) | |||
2626 | ? X | |||
2627 | : Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace)); | |||
2628 | AtomicOrdering Failure = | |||
2629 | llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); | |||
2630 | AtomicCmpXchgInst *Result = Builder.CreateAtomicCmpXchg( | |||
2631 | XAddr, OldExprVal, DesiredVal, llvm::MaybeAlign(), AO, Failure); | |||
2632 | Result->setVolatile(VolatileX); | |||
2633 | Value *PreviousVal = Builder.CreateExtractValue(Result, /*Idxs=*/0); | |||
2634 | Value *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1); | |||
2635 | PHI->addIncoming(PreviousVal, Builder.GetInsertBlock()); | |||
2636 | Builder.CreateCondBr(SuccessFailureVal, ExitBB, ContBB); | |||
2637 | ||||
2638 | Res.first = OldExprVal; | |||
2639 | Res.second = Upd; | |||
2640 | ||||
2641 | // set Insertion point in exit block | |||
2642 | if (UnreachableInst *ExitTI = | |||
2643 | dyn_cast<UnreachableInst>(ExitBB->getTerminator())) { | |||
2644 | CurBBTI->eraseFromParent(); | |||
2645 | Builder.SetInsertPoint(ExitBB); | |||
2646 | } else { | |||
2647 | Builder.SetInsertPoint(ExitTI); | |||
2648 | } | |||
2649 | } | |||
2650 | ||||
2651 | return Res; | |||
2652 | } | |||
2653 | ||||
2654 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCapture( | |||
2655 | const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X, | |||
2656 | AtomicOpValue &V, Value *Expr, AtomicOrdering AO, | |||
2657 | AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, | |||
2658 | bool UpdateExpr, bool IsPostfixUpdate, bool IsXLHSInRHSPart) { | |||
2659 | if (!updateToLocation(Loc)) | |||
2660 | return Loc.IP; | |||
2661 | ||||
2662 | LLVM_DEBUG({do { } while (false) | |||
2663 | Type *XTy = X.Var->getType();do { } while (false) | |||
2664 | assert(XTy->isPointerTy() &&do { } while (false) | |||
2665 | "OMP Atomic expects a pointer to target memory");do { } while (false) | |||
2666 | Type *XElemTy = XTy->getPointerElementType();do { } while (false) | |||
2667 | assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||do { } while (false) | |||
2668 | XElemTy->isPointerTy()) &&do { } while (false) | |||
2669 | "OMP atomic capture expected a scalar type");do { } while (false) | |||
2670 | assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&do { } while (false) | |||
2671 | "OpenMP atomic does not support LT or GT operations");do { } while (false) | |||
2672 | })do { } while (false); | |||
2673 | ||||
2674 | // If UpdateExpr is 'x' updated with some `expr` not based on 'x', | |||
2675 | // 'x' is simply atomically rewritten with 'expr'. | |||
2676 | AtomicRMWInst::BinOp AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg); | |||
2677 | std::pair<Value *, Value *> Result = | |||
2678 | emitAtomicUpdate(AllocIP, X.Var, Expr, AO, AtomicOp, UpdateOp, | |||
2679 | X.IsVolatile, IsXLHSInRHSPart); | |||
2680 | ||||
2681 | Value *CapturedVal = (IsPostfixUpdate ? Result.first : Result.second); | |||
2682 | Builder.CreateStore(CapturedVal, V.Var, V.IsVolatile); | |||
2683 | ||||
2684 | checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Capture); | |||
2685 | return Builder.saveIP(); | |||
2686 | } | |||
2687 | ||||
2688 | GlobalVariable * | |||
2689 | OpenMPIRBuilder::createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names, | |||
2690 | std::string VarName) { | |||
2691 | llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get( | |||
2692 | llvm::ArrayType::get( | |||
2693 | llvm::Type::getInt8Ty(M.getContext())->getPointerTo(), Names.size()), | |||
2694 | Names); | |||
2695 | auto *MapNamesArrayGlobal = new llvm::GlobalVariable( | |||
2696 | M, MapNamesArrayInit->getType(), | |||
2697 | /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MapNamesArrayInit, | |||
2698 | VarName); | |||
2699 | return MapNamesArrayGlobal; | |||
2700 | } | |||
2701 | ||||
2702 | // Create all simple and struct types exposed by the runtime and remember | |||
2703 | // the llvm::PointerTypes of them for easy access later. | |||
2704 | void OpenMPIRBuilder::initializeTypes(Module &M) { | |||
2705 | LLVMContext &Ctx = M.getContext(); | |||
2706 | StructType *T; | |||
2707 | #define OMP_TYPE(VarName, InitValue) VarName = InitValue; | |||
2708 | #define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \ | |||
2709 | VarName##Ty = ArrayType::get(ElemTy, ArraySize); \ | |||
2710 | VarName##PtrTy = PointerType::getUnqual(VarName##Ty); | |||
2711 | #define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \ | |||
2712 | VarName = FunctionType::get(ReturnType, {__VA_ARGS__}, IsVarArg); \ | |||
2713 | VarName##Ptr = PointerType::getUnqual(VarName); | |||
2714 | #define OMP_STRUCT_TYPE(VarName, StructName, ...) \ | |||
2715 | T = StructType::getTypeByName(Ctx, StructName); \ | |||
2716 | if (!T) \ | |||
2717 | T = StructType::create(Ctx, {__VA_ARGS__}, StructName); \ | |||
2718 | VarName = T; \ | |||
2719 | VarName##Ptr = PointerType::getUnqual(T); | |||
2720 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | |||
2721 | } | |||
2722 | ||||
2723 | void OpenMPIRBuilder::OutlineInfo::collectBlocks( | |||
2724 | SmallPtrSetImpl<BasicBlock *> &BlockSet, | |||
2725 | SmallVectorImpl<BasicBlock *> &BlockVector) { | |||
2726 | SmallVector<BasicBlock *, 32> Worklist; | |||
2727 | BlockSet.insert(EntryBB); | |||
2728 | BlockSet.insert(ExitBB); | |||
2729 | ||||
2730 | Worklist.push_back(EntryBB); | |||
2731 | while (!Worklist.empty()) { | |||
2732 | BasicBlock *BB = Worklist.pop_back_val(); | |||
2733 | BlockVector.push_back(BB); | |||
2734 | for (BasicBlock *SuccBB : successors(BB)) | |||
2735 | if (BlockSet.insert(SuccBB).second) | |||
2736 | Worklist.push_back(SuccBB); | |||
2737 | } | |||
2738 | } | |||
2739 | ||||
2740 | void CanonicalLoopInfo::collectControlBlocks( | |||
2741 | SmallVectorImpl<BasicBlock *> &BBs) { | |||
2742 | // We only count those BBs as control block for which we do not need to | |||
2743 | // reverse the CFG, i.e. not the loop body which can contain arbitrary control | |||
2744 | // flow. For consistency, this also means we do not add the Body block, which | |||
2745 | // is just the entry to the body code. | |||
2746 | BBs.reserve(BBs.size() + 6); | |||
2747 | BBs.append({Preheader, Header, Cond, Latch, Exit, After}); | |||
2748 | } | |||
2749 | ||||
2750 | void CanonicalLoopInfo::assertOK() const { | |||
2751 | #ifndef NDEBUG1 | |||
2752 | if (!IsValid) | |||
2753 | return; | |||
2754 | ||||
2755 | // Verify standard control-flow we use for OpenMP loops. | |||
2756 | assert(Preheader)((void)0); | |||
2757 | assert(isa<BranchInst>(Preheader->getTerminator()) &&((void)0) | |||
2758 | "Preheader must terminate with unconditional branch")((void)0); | |||
2759 | assert(Preheader->getSingleSuccessor() == Header &&((void)0) | |||
2760 | "Preheader must jump to header")((void)0); | |||
2761 | ||||
2762 | assert(Header)((void)0); | |||
2763 | assert(isa<BranchInst>(Header->getTerminator()) &&((void)0) | |||
2764 | "Header must terminate with unconditional branch")((void)0); | |||
2765 | assert(Header->getSingleSuccessor() == Cond &&((void)0) | |||
2766 | "Header must jump to exiting block")((void)0); | |||
2767 | ||||
2768 | assert(Cond)((void)0); | |||
2769 | assert(Cond->getSinglePredecessor() == Header &&((void)0) | |||
2770 | "Exiting block only reachable from header")((void)0); | |||
2771 | ||||
2772 | assert(isa<BranchInst>(Cond->getTerminator()) &&((void)0) | |||
2773 | "Exiting block must terminate with conditional branch")((void)0); | |||
2774 | assert(size(successors(Cond)) == 2 &&((void)0) | |||
2775 | "Exiting block must have two successors")((void)0); | |||
2776 | assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body &&((void)0) | |||
2777 | "Exiting block's first successor jump to the body")((void)0); | |||
2778 | assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit &&((void)0) | |||
2779 | "Exiting block's second successor must exit the loop")((void)0); | |||
2780 | ||||
2781 | assert(Body)((void)0); | |||
2782 | assert(Body->getSinglePredecessor() == Cond &&((void)0) | |||
2783 | "Body only reachable from exiting block")((void)0); | |||
2784 | assert(!isa<PHINode>(Body->front()))((void)0); | |||
2785 | ||||
2786 | assert(Latch)((void)0); | |||
2787 | assert(isa<BranchInst>(Latch->getTerminator()) &&((void)0) | |||
2788 | "Latch must terminate with unconditional branch")((void)0); | |||
2789 | assert(Latch->getSingleSuccessor() == Header && "Latch must jump to header")((void)0); | |||
2790 | // TODO: To support simple redirecting of the end of the body code that has | |||
2791 | // multiple; introduce another auxiliary basic block like preheader and after. | |||
2792 | assert(Latch->getSinglePredecessor() != nullptr)((void)0); | |||
2793 | assert(!isa<PHINode>(Latch->front()))((void)0); | |||
2794 | ||||
2795 | assert(Exit)((void)0); | |||
2796 | assert(isa<BranchInst>(Exit->getTerminator()) &&((void)0) | |||
2797 | "Exit block must terminate with unconditional branch")((void)0); | |||
2798 | assert(Exit->getSingleSuccessor() == After &&((void)0) | |||
2799 | "Exit block must jump to after block")((void)0); | |||
2800 | ||||
2801 | assert(After)((void)0); | |||
2802 | assert(After->getSinglePredecessor() == Exit &&((void)0) | |||
2803 | "After block only reachable from exit block")((void)0); | |||
2804 | assert(After->empty() || !isa<PHINode>(After->front()))((void)0); | |||
2805 | ||||
2806 | Instruction *IndVar = getIndVar(); | |||
2807 | assert(IndVar && "Canonical induction variable not found?")((void)0); | |||
2808 | assert(isa<IntegerType>(IndVar->getType()) &&((void)0) | |||
2809 | "Induction variable must be an integer")((void)0); | |||
2810 | assert(cast<PHINode>(IndVar)->getParent() == Header &&((void)0) | |||
2811 | "Induction variable must be a PHI in the loop header")((void)0); | |||
2812 | assert(cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader)((void)0); | |||
2813 | assert(((void)0) | |||
2814 | cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero())((void)0); | |||
2815 | assert(cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch)((void)0); | |||
2816 | ||||
2817 | auto *NextIndVar = cast<PHINode>(IndVar)->getIncomingValue(1); | |||
2818 | assert(cast<Instruction>(NextIndVar)->getParent() == Latch)((void)0); | |||
2819 | assert(cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add)((void)0); | |||
2820 | assert(cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar)((void)0); | |||
2821 | assert(cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1))((void)0) | |||
2822 | ->isOne())((void)0); | |||
2823 | ||||
2824 | Value *TripCount = getTripCount(); | |||
2825 | assert(TripCount && "Loop trip count not found?")((void)0); | |||
2826 | assert(IndVar->getType() == TripCount->getType() &&((void)0) | |||
2827 | "Trip count and induction variable must have the same type")((void)0); | |||
2828 | ||||
2829 | auto *CmpI = cast<CmpInst>(&Cond->front()); | |||
2830 | assert(CmpI->getPredicate() == CmpInst::ICMP_ULT &&((void)0) | |||
2831 | "Exit condition must be a signed less-than comparison")((void)0); | |||
2832 | assert(CmpI->getOperand(0) == IndVar &&((void)0) | |||
2833 | "Exit condition must compare the induction variable")((void)0); | |||
2834 | assert(CmpI->getOperand(1) == TripCount &&((void)0) | |||
2835 | "Exit condition must compare with the trip count")((void)0); | |||
2836 | #endif | |||
2837 | } |
1 | //===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file defines the IRBuilder class, which is used as a convenient way | |||
10 | // to create LLVM instructions with a consistent and simplified interface. | |||
11 | // | |||
12 | //===----------------------------------------------------------------------===// | |||
13 | ||||
14 | #ifndef LLVM_IR_IRBUILDER_H | |||
15 | #define LLVM_IR_IRBUILDER_H | |||
16 | ||||
17 | #include "llvm-c/Types.h" | |||
18 | #include "llvm/ADT/ArrayRef.h" | |||
19 | #include "llvm/ADT/None.h" | |||
20 | #include "llvm/ADT/STLExtras.h" | |||
21 | #include "llvm/ADT/StringRef.h" | |||
22 | #include "llvm/ADT/Twine.h" | |||
23 | #include "llvm/IR/BasicBlock.h" | |||
24 | #include "llvm/IR/Constant.h" | |||
25 | #include "llvm/IR/ConstantFolder.h" | |||
26 | #include "llvm/IR/Constants.h" | |||
27 | #include "llvm/IR/DataLayout.h" | |||
28 | #include "llvm/IR/DebugInfoMetadata.h" | |||
29 | #include "llvm/IR/DebugLoc.h" | |||
30 | #include "llvm/IR/DerivedTypes.h" | |||
31 | #include "llvm/IR/Function.h" | |||
32 | #include "llvm/IR/GlobalVariable.h" | |||
33 | #include "llvm/IR/InstrTypes.h" | |||
34 | #include "llvm/IR/Instruction.h" | |||
35 | #include "llvm/IR/Instructions.h" | |||
36 | #include "llvm/IR/IntrinsicInst.h" | |||
37 | #include "llvm/IR/LLVMContext.h" | |||
38 | #include "llvm/IR/Module.h" | |||
39 | #include "llvm/IR/Operator.h" | |||
40 | #include "llvm/IR/Type.h" | |||
41 | #include "llvm/IR/Value.h" | |||
42 | #include "llvm/IR/ValueHandle.h" | |||
43 | #include "llvm/Support/AtomicOrdering.h" | |||
44 | #include "llvm/Support/CBindingWrapping.h" | |||
45 | #include "llvm/Support/Casting.h" | |||
46 | #include <cassert> | |||
47 | #include <cstddef> | |||
48 | #include <cstdint> | |||
49 | #include <functional> | |||
50 | #include <utility> | |||
51 | ||||
52 | namespace llvm { | |||
53 | ||||
54 | class APInt; | |||
55 | class MDNode; | |||
56 | class Use; | |||
57 | ||||
58 | /// This provides the default implementation of the IRBuilder | |||
59 | /// 'InsertHelper' method that is called whenever an instruction is created by | |||
60 | /// IRBuilder and needs to be inserted. | |||
61 | /// | |||
62 | /// By default, this inserts the instruction at the insertion point. | |||
63 | class IRBuilderDefaultInserter { | |||
64 | public: | |||
65 | virtual ~IRBuilderDefaultInserter(); | |||
66 | ||||
67 | virtual void InsertHelper(Instruction *I, const Twine &Name, | |||
68 | BasicBlock *BB, | |||
69 | BasicBlock::iterator InsertPt) const { | |||
70 | if (BB) BB->getInstList().insert(InsertPt, I); | |||
71 | I->setName(Name); | |||
72 | } | |||
73 | }; | |||
74 | ||||
75 | /// Provides an 'InsertHelper' that calls a user-provided callback after | |||
76 | /// performing the default insertion. | |||
77 | class IRBuilderCallbackInserter : public IRBuilderDefaultInserter { | |||
78 | std::function<void(Instruction *)> Callback; | |||
79 | ||||
80 | public: | |||
81 | virtual ~IRBuilderCallbackInserter(); | |||
82 | ||||
83 | IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback) | |||
84 | : Callback(std::move(Callback)) {} | |||
85 | ||||
86 | void InsertHelper(Instruction *I, const Twine &Name, | |||
87 | BasicBlock *BB, | |||
88 | BasicBlock::iterator InsertPt) const override { | |||
89 | IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt); | |||
90 | Callback(I); | |||
91 | } | |||
92 | }; | |||
93 | ||||
94 | /// Common base class shared among various IRBuilders. | |||
95 | class IRBuilderBase { | |||
96 | /// Pairs of (metadata kind, MDNode *) that should be added to all newly | |||
97 | /// created instructions, like !dbg metadata. | |||
98 | SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy; | |||
99 | ||||
100 | /// Add or update the an entry (Kind, MD) to MetadataToCopy, if \p MD is not | |||
101 | /// null. If \p MD is null, remove the entry with \p Kind. | |||
102 | void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) { | |||
103 | if (!MD) { | |||
104 | erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) { | |||
105 | return KV.first == Kind; | |||
106 | }); | |||
107 | return; | |||
108 | } | |||
109 | ||||
110 | for (auto &KV : MetadataToCopy) | |||
111 | if (KV.first == Kind) { | |||
112 | KV.second = MD; | |||
113 | return; | |||
114 | } | |||
115 | ||||
116 | MetadataToCopy.emplace_back(Kind, MD); | |||
117 | } | |||
118 | ||||
119 | protected: | |||
120 | BasicBlock *BB; | |||
121 | BasicBlock::iterator InsertPt; | |||
122 | LLVMContext &Context; | |||
123 | const IRBuilderFolder &Folder; | |||
124 | const IRBuilderDefaultInserter &Inserter; | |||
125 | ||||
126 | MDNode *DefaultFPMathTag; | |||
127 | FastMathFlags FMF; | |||
128 | ||||
129 | bool IsFPConstrained; | |||
130 | fp::ExceptionBehavior DefaultConstrainedExcept; | |||
131 | RoundingMode DefaultConstrainedRounding; | |||
132 | ||||
133 | ArrayRef<OperandBundleDef> DefaultOperandBundles; | |||
134 | ||||
135 | public: | |||
136 | IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder, | |||
137 | const IRBuilderDefaultInserter &Inserter, | |||
138 | MDNode *FPMathTag, ArrayRef<OperandBundleDef> OpBundles) | |||
139 | : Context(context), Folder(Folder), Inserter(Inserter), | |||
140 | DefaultFPMathTag(FPMathTag), IsFPConstrained(false), | |||
141 | DefaultConstrainedExcept(fp::ebStrict), | |||
142 | DefaultConstrainedRounding(RoundingMode::Dynamic), | |||
143 | DefaultOperandBundles(OpBundles) { | |||
144 | ClearInsertionPoint(); | |||
145 | } | |||
146 | ||||
147 | /// Insert and return the specified instruction. | |||
148 | template<typename InstTy> | |||
149 | InstTy *Insert(InstTy *I, const Twine &Name = "") const { | |||
150 | Inserter.InsertHelper(I, Name, BB, InsertPt); | |||
151 | AddMetadataToInst(I); | |||
152 | return I; | |||
153 | } | |||
154 | ||||
155 | /// No-op overload to handle constants. | |||
156 | Constant *Insert(Constant *C, const Twine& = "") const { | |||
157 | return C; | |||
158 | } | |||
159 | ||||
160 | Value *Insert(Value *V, const Twine &Name = "") const { | |||
161 | if (Instruction *I = dyn_cast<Instruction>(V)) | |||
162 | return Insert(I, Name); | |||
163 | assert(isa<Constant>(V))((void)0); | |||
164 | return V; | |||
165 | } | |||
166 | ||||
167 | //===--------------------------------------------------------------------===// | |||
168 | // Builder configuration methods | |||
169 | //===--------------------------------------------------------------------===// | |||
170 | ||||
171 | /// Clear the insertion point: created instructions will not be | |||
172 | /// inserted into a block. | |||
173 | void ClearInsertionPoint() { | |||
174 | BB = nullptr; | |||
175 | InsertPt = BasicBlock::iterator(); | |||
176 | } | |||
177 | ||||
178 | BasicBlock *GetInsertBlock() const { return BB; } | |||
179 | BasicBlock::iterator GetInsertPoint() const { return InsertPt; } | |||
180 | LLVMContext &getContext() const { return Context; } | |||
181 | ||||
182 | /// This specifies that created instructions should be appended to the | |||
183 | /// end of the specified block. | |||
184 | void SetInsertPoint(BasicBlock *TheBB) { | |||
185 | BB = TheBB; | |||
186 | InsertPt = BB->end(); | |||
187 | } | |||
188 | ||||
189 | /// This specifies that created instructions should be inserted before | |||
190 | /// the specified instruction. | |||
191 | void SetInsertPoint(Instruction *I) { | |||
192 | BB = I->getParent(); | |||
193 | InsertPt = I->getIterator(); | |||
194 | assert(InsertPt != BB->end() && "Can't read debug loc from end()")((void)0); | |||
195 | SetCurrentDebugLocation(I->getDebugLoc()); | |||
196 | } | |||
197 | ||||
198 | /// This specifies that created instructions should be inserted at the | |||
199 | /// specified point. | |||
200 | void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) { | |||
201 | BB = TheBB; | |||
202 | InsertPt = IP; | |||
203 | if (IP != TheBB->end()) | |||
204 | SetCurrentDebugLocation(IP->getDebugLoc()); | |||
205 | } | |||
206 | ||||
207 | /// Set location information used by debugging information. | |||
208 | void SetCurrentDebugLocation(DebugLoc L) { | |||
209 | AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode()); | |||
210 | } | |||
211 | ||||
212 | /// Collect metadata with IDs \p MetadataKinds from \p Src which should be | |||
213 | /// added to all created instructions. Entries present in MedataDataToCopy but | |||
214 | /// not on \p Src will be dropped from MetadataToCopy. | |||
215 | void CollectMetadataToCopy(Instruction *Src, | |||
216 | ArrayRef<unsigned> MetadataKinds) { | |||
217 | for (unsigned K : MetadataKinds) | |||
218 | AddOrRemoveMetadataToCopy(K, Src->getMetadata(K)); | |||
219 | } | |||
220 | ||||
221 | /// Get location information used by debugging information. | |||
222 | DebugLoc getCurrentDebugLocation() const { | |||
223 | for (auto &KV : MetadataToCopy) | |||
224 | if (KV.first == LLVMContext::MD_dbg) | |||
225 | return {cast<DILocation>(KV.second)}; | |||
226 | ||||
227 | return {}; | |||
228 | } | |||
229 | ||||
230 | /// If this builder has a current debug location, set it on the | |||
231 | /// specified instruction. | |||
232 | void SetInstDebugLocation(Instruction *I) const { | |||
233 | for (const auto &KV : MetadataToCopy) | |||
234 | if (KV.first == LLVMContext::MD_dbg) { | |||
235 | I->setDebugLoc(DebugLoc(KV.second)); | |||
236 | return; | |||
237 | } | |||
238 | } | |||
239 | ||||
240 | /// Add all entries in MetadataToCopy to \p I. | |||
241 | void AddMetadataToInst(Instruction *I) const { | |||
242 | for (auto &KV : MetadataToCopy) | |||
243 | I->setMetadata(KV.first, KV.second); | |||
244 | } | |||
245 | ||||
246 | /// Get the return type of the current function that we're emitting | |||
247 | /// into. | |||
248 | Type *getCurrentFunctionReturnType() const; | |||
249 | ||||
250 | /// InsertPoint - A saved insertion point. | |||
251 | class InsertPoint { | |||
252 | BasicBlock *Block = nullptr; | |||
253 | BasicBlock::iterator Point; | |||
254 | ||||
255 | public: | |||
256 | /// Creates a new insertion point which doesn't point to anything. | |||
257 | InsertPoint() = default; | |||
258 | ||||
259 | /// Creates a new insertion point at the given location. | |||
260 | InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint) | |||
261 | : Block(InsertBlock), Point(InsertPoint) {} | |||
262 | ||||
263 | /// Returns true if this insert point is set. | |||
264 | bool isSet() const { return (Block != nullptr); } | |||
265 | ||||
266 | BasicBlock *getBlock() const { return Block; } | |||
267 | BasicBlock::iterator getPoint() const { return Point; } | |||
268 | }; | |||
269 | ||||
270 | /// Returns the current insert point. | |||
271 | InsertPoint saveIP() const { | |||
272 | return InsertPoint(GetInsertBlock(), GetInsertPoint()); | |||
273 | } | |||
274 | ||||
275 | /// Returns the current insert point, clearing it in the process. | |||
276 | InsertPoint saveAndClearIP() { | |||
277 | InsertPoint IP(GetInsertBlock(), GetInsertPoint()); | |||
278 | ClearInsertionPoint(); | |||
279 | return IP; | |||
280 | } | |||
281 | ||||
282 | /// Sets the current insert point to a previously-saved location. | |||
283 | void restoreIP(InsertPoint IP) { | |||
284 | if (IP.isSet()) | |||
285 | SetInsertPoint(IP.getBlock(), IP.getPoint()); | |||
286 | else | |||
287 | ClearInsertionPoint(); | |||
288 | } | |||
289 | ||||
290 | /// Get the floating point math metadata being used. | |||
291 | MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; } | |||
292 | ||||
293 | /// Get the flags to be applied to created floating point ops | |||
294 | FastMathFlags getFastMathFlags() const { return FMF; } | |||
295 | ||||
296 | FastMathFlags &getFastMathFlags() { return FMF; } | |||
297 | ||||
298 | /// Clear the fast-math flags. | |||
299 | void clearFastMathFlags() { FMF.clear(); } | |||
300 | ||||
301 | /// Set the floating point math metadata to be used. | |||
302 | void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; } | |||
303 | ||||
304 | /// Set the fast-math flags to be used with generated fp-math operators | |||
305 | void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; } | |||
306 | ||||
307 | /// Enable/Disable use of constrained floating point math. When | |||
308 | /// enabled the CreateF<op>() calls instead create constrained | |||
309 | /// floating point intrinsic calls. Fast math flags are unaffected | |||
310 | /// by this setting. | |||
311 | void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; } | |||
312 | ||||
313 | /// Query for the use of constrained floating point math | |||
314 | bool getIsFPConstrained() { return IsFPConstrained; } | |||
315 | ||||
316 | /// Set the exception handling to be used with constrained floating point | |||
317 | void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) { | |||
318 | #ifndef NDEBUG1 | |||
319 | Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(NewExcept); | |||
320 | assert(ExceptStr.hasValue() && "Garbage strict exception behavior!")((void)0); | |||
321 | #endif | |||
322 | DefaultConstrainedExcept = NewExcept; | |||
323 | } | |||
324 | ||||
325 | /// Set the rounding mode handling to be used with constrained floating point | |||
326 | void setDefaultConstrainedRounding(RoundingMode NewRounding) { | |||
327 | #ifndef NDEBUG1 | |||
328 | Optional<StringRef> RoundingStr = RoundingModeToStr(NewRounding); | |||
329 | assert(RoundingStr.hasValue() && "Garbage strict rounding mode!")((void)0); | |||
330 | #endif | |||
331 | DefaultConstrainedRounding = NewRounding; | |||
332 | } | |||
333 | ||||
334 | /// Get the exception handling used with constrained floating point | |||
335 | fp::ExceptionBehavior getDefaultConstrainedExcept() { | |||
336 | return DefaultConstrainedExcept; | |||
337 | } | |||
338 | ||||
339 | /// Get the rounding mode handling used with constrained floating point | |||
340 | RoundingMode getDefaultConstrainedRounding() { | |||
341 | return DefaultConstrainedRounding; | |||
342 | } | |||
343 | ||||
344 | void setConstrainedFPFunctionAttr() { | |||
345 | assert(BB && "Must have a basic block to set any function attributes!")((void)0); | |||
346 | ||||
347 | Function *F = BB->getParent(); | |||
348 | if (!F->hasFnAttribute(Attribute::StrictFP)) { | |||
349 | F->addFnAttr(Attribute::StrictFP); | |||
350 | } | |||
351 | } | |||
352 | ||||
353 | void setConstrainedFPCallAttr(CallBase *I) { | |||
354 | I->addAttribute(AttributeList::FunctionIndex, Attribute::StrictFP); | |||
355 | } | |||
356 | ||||
357 | void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) { | |||
358 | DefaultOperandBundles = OpBundles; | |||
359 | } | |||
360 | ||||
361 | //===--------------------------------------------------------------------===// | |||
362 | // RAII helpers. | |||
363 | //===--------------------------------------------------------------------===// | |||
364 | ||||
365 | // RAII object that stores the current insertion point and restores it | |||
366 | // when the object is destroyed. This includes the debug location. | |||
367 | class InsertPointGuard { | |||
368 | IRBuilderBase &Builder; | |||
369 | AssertingVH<BasicBlock> Block; | |||
370 | BasicBlock::iterator Point; | |||
371 | DebugLoc DbgLoc; | |||
372 | ||||
373 | public: | |||
374 | InsertPointGuard(IRBuilderBase &B) | |||
375 | : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()), | |||
376 | DbgLoc(B.getCurrentDebugLocation()) {} | |||
377 | ||||
378 | InsertPointGuard(const InsertPointGuard &) = delete; | |||
379 | InsertPointGuard &operator=(const InsertPointGuard &) = delete; | |||
380 | ||||
381 | ~InsertPointGuard() { | |||
382 | Builder.restoreIP(InsertPoint(Block, Point)); | |||
383 | Builder.SetCurrentDebugLocation(DbgLoc); | |||
384 | } | |||
385 | }; | |||
386 | ||||
387 | // RAII object that stores the current fast math settings and restores | |||
388 | // them when the object is destroyed. | |||
389 | class FastMathFlagGuard { | |||
390 | IRBuilderBase &Builder; | |||
391 | FastMathFlags FMF; | |||
392 | MDNode *FPMathTag; | |||
393 | bool IsFPConstrained; | |||
394 | fp::ExceptionBehavior DefaultConstrainedExcept; | |||
395 | RoundingMode DefaultConstrainedRounding; | |||
396 | ||||
397 | public: | |||
398 | FastMathFlagGuard(IRBuilderBase &B) | |||
399 | : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag), | |||
400 | IsFPConstrained(B.IsFPConstrained), | |||
401 | DefaultConstrainedExcept(B.DefaultConstrainedExcept), | |||
402 | DefaultConstrainedRounding(B.DefaultConstrainedRounding) {} | |||
403 | ||||
404 | FastMathFlagGuard(const FastMathFlagGuard &) = delete; | |||
405 | FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete; | |||
406 | ||||
407 | ~FastMathFlagGuard() { | |||
408 | Builder.FMF = FMF; | |||
409 | Builder.DefaultFPMathTag = FPMathTag; | |||
410 | Builder.IsFPConstrained = IsFPConstrained; | |||
411 | Builder.DefaultConstrainedExcept = DefaultConstrainedExcept; | |||
412 | Builder.DefaultConstrainedRounding = DefaultConstrainedRounding; | |||
413 | } | |||
414 | }; | |||
415 | ||||
416 | // RAII object that stores the current default operand bundles and restores | |||
417 | // them when the object is destroyed. | |||
418 | class OperandBundlesGuard { | |||
419 | IRBuilderBase &Builder; | |||
420 | ArrayRef<OperandBundleDef> DefaultOperandBundles; | |||
421 | ||||
422 | public: | |||
423 | OperandBundlesGuard(IRBuilderBase &B) | |||
424 | : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {} | |||
425 | ||||
426 | OperandBundlesGuard(const OperandBundlesGuard &) = delete; | |||
427 | OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete; | |||
428 | ||||
429 | ~OperandBundlesGuard() { | |||
430 | Builder.DefaultOperandBundles = DefaultOperandBundles; | |||
431 | } | |||
432 | }; | |||
433 | ||||
434 | ||||
435 | //===--------------------------------------------------------------------===// | |||
436 | // Miscellaneous creation methods. | |||
437 | //===--------------------------------------------------------------------===// | |||
438 | ||||
439 | /// Make a new global variable with initializer type i8* | |||
440 | /// | |||
441 | /// Make a new global variable with an initializer that has array of i8 type | |||
442 | /// filled in with the null terminated string value specified. The new global | |||
443 | /// variable will be marked mergable with any others of the same contents. If | |||
444 | /// Name is specified, it is the name of the global variable created. | |||
445 | /// | |||
446 | /// If no module is given via \p M, it is take from the insertion point basic | |||
447 | /// block. | |||
448 | GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "", | |||
449 | unsigned AddressSpace = 0, | |||
450 | Module *M = nullptr); | |||
451 | ||||
452 | /// Get a constant value representing either true or false. | |||
453 | ConstantInt *getInt1(bool V) { | |||
454 | return ConstantInt::get(getInt1Ty(), V); | |||
455 | } | |||
456 | ||||
457 | /// Get the constant value for i1 true. | |||
458 | ConstantInt *getTrue() { | |||
459 | return ConstantInt::getTrue(Context); | |||
460 | } | |||
461 | ||||
462 | /// Get the constant value for i1 false. | |||
463 | ConstantInt *getFalse() { | |||
464 | return ConstantInt::getFalse(Context); | |||
465 | } | |||
466 | ||||
467 | /// Get a constant 8-bit value. | |||
468 | ConstantInt *getInt8(uint8_t C) { | |||
469 | return ConstantInt::get(getInt8Ty(), C); | |||
470 | } | |||
471 | ||||
472 | /// Get a constant 16-bit value. | |||
473 | ConstantInt *getInt16(uint16_t C) { | |||
474 | return ConstantInt::get(getInt16Ty(), C); | |||
475 | } | |||
476 | ||||
477 | /// Get a constant 32-bit value. | |||
478 | ConstantInt *getInt32(uint32_t C) { | |||
479 | return ConstantInt::get(getInt32Ty(), C); | |||
480 | } | |||
481 | ||||
482 | /// Get a constant 64-bit value. | |||
483 | ConstantInt *getInt64(uint64_t C) { | |||
484 | return ConstantInt::get(getInt64Ty(), C); | |||
485 | } | |||
486 | ||||
487 | /// Get a constant N-bit value, zero extended or truncated from | |||
488 | /// a 64-bit value. | |||
489 | ConstantInt *getIntN(unsigned N, uint64_t C) { | |||
490 | return ConstantInt::get(getIntNTy(N), C); | |||
491 | } | |||
492 | ||||
493 | /// Get a constant integer value. | |||
494 | ConstantInt *getInt(const APInt &AI) { | |||
495 | return ConstantInt::get(Context, AI); | |||
496 | } | |||
497 | ||||
498 | //===--------------------------------------------------------------------===// | |||
499 | // Type creation methods | |||
500 | //===--------------------------------------------------------------------===// | |||
501 | ||||
502 | /// Fetch the type representing a single bit | |||
503 | IntegerType *getInt1Ty() { | |||
504 | return Type::getInt1Ty(Context); | |||
505 | } | |||
506 | ||||
507 | /// Fetch the type representing an 8-bit integer. | |||
508 | IntegerType *getInt8Ty() { | |||
509 | return Type::getInt8Ty(Context); | |||
510 | } | |||
511 | ||||
512 | /// Fetch the type representing a 16-bit integer. | |||
513 | IntegerType *getInt16Ty() { | |||
514 | return Type::getInt16Ty(Context); | |||
515 | } | |||
516 | ||||
517 | /// Fetch the type representing a 32-bit integer. | |||
518 | IntegerType *getInt32Ty() { | |||
519 | return Type::getInt32Ty(Context); | |||
520 | } | |||
521 | ||||
522 | /// Fetch the type representing a 64-bit integer. | |||
523 | IntegerType *getInt64Ty() { | |||
524 | return Type::getInt64Ty(Context); | |||
525 | } | |||
526 | ||||
527 | /// Fetch the type representing a 128-bit integer. | |||
528 | IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); } | |||
529 | ||||
530 | /// Fetch the type representing an N-bit integer. | |||
531 | IntegerType *getIntNTy(unsigned N) { | |||
532 | return Type::getIntNTy(Context, N); | |||
533 | } | |||
534 | ||||
535 | /// Fetch the type representing a 16-bit floating point value. | |||
536 | Type *getHalfTy() { | |||
537 | return Type::getHalfTy(Context); | |||
538 | } | |||
539 | ||||
540 | /// Fetch the type representing a 16-bit brain floating point value. | |||
541 | Type *getBFloatTy() { | |||
542 | return Type::getBFloatTy(Context); | |||
543 | } | |||
544 | ||||
545 | /// Fetch the type representing a 32-bit floating point value. | |||
546 | Type *getFloatTy() { | |||
547 | return Type::getFloatTy(Context); | |||
548 | } | |||
549 | ||||
550 | /// Fetch the type representing a 64-bit floating point value. | |||
551 | Type *getDoubleTy() { | |||
552 | return Type::getDoubleTy(Context); | |||
553 | } | |||
554 | ||||
555 | /// Fetch the type representing void. | |||
556 | Type *getVoidTy() { | |||
557 | return Type::getVoidTy(Context); | |||
558 | } | |||
559 | ||||
560 | /// Fetch the type representing a pointer to an 8-bit integer value. | |||
561 | PointerType *getInt8PtrTy(unsigned AddrSpace = 0) { | |||
562 | return Type::getInt8PtrTy(Context, AddrSpace); | |||
563 | } | |||
564 | ||||
565 | /// Fetch the type representing a pointer to an integer value. | |||
566 | IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) { | |||
567 | return DL.getIntPtrType(Context, AddrSpace); | |||
568 | } | |||
569 | ||||
570 | //===--------------------------------------------------------------------===// | |||
571 | // Intrinsic creation methods | |||
572 | //===--------------------------------------------------------------------===// | |||
573 | ||||
574 | /// Create and insert a memset to the specified pointer and the | |||
575 | /// specified value. | |||
576 | /// | |||
577 | /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is | |||
578 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
579 | /// and noalias tags. | |||
580 | CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, | |||
581 | MaybeAlign Align, bool isVolatile = false, | |||
582 | MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr, | |||
583 | MDNode *NoAliasTag = nullptr) { | |||
584 | return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile, | |||
585 | TBAATag, ScopeTag, NoAliasTag); | |||
586 | } | |||
587 | ||||
588 | CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align, | |||
589 | bool isVolatile = false, MDNode *TBAATag = nullptr, | |||
590 | MDNode *ScopeTag = nullptr, | |||
591 | MDNode *NoAliasTag = nullptr); | |||
592 | ||||
593 | /// Create and insert an element unordered-atomic memset of the region of | |||
594 | /// memory starting at the given pointer to the given value. | |||
595 | /// | |||
596 | /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is | |||
597 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
598 | /// and noalias tags. | |||
599 | CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val, | |||
600 | uint64_t Size, Align Alignment, | |||
601 | uint32_t ElementSize, | |||
602 | MDNode *TBAATag = nullptr, | |||
603 | MDNode *ScopeTag = nullptr, | |||
604 | MDNode *NoAliasTag = nullptr) { | |||
605 | return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size), | |||
606 | Align(Alignment), ElementSize, | |||
607 | TBAATag, ScopeTag, NoAliasTag); | |||
608 | } | |||
609 | ||||
610 | CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val, | |||
611 | Value *Size, Align Alignment, | |||
612 | uint32_t ElementSize, | |||
613 | MDNode *TBAATag = nullptr, | |||
614 | MDNode *ScopeTag = nullptr, | |||
615 | MDNode *NoAliasTag = nullptr); | |||
616 | ||||
617 | /// Create and insert a memcpy between the specified pointers. | |||
618 | /// | |||
619 | /// If the pointers aren't i8*, they will be converted. If a TBAA tag is | |||
620 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
621 | /// and noalias tags. | |||
622 | CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
623 | MaybeAlign SrcAlign, uint64_t Size, | |||
624 | bool isVolatile = false, MDNode *TBAATag = nullptr, | |||
625 | MDNode *TBAAStructTag = nullptr, | |||
626 | MDNode *ScopeTag = nullptr, | |||
627 | MDNode *NoAliasTag = nullptr) { | |||
628 | return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size), | |||
629 | isVolatile, TBAATag, TBAAStructTag, ScopeTag, | |||
630 | NoAliasTag); | |||
631 | } | |||
632 | ||||
633 | CallInst *CreateMemTransferInst( | |||
634 | Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
635 | MaybeAlign SrcAlign, Value *Size, bool isVolatile = false, | |||
636 | MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr, | |||
637 | MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr); | |||
638 | ||||
639 | CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
640 | MaybeAlign SrcAlign, Value *Size, | |||
641 | bool isVolatile = false, MDNode *TBAATag = nullptr, | |||
642 | MDNode *TBAAStructTag = nullptr, | |||
643 | MDNode *ScopeTag = nullptr, | |||
644 | MDNode *NoAliasTag = nullptr) { | |||
645 | return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src, | |||
646 | SrcAlign, Size, isVolatile, TBAATag, | |||
647 | TBAAStructTag, ScopeTag, NoAliasTag); | |||
648 | } | |||
649 | ||||
650 | CallInst * | |||
651 | CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
652 | MaybeAlign SrcAlign, Value *Size, bool IsVolatile = false, | |||
653 | MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr, | |||
654 | MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr); | |||
655 | ||||
656 | /// Create and insert an element unordered-atomic memcpy between the | |||
657 | /// specified pointers. | |||
658 | /// | |||
659 | /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively. | |||
660 | /// | |||
661 | /// If the pointers aren't i8*, they will be converted. If a TBAA tag is | |||
662 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
663 | /// and noalias tags. | |||
664 | CallInst *CreateElementUnorderedAtomicMemCpy( | |||
665 | Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, | |||
666 | uint32_t ElementSize, MDNode *TBAATag = nullptr, | |||
667 | MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr, | |||
668 | MDNode *NoAliasTag = nullptr); | |||
669 | ||||
670 | CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
671 | MaybeAlign SrcAlign, uint64_t Size, | |||
672 | bool isVolatile = false, MDNode *TBAATag = nullptr, | |||
673 | MDNode *ScopeTag = nullptr, | |||
674 | MDNode *NoAliasTag = nullptr) { | |||
675 | return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size), | |||
676 | isVolatile, TBAATag, ScopeTag, NoAliasTag); | |||
677 | } | |||
678 | ||||
679 | CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
680 | MaybeAlign SrcAlign, Value *Size, | |||
681 | bool isVolatile = false, MDNode *TBAATag = nullptr, | |||
682 | MDNode *ScopeTag = nullptr, | |||
683 | MDNode *NoAliasTag = nullptr); | |||
684 | ||||
685 | /// \brief Create and insert an element unordered-atomic memmove between the | |||
686 | /// specified pointers. | |||
687 | /// | |||
688 | /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, | |||
689 | /// respectively. | |||
690 | /// | |||
691 | /// If the pointers aren't i8*, they will be converted. If a TBAA tag is | |||
692 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
693 | /// and noalias tags. | |||
694 | CallInst *CreateElementUnorderedAtomicMemMove( | |||
695 | Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, | |||
696 | uint32_t ElementSize, MDNode *TBAATag = nullptr, | |||
697 | MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr, | |||
698 | MDNode *NoAliasTag = nullptr); | |||
699 | ||||
700 | /// Create a vector fadd reduction intrinsic of the source vector. | |||
701 | /// The first parameter is a scalar accumulator value for ordered reductions. | |||
702 | CallInst *CreateFAddReduce(Value *Acc, Value *Src); | |||
703 | ||||
704 | /// Create a vector fmul reduction intrinsic of the source vector. | |||
705 | /// The first parameter is a scalar accumulator value for ordered reductions. | |||
706 | CallInst *CreateFMulReduce(Value *Acc, Value *Src); | |||
707 | ||||
708 | /// Create a vector int add reduction intrinsic of the source vector. | |||
709 | CallInst *CreateAddReduce(Value *Src); | |||
710 | ||||
711 | /// Create a vector int mul reduction intrinsic of the source vector. | |||
712 | CallInst *CreateMulReduce(Value *Src); | |||
713 | ||||
714 | /// Create a vector int AND reduction intrinsic of the source vector. | |||
715 | CallInst *CreateAndReduce(Value *Src); | |||
716 | ||||
717 | /// Create a vector int OR reduction intrinsic of the source vector. | |||
718 | CallInst *CreateOrReduce(Value *Src); | |||
719 | ||||
720 | /// Create a vector int XOR reduction intrinsic of the source vector. | |||
721 | CallInst *CreateXorReduce(Value *Src); | |||
722 | ||||
723 | /// Create a vector integer max reduction intrinsic of the source | |||
724 | /// vector. | |||
725 | CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false); | |||
726 | ||||
727 | /// Create a vector integer min reduction intrinsic of the source | |||
728 | /// vector. | |||
729 | CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false); | |||
730 | ||||
731 | /// Create a vector float max reduction intrinsic of the source | |||
732 | /// vector. | |||
733 | CallInst *CreateFPMaxReduce(Value *Src); | |||
734 | ||||
735 | /// Create a vector float min reduction intrinsic of the source | |||
736 | /// vector. | |||
737 | CallInst *CreateFPMinReduce(Value *Src); | |||
738 | ||||
739 | /// Create a lifetime.start intrinsic. | |||
740 | /// | |||
741 | /// If the pointer isn't i8* it will be converted. | |||
742 | CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr); | |||
743 | ||||
744 | /// Create a lifetime.end intrinsic. | |||
745 | /// | |||
746 | /// If the pointer isn't i8* it will be converted. | |||
747 | CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr); | |||
748 | ||||
749 | /// Create a call to invariant.start intrinsic. | |||
750 | /// | |||
751 | /// If the pointer isn't i8* it will be converted. | |||
752 | CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr); | |||
753 | ||||
754 | /// Create a call to Masked Load intrinsic | |||
755 | CallInst *CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, | |||
756 | Value *PassThru = nullptr, const Twine &Name = ""); | |||
757 | ||||
758 | /// Create a call to Masked Store intrinsic | |||
759 | CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, | |||
760 | Value *Mask); | |||
761 | ||||
762 | /// Create a call to Masked Gather intrinsic | |||
763 | CallInst *CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, | |||
764 | Value *Mask = nullptr, Value *PassThru = nullptr, | |||
765 | const Twine &Name = ""); | |||
766 | ||||
767 | /// Create a call to Masked Scatter intrinsic | |||
768 | CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, | |||
769 | Value *Mask = nullptr); | |||
770 | ||||
771 | /// Create an assume intrinsic call that allows the optimizer to | |||
772 | /// assume that the provided condition will be true. | |||
773 | /// | |||
774 | /// The optional argument \p OpBundles specifies operand bundles that are | |||
775 | /// added to the call instruction. | |||
776 | CallInst *CreateAssumption(Value *Cond, | |||
777 | ArrayRef<OperandBundleDef> OpBundles = llvm::None); | |||
778 | ||||
779 | /// Create a llvm.experimental.noalias.scope.decl intrinsic call. | |||
780 | Instruction *CreateNoAliasScopeDeclaration(Value *Scope); | |||
781 | Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) { | |||
782 | return CreateNoAliasScopeDeclaration( | |||
783 | MetadataAsValue::get(Context, ScopeTag)); | |||
784 | } | |||
785 | ||||
786 | /// Create a call to the experimental.gc.statepoint intrinsic to | |||
787 | /// start a new statepoint sequence. | |||
788 | CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes, | |||
789 | Value *ActualCallee, | |||
790 | ArrayRef<Value *> CallArgs, | |||
791 | Optional<ArrayRef<Value *>> DeoptArgs, | |||
792 | ArrayRef<Value *> GCArgs, | |||
793 | const Twine &Name = ""); | |||
794 | ||||
795 | /// Create a call to the experimental.gc.statepoint intrinsic to | |||
796 | /// start a new statepoint sequence. | |||
797 | CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes, | |||
798 | Value *ActualCallee, uint32_t Flags, | |||
799 | ArrayRef<Value *> CallArgs, | |||
800 | Optional<ArrayRef<Use>> TransitionArgs, | |||
801 | Optional<ArrayRef<Use>> DeoptArgs, | |||
802 | ArrayRef<Value *> GCArgs, | |||
803 | const Twine &Name = ""); | |||
804 | ||||
805 | /// Conveninence function for the common case when CallArgs are filled | |||
806 | /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be | |||
807 | /// .get()'ed to get the Value pointer. | |||
808 | CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes, | |||
809 | Value *ActualCallee, ArrayRef<Use> CallArgs, | |||
810 | Optional<ArrayRef<Value *>> DeoptArgs, | |||
811 | ArrayRef<Value *> GCArgs, | |||
812 | const Twine &Name = ""); | |||
813 | ||||
814 | /// Create an invoke to the experimental.gc.statepoint intrinsic to | |||
815 | /// start a new statepoint sequence. | |||
816 | InvokeInst * | |||
817 | CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes, | |||
818 | Value *ActualInvokee, BasicBlock *NormalDest, | |||
819 | BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs, | |||
820 | Optional<ArrayRef<Value *>> DeoptArgs, | |||
821 | ArrayRef<Value *> GCArgs, const Twine &Name = ""); | |||
822 | ||||
823 | /// Create an invoke to the experimental.gc.statepoint intrinsic to | |||
824 | /// start a new statepoint sequence. | |||
825 | InvokeInst *CreateGCStatepointInvoke( | |||
826 | uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee, | |||
827 | BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags, | |||
828 | ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs, | |||
829 | Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs, | |||
830 | const Twine &Name = ""); | |||
831 | ||||
832 | // Convenience function for the common case when CallArgs are filled in using | |||
833 | // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to | |||
834 | // get the Value *. | |||
835 | InvokeInst * | |||
836 | CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes, | |||
837 | Value *ActualInvokee, BasicBlock *NormalDest, | |||
838 | BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs, | |||
839 | Optional<ArrayRef<Value *>> DeoptArgs, | |||
840 | ArrayRef<Value *> GCArgs, const Twine &Name = ""); | |||
841 | ||||
842 | /// Create a call to the experimental.gc.result intrinsic to extract | |||
843 | /// the result from a call wrapped in a statepoint. | |||
844 | CallInst *CreateGCResult(Instruction *Statepoint, | |||
845 | Type *ResultType, | |||
846 | const Twine &Name = ""); | |||
847 | ||||
848 | /// Create a call to the experimental.gc.relocate intrinsics to | |||
849 | /// project the relocated value of one pointer from the statepoint. | |||
850 | CallInst *CreateGCRelocate(Instruction *Statepoint, | |||
851 | int BaseOffset, | |||
852 | int DerivedOffset, | |||
853 | Type *ResultType, | |||
854 | const Twine &Name = ""); | |||
855 | ||||
856 | /// Create a call to the experimental.gc.pointer.base intrinsic to get the | |||
857 | /// base pointer for the specified derived pointer. | |||
858 | CallInst *CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name = ""); | |||
859 | ||||
860 | /// Create a call to the experimental.gc.get.pointer.offset intrinsic to get | |||
861 | /// the offset of the specified derived pointer from its base. | |||
862 | CallInst *CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name = ""); | |||
863 | ||||
864 | /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale | |||
865 | /// will be the same type as that of \p Scaling. | |||
866 | Value *CreateVScale(Constant *Scaling, const Twine &Name = ""); | |||
867 | ||||
868 | /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...> | |||
869 | Value *CreateStepVector(Type *DstType, const Twine &Name = ""); | |||
870 | ||||
871 | /// Create a call to intrinsic \p ID with 1 operand which is mangled on its | |||
872 | /// type. | |||
873 | CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, | |||
874 | Instruction *FMFSource = nullptr, | |||
875 | const Twine &Name = ""); | |||
876 | ||||
877 | /// Create a call to intrinsic \p ID with 2 operands which is mangled on the | |||
878 | /// first type. | |||
879 | CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, | |||
880 | Instruction *FMFSource = nullptr, | |||
881 | const Twine &Name = ""); | |||
882 | ||||
883 | /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If | |||
884 | /// \p FMFSource is provided, copy fast-math-flags from that instruction to | |||
885 | /// the intrinsic. | |||
886 | CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types, | |||
887 | ArrayRef<Value *> Args, | |||
888 | Instruction *FMFSource = nullptr, | |||
889 | const Twine &Name = ""); | |||
890 | ||||
891 | /// Create call to the minnum intrinsic. | |||
892 | CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
893 | return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name); | |||
894 | } | |||
895 | ||||
896 | /// Create call to the maxnum intrinsic. | |||
897 | CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
898 | return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name); | |||
899 | } | |||
900 | ||||
901 | /// Create call to the minimum intrinsic. | |||
902 | CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
903 | return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name); | |||
904 | } | |||
905 | ||||
906 | /// Create call to the maximum intrinsic. | |||
907 | CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
908 | return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name); | |||
909 | } | |||
910 | ||||
911 | /// Create a call to the arithmetic_fence intrinsic. | |||
912 | CallInst *CreateArithmeticFence(Value *Val, Type *DstType, | |||
913 | const Twine &Name = "") { | |||
914 | return CreateIntrinsic(Intrinsic::arithmetic_fence, DstType, Val, nullptr, | |||
915 | Name); | |||
916 | } | |||
917 | ||||
918 | /// Create a call to the experimental.vector.extract intrinsic. | |||
919 | CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx, | |||
920 | const Twine &Name = "") { | |||
921 | return CreateIntrinsic(Intrinsic::experimental_vector_extract, | |||
922 | {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr, | |||
923 | Name); | |||
924 | } | |||
925 | ||||
926 | /// Create a call to the experimental.vector.insert intrinsic. | |||
927 | CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec, | |||
928 | Value *Idx, const Twine &Name = "") { | |||
929 | return CreateIntrinsic(Intrinsic::experimental_vector_insert, | |||
930 | {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx}, | |||
931 | nullptr, Name); | |||
932 | } | |||
933 | ||||
934 | private: | |||
935 | /// Create a call to a masked intrinsic with given Id. | |||
936 | CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops, | |||
937 | ArrayRef<Type *> OverloadedTypes, | |||
938 | const Twine &Name = ""); | |||
939 | ||||
940 | Value *getCastedInt8PtrValue(Value *Ptr); | |||
941 | ||||
942 | //===--------------------------------------------------------------------===// | |||
943 | // Instruction creation methods: Terminators | |||
944 | //===--------------------------------------------------------------------===// | |||
945 | ||||
946 | private: | |||
947 | /// Helper to add branch weight and unpredictable metadata onto an | |||
948 | /// instruction. | |||
949 | /// \returns The annotated instruction. | |||
950 | template <typename InstTy> | |||
951 | InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) { | |||
952 | if (Weights) | |||
953 | I->setMetadata(LLVMContext::MD_prof, Weights); | |||
954 | if (Unpredictable) | |||
955 | I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable); | |||
956 | return I; | |||
957 | } | |||
958 | ||||
959 | public: | |||
960 | /// Create a 'ret void' instruction. | |||
961 | ReturnInst *CreateRetVoid() { | |||
962 | return Insert(ReturnInst::Create(Context)); | |||
963 | } | |||
964 | ||||
965 | /// Create a 'ret <val>' instruction. | |||
966 | ReturnInst *CreateRet(Value *V) { | |||
967 | return Insert(ReturnInst::Create(Context, V)); | |||
968 | } | |||
969 | ||||
970 | /// Create a sequence of N insertvalue instructions, | |||
971 | /// with one Value from the retVals array each, that build a aggregate | |||
972 | /// return value one value at a time, and a ret instruction to return | |||
973 | /// the resulting aggregate value. | |||
974 | /// | |||
975 | /// This is a convenience function for code that uses aggregate return values | |||
976 | /// as a vehicle for having multiple return values. | |||
977 | ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) { | |||
978 | Value *V = UndefValue::get(getCurrentFunctionReturnType()); | |||
979 | for (unsigned i = 0; i != N; ++i) | |||
980 | V = CreateInsertValue(V, retVals[i], i, "mrv"); | |||
981 | return Insert(ReturnInst::Create(Context, V)); | |||
982 | } | |||
983 | ||||
984 | /// Create an unconditional 'br label X' instruction. | |||
985 | BranchInst *CreateBr(BasicBlock *Dest) { | |||
986 | return Insert(BranchInst::Create(Dest)); | |||
987 | } | |||
988 | ||||
989 | /// Create a conditional 'br Cond, TrueDest, FalseDest' | |||
990 | /// instruction. | |||
991 | BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, | |||
992 | MDNode *BranchWeights = nullptr, | |||
993 | MDNode *Unpredictable = nullptr) { | |||
994 | return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond), | |||
995 | BranchWeights, Unpredictable)); | |||
996 | } | |||
997 | ||||
998 | /// Create a conditional 'br Cond, TrueDest, FalseDest' | |||
999 | /// instruction. Copy branch meta data if available. | |||
1000 | BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, | |||
1001 | Instruction *MDSrc) { | |||
1002 | BranchInst *Br = BranchInst::Create(True, False, Cond); | |||
1003 | if (MDSrc) { | |||
1004 | unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable, | |||
1005 | LLVMContext::MD_make_implicit, LLVMContext::MD_dbg}; | |||
1006 | Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4)); | |||
1007 | } | |||
1008 | return Insert(Br); | |||
1009 | } | |||
1010 | ||||
1011 | /// Create a switch instruction with the specified value, default dest, | |||
1012 | /// and with a hint for the number of cases that will be added (for efficient | |||
1013 | /// allocation). | |||
1014 | SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10, | |||
1015 | MDNode *BranchWeights = nullptr, | |||
1016 | MDNode *Unpredictable = nullptr) { | |||
1017 | return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases), | |||
1018 | BranchWeights, Unpredictable)); | |||
1019 | } | |||
1020 | ||||
1021 | /// Create an indirect branch instruction with the specified address | |||
1022 | /// operand, with an optional hint for the number of destinations that will be | |||
1023 | /// added (for efficient allocation). | |||
1024 | IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) { | |||
1025 | return Insert(IndirectBrInst::Create(Addr, NumDests)); | |||
1026 | } | |||
1027 | ||||
1028 | /// Create an invoke instruction. | |||
1029 | InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee, | |||
1030 | BasicBlock *NormalDest, BasicBlock *UnwindDest, | |||
1031 | ArrayRef<Value *> Args, | |||
1032 | ArrayRef<OperandBundleDef> OpBundles, | |||
1033 | const Twine &Name = "") { | |||
1034 | InvokeInst *II = | |||
1035 | InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles); | |||
1036 | if (IsFPConstrained) | |||
1037 | setConstrainedFPCallAttr(II); | |||
1038 | return Insert(II, Name); | |||
1039 | } | |||
1040 | InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee, | |||
1041 | BasicBlock *NormalDest, BasicBlock *UnwindDest, | |||
1042 | ArrayRef<Value *> Args = None, | |||
1043 | const Twine &Name = "") { | |||
1044 | InvokeInst *II = | |||
1045 | InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args); | |||
1046 | if (IsFPConstrained) | |||
1047 | setConstrainedFPCallAttr(II); | |||
1048 | return Insert(II, Name); | |||
1049 | } | |||
1050 | ||||
1051 | InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest, | |||
1052 | BasicBlock *UnwindDest, ArrayRef<Value *> Args, | |||
1053 | ArrayRef<OperandBundleDef> OpBundles, | |||
1054 | const Twine &Name = "") { | |||
1055 | return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(), | |||
1056 | NormalDest, UnwindDest, Args, OpBundles, Name); | |||
1057 | } | |||
1058 | ||||
1059 | InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest, | |||
1060 | BasicBlock *UnwindDest, | |||
1061 | ArrayRef<Value *> Args = None, | |||
1062 | const Twine &Name = "") { | |||
1063 | return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(), | |||
1064 | NormalDest, UnwindDest, Args, Name); | |||
1065 | } | |||
1066 | ||||
1067 | /// \brief Create a callbr instruction. | |||
1068 | CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee, | |||
1069 | BasicBlock *DefaultDest, | |||
1070 | ArrayRef<BasicBlock *> IndirectDests, | |||
1071 | ArrayRef<Value *> Args = None, | |||
1072 | const Twine &Name = "") { | |||
1073 | return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, | |||
1074 | Args), Name); | |||
1075 | } | |||
1076 | CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee, | |||
1077 | BasicBlock *DefaultDest, | |||
1078 | ArrayRef<BasicBlock *> IndirectDests, | |||
1079 | ArrayRef<Value *> Args, | |||
1080 | ArrayRef<OperandBundleDef> OpBundles, | |||
1081 | const Twine &Name = "") { | |||
1082 | return Insert( | |||
1083 | CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args, | |||
1084 | OpBundles), Name); | |||
1085 | } | |||
1086 | ||||
1087 | CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest, | |||
1088 | ArrayRef<BasicBlock *> IndirectDests, | |||
1089 | ArrayRef<Value *> Args = None, | |||
1090 | const Twine &Name = "") { | |||
1091 | return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(), | |||
1092 | DefaultDest, IndirectDests, Args, Name); | |||
1093 | } | |||
1094 | CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest, | |||
1095 | ArrayRef<BasicBlock *> IndirectDests, | |||
1096 | ArrayRef<Value *> Args, | |||
1097 | ArrayRef<OperandBundleDef> OpBundles, | |||
1098 | const Twine &Name = "") { | |||
1099 | return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(), | |||
1100 | DefaultDest, IndirectDests, Args, Name); | |||
1101 | } | |||
1102 | ||||
1103 | ResumeInst *CreateResume(Value *Exn) { | |||
1104 | return Insert(ResumeInst::Create(Exn)); | |||
1105 | } | |||
1106 | ||||
1107 | CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad, | |||
1108 | BasicBlock *UnwindBB = nullptr) { | |||
1109 | return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB)); | |||
1110 | } | |||
1111 | ||||
1112 | CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB, | |||
1113 | unsigned NumHandlers, | |||
1114 | const Twine &Name = "") { | |||
1115 | return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers), | |||
1116 | Name); | |||
1117 | } | |||
1118 | ||||
1119 | CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args, | |||
1120 | const Twine &Name = "") { | |||
1121 | return Insert(CatchPadInst::Create(ParentPad, Args), Name); | |||
1122 | } | |||
1123 | ||||
1124 | CleanupPadInst *CreateCleanupPad(Value *ParentPad, | |||
1125 | ArrayRef<Value *> Args = None, | |||
1126 | const Twine &Name = "") { | |||
1127 | return Insert(CleanupPadInst::Create(ParentPad, Args), Name); | |||
1128 | } | |||
1129 | ||||
1130 | CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) { | |||
1131 | return Insert(CatchReturnInst::Create(CatchPad, BB)); | |||
1132 | } | |||
1133 | ||||
1134 | UnreachableInst *CreateUnreachable() { | |||
1135 | return Insert(new UnreachableInst(Context)); | |||
1136 | } | |||
1137 | ||||
1138 | //===--------------------------------------------------------------------===// | |||
1139 | // Instruction creation methods: Binary Operators | |||
1140 | //===--------------------------------------------------------------------===// | |||
1141 | private: | |||
1142 | BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc, | |||
1143 | Value *LHS, Value *RHS, | |||
1144 | const Twine &Name, | |||
1145 | bool HasNUW, bool HasNSW) { | |||
1146 | BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name); | |||
1147 | if (HasNUW) BO->setHasNoUnsignedWrap(); | |||
1148 | if (HasNSW) BO->setHasNoSignedWrap(); | |||
1149 | return BO; | |||
1150 | } | |||
1151 | ||||
1152 | Instruction *setFPAttrs(Instruction *I, MDNode *FPMD, | |||
1153 | FastMathFlags FMF) const { | |||
1154 | if (!FPMD) | |||
1155 | FPMD = DefaultFPMathTag; | |||
1156 | if (FPMD) | |||
1157 | I->setMetadata(LLVMContext::MD_fpmath, FPMD); | |||
1158 | I->setFastMathFlags(FMF); | |||
1159 | return I; | |||
1160 | } | |||
1161 | ||||
1162 | Value *foldConstant(Instruction::BinaryOps Opc, Value *L, | |||
1163 | Value *R, const Twine &Name) const { | |||
1164 | auto *LC = dyn_cast<Constant>(L); | |||
1165 | auto *RC = dyn_cast<Constant>(R); | |||
1166 | return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr; | |||
1167 | } | |||
1168 | ||||
1169 | Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) { | |||
1170 | RoundingMode UseRounding = DefaultConstrainedRounding; | |||
1171 | ||||
1172 | if (Rounding.hasValue()) | |||
1173 | UseRounding = Rounding.getValue(); | |||
1174 | ||||
1175 | Optional<StringRef> RoundingStr = RoundingModeToStr(UseRounding); | |||
1176 | assert(RoundingStr.hasValue() && "Garbage strict rounding mode!")((void)0); | |||
1177 | auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue()); | |||
1178 | ||||
1179 | return MetadataAsValue::get(Context, RoundingMDS); | |||
1180 | } | |||
1181 | ||||
1182 | Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) { | |||
1183 | fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept; | |||
1184 | ||||
1185 | if (Except.hasValue()) | |||
1186 | UseExcept = Except.getValue(); | |||
1187 | ||||
1188 | Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(UseExcept); | |||
1189 | assert(ExceptStr.hasValue() && "Garbage strict exception behavior!")((void)0); | |||
1190 | auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue()); | |||
1191 | ||||
1192 | return MetadataAsValue::get(Context, ExceptMDS); | |||
1193 | } | |||
1194 | ||||
1195 | Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) { | |||
1196 | assert(CmpInst::isFPPredicate(Predicate) &&((void)0) | |||
1197 | Predicate != CmpInst::FCMP_FALSE &&((void)0) | |||
1198 | Predicate != CmpInst::FCMP_TRUE &&((void)0) | |||
1199 | "Invalid constrained FP comparison predicate!")((void)0); | |||
1200 | ||||
1201 | StringRef PredicateStr = CmpInst::getPredicateName(Predicate); | |||
1202 | auto *PredicateMDS = MDString::get(Context, PredicateStr); | |||
1203 | ||||
1204 | return MetadataAsValue::get(Context, PredicateMDS); | |||
1205 | } | |||
1206 | ||||
1207 | public: | |||
1208 | Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "", | |||
1209 | bool HasNUW = false, bool HasNSW = false) { | |||
1210 | if (auto *LC = dyn_cast<Constant>(LHS)) | |||
1211 | if (auto *RC = dyn_cast<Constant>(RHS)) | |||
1212 | return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name); | |||
1213 | return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name, | |||
1214 | HasNUW, HasNSW); | |||
1215 | } | |||
1216 | ||||
1217 | Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1218 | return CreateAdd(LHS, RHS, Name, false, true); | |||
1219 | } | |||
1220 | ||||
1221 | Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1222 | return CreateAdd(LHS, RHS, Name, true, false); | |||
1223 | } | |||
1224 | ||||
1225 | Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "", | |||
1226 | bool HasNUW = false, bool HasNSW = false) { | |||
1227 | if (auto *LC = dyn_cast<Constant>(LHS)) | |||
1228 | if (auto *RC = dyn_cast<Constant>(RHS)) | |||
1229 | return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name); | |||
1230 | return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name, | |||
1231 | HasNUW, HasNSW); | |||
1232 | } | |||
1233 | ||||
1234 | Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1235 | return CreateSub(LHS, RHS, Name, false, true); | |||
1236 | } | |||
1237 | ||||
1238 | Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1239 | return CreateSub(LHS, RHS, Name, true, false); | |||
1240 | } | |||
1241 | ||||
1242 | Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "", | |||
1243 | bool HasNUW = false, bool HasNSW = false) { | |||
1244 | if (auto *LC = dyn_cast<Constant>(LHS)) | |||
1245 | if (auto *RC = dyn_cast<Constant>(RHS)) | |||
1246 | return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name); | |||
1247 | return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name, | |||
1248 | HasNUW, HasNSW); | |||
1249 | } | |||
1250 | ||||
1251 | Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1252 | return CreateMul(LHS, RHS, Name, false, true); | |||
1253 | } | |||
1254 | ||||
1255 | Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1256 | return CreateMul(LHS, RHS, Name, true, false); | |||
1257 | } | |||
1258 | ||||
1259 | Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "", | |||
1260 | bool isExact = false) { | |||
1261 | if (auto *LC = dyn_cast<Constant>(LHS)) | |||
1262 | if (auto *RC = dyn_cast<Constant>(RHS)) | |||
1263 | return Insert(Folder.CreateUDiv(LC, RC, isExact), Name); | |||
1264 | if (!isExact) | |||
1265 | return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name); | |||
1266 | return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name); | |||
1267 | } | |||
1268 | ||||
1269 | Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1270 | return CreateUDiv(LHS, RHS, Name, true); | |||
1271 | } | |||
1272 | ||||
1273 | Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "", | |||
1274 | bool isExact = false) { | |||
1275 | if (auto *LC = dyn_cast<Constant>(LHS)) | |||
1276 | if (auto *RC = dyn_cast<Constant>(RHS)) | |||
1277 | return Insert(Folder.CreateSDiv(LC, RC, isExact), Name); | |||
1278 | if (!isExact) | |||
1279 | return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name); | |||
1280 | return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name); | |||
1281 | } | |||
1282 | ||||
1283 | Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1284 | return CreateSDiv(LHS, RHS, Name, true); | |||
1285 | } | |||
1286 | ||||
1287 | Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1288 | if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V; | |||
1289 | return Insert(BinaryOperator::CreateURem(LHS, RHS), Name); | |||
1290 | } | |||
1291 | ||||
1292 | Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1293 | if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V; | |||
1294 | return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name); | |||
1295 | } | |||
1296 | ||||
1297 | Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "", | |||
1298 | bool HasNUW = false, bool HasNSW = false) { | |||
1299 | if (auto *LC = dyn_cast<Constant>(LHS)) | |||
1300 | if (auto *RC = dyn_cast<Constant>(RHS)) | |||
1301 | return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name); | |||
1302 | return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name, | |||
1303 | HasNUW, HasNSW); | |||
1304 | } | |||
1305 | ||||
1306 | Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "", | |||
1307 | bool HasNUW = false, bool HasNSW = false) { | |||
1308 | return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name, | |||
1309 | HasNUW, HasNSW); | |||
1310 | } | |||
1311 | ||||
1312 | Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "", | |||
1313 | bool HasNUW = false, bool HasNSW = false) { | |||
1314 | return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name, | |||
1315 | HasNUW, HasNSW); | |||
1316 | } | |||
1317 | ||||
1318 | Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "", | |||
1319 | bool isExact = false) { | |||
1320 | if (auto *LC = dyn_cast<Constant>(LHS)) | |||
1321 | if (auto *RC = dyn_cast<Constant>(RHS)) | |||
1322 | return Insert(Folder.CreateLShr(LC, RC, isExact), Name); | |||
1323 | if (!isExact) | |||
1324 | return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name); | |||
1325 | return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name); | |||
1326 | } | |||
1327 | ||||
1328 | Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "", | |||
1329 | bool isExact = false) { | |||
1330 | return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact); | |||
1331 | } | |||
1332 | ||||
1333 | Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "", | |||
1334 | bool isExact = false) { | |||
1335 | return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact); | |||
1336 | } | |||
1337 | ||||
1338 | Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "", | |||
1339 | bool isExact = false) { | |||
1340 | if (auto *LC = dyn_cast<Constant>(LHS)) | |||
1341 | if (auto *RC = dyn_cast<Constant>(RHS)) | |||
1342 | return Insert(Folder.CreateAShr(LC, RC, isExact), Name); | |||
1343 | if (!isExact) | |||
1344 | return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name); | |||
1345 | return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name); | |||
1346 | } | |||
1347 | ||||
1348 | Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "", | |||
1349 | bool isExact = false) { | |||
1350 | return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact); | |||
1351 | } | |||
1352 | ||||
1353 | Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "", | |||
1354 | bool isExact = false) { | |||
1355 | return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact); | |||
1356 | } | |||
1357 | ||||
1358 | Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1359 | if (auto *RC = dyn_cast<Constant>(RHS)) { | |||
1360 | if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne()) | |||
1361 | return LHS; // LHS & -1 -> LHS | |||
1362 | if (auto *LC = dyn_cast<Constant>(LHS)) | |||
1363 | return Insert(Folder.CreateAnd(LC, RC), Name); | |||
1364 | } | |||
1365 | return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name); | |||
1366 | } | |||
1367 | ||||
1368 | Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") { | |||
1369 | return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name); | |||
1370 | } | |||
1371 | ||||
1372 | Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") { | |||
1373 | return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name); | |||
1374 | } | |||
1375 | ||||
1376 | Value *CreateAnd(ArrayRef<Value*> Ops) { | |||
1377 | assert(!Ops.empty())((void)0); | |||
1378 | Value *Accum = Ops[0]; | |||
1379 | for (unsigned i = 1; i < Ops.size(); i++) | |||
1380 | Accum = CreateAnd(Accum, Ops[i]); | |||
1381 | return Accum; | |||
1382 | } | |||
1383 | ||||
1384 | Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1385 | if (auto *RC = dyn_cast<Constant>(RHS)) { | |||
1386 | if (RC->isNullValue()) | |||
1387 | return LHS; // LHS | 0 -> LHS | |||
1388 | if (auto *LC = dyn_cast<Constant>(LHS)) | |||
1389 | return Insert(Folder.CreateOr(LC, RC), Name); | |||
1390 | } | |||
1391 | return Insert(BinaryOperator::CreateOr(LHS, RHS), Name); | |||
1392 | } | |||
1393 | ||||
1394 | Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") { | |||
1395 | return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name); | |||
1396 | } | |||
1397 | ||||
1398 | Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") { | |||
1399 | return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name); | |||
1400 | } | |||
1401 | ||||
1402 | Value *CreateOr(ArrayRef<Value*> Ops) { | |||
1403 | assert(!Ops.empty())((void)0); | |||
1404 | Value *Accum = Ops[0]; | |||
1405 | for (unsigned i = 1; i < Ops.size(); i++) | |||
1406 | Accum = CreateOr(Accum, Ops[i]); | |||
1407 | return Accum; | |||
1408 | } | |||
1409 | ||||
1410 | Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1411 | if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V; | |||
1412 | return Insert(BinaryOperator::CreateXor(LHS, RHS), Name); | |||
1413 | } | |||
1414 | ||||
1415 | Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") { | |||
1416 | return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name); | |||
1417 | } | |||
1418 | ||||
1419 | Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") { | |||
1420 | return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name); | |||
1421 | } | |||
1422 | ||||
1423 | Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "", | |||
1424 | MDNode *FPMD = nullptr) { | |||
1425 | if (IsFPConstrained) | |||
1426 | return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd, | |||
1427 | L, R, nullptr, Name, FPMD); | |||
1428 | ||||
1429 | if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V; | |||
1430 | Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF); | |||
1431 | return Insert(I, Name); | |||
1432 | } | |||
1433 | ||||
1434 | /// Copy fast-math-flags from an instruction rather than using the builder's | |||
1435 | /// default FMF. | |||
1436 | Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource, | |||
1437 | const Twine &Name = "") { | |||
1438 | if (IsFPConstrained) | |||
1439 | return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd, | |||
1440 | L, R, FMFSource, Name); | |||
1441 | ||||
1442 | if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V; | |||
1443 | Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr, | |||
1444 | FMFSource->getFastMathFlags()); | |||
1445 | return Insert(I, Name); | |||
1446 | } | |||
1447 | ||||
1448 | Value *CreateFSub(Value *L, Value *R, const Twine &Name = "", | |||
1449 | MDNode *FPMD = nullptr) { | |||
1450 | if (IsFPConstrained) | |||
1451 | return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub, | |||
1452 | L, R, nullptr, Name, FPMD); | |||
1453 | ||||
1454 | if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V; | |||
1455 | Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF); | |||
1456 | return Insert(I, Name); | |||
1457 | } | |||
1458 | ||||
1459 | /// Copy fast-math-flags from an instruction rather than using the builder's | |||
1460 | /// default FMF. | |||
1461 | Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource, | |||
1462 | const Twine &Name = "") { | |||
1463 | if (IsFPConstrained) | |||
1464 | return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub, | |||
1465 | L, R, FMFSource, Name); | |||
1466 | ||||
1467 | if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V; | |||
1468 | Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr, | |||
1469 | FMFSource->getFastMathFlags()); | |||
1470 | return Insert(I, Name); | |||
1471 | } | |||
1472 | ||||
1473 | Value *CreateFMul(Value *L, Value *R, const Twine &Name = "", | |||
1474 | MDNode *FPMD = nullptr) { | |||
1475 | if (IsFPConstrained) | |||
1476 | return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul, | |||
1477 | L, R, nullptr, Name, FPMD); | |||
1478 | ||||
1479 | if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V; | |||
1480 | Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF); | |||
1481 | return Insert(I, Name); | |||
1482 | } | |||
1483 | ||||
1484 | /// Copy fast-math-flags from an instruction rather than using the builder's | |||
1485 | /// default FMF. | |||
1486 | Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource, | |||
1487 | const Twine &Name = "") { | |||
1488 | if (IsFPConstrained) | |||
1489 | return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul, | |||
1490 | L, R, FMFSource, Name); | |||
1491 | ||||
1492 | if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V; | |||
1493 | Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr, | |||
1494 | FMFSource->getFastMathFlags()); | |||
1495 | return Insert(I, Name); | |||
1496 | } | |||
1497 | ||||
1498 | Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "", | |||
1499 | MDNode *FPMD = nullptr) { | |||
1500 | if (IsFPConstrained) | |||
1501 | return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv, | |||
1502 | L, R, nullptr, Name, FPMD); | |||
1503 | ||||
1504 | if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V; | |||
1505 | Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF); | |||
1506 | return Insert(I, Name); | |||
1507 | } | |||
1508 | ||||
1509 | /// Copy fast-math-flags from an instruction rather than using the builder's | |||
1510 | /// default FMF. | |||
1511 | Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource, | |||
1512 | const Twine &Name = "") { | |||
1513 | if (IsFPConstrained) | |||
1514 | return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv, | |||
1515 | L, R, FMFSource, Name); | |||
1516 | ||||
1517 | if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V; | |||
1518 | Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr, | |||
1519 | FMFSource->getFastMathFlags()); | |||
1520 | return Insert(I, Name); | |||
1521 | } | |||
1522 | ||||
1523 | Value *CreateFRem(Value *L, Value *R, const Twine &Name = "", | |||
1524 | MDNode *FPMD = nullptr) { | |||
1525 | if (IsFPConstrained) | |||
1526 | return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem, | |||
1527 | L, R, nullptr, Name, FPMD); | |||
1528 | ||||
1529 | if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V; | |||
1530 | Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF); | |||
1531 | return Insert(I, Name); | |||
1532 | } | |||
1533 | ||||
1534 | /// Copy fast-math-flags from an instruction rather than using the builder's | |||
1535 | /// default FMF. | |||
1536 | Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource, | |||
1537 | const Twine &Name = "") { | |||
1538 | if (IsFPConstrained) | |||
1539 | return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem, | |||
1540 | L, R, FMFSource, Name); | |||
1541 | ||||
1542 | if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V; | |||
1543 | Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr, | |||
1544 | FMFSource->getFastMathFlags()); | |||
1545 | return Insert(I, Name); | |||
1546 | } | |||
1547 | ||||
1548 | Value *CreateBinOp(Instruction::BinaryOps Opc, | |||
1549 | Value *LHS, Value *RHS, const Twine &Name = "", | |||
1550 | MDNode *FPMathTag = nullptr) { | |||
1551 | if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V; | |||
1552 | Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS); | |||
1553 | if (isa<FPMathOperator>(BinOp)) | |||
1554 | setFPAttrs(BinOp, FPMathTag, FMF); | |||
1555 | return Insert(BinOp, Name); | |||
1556 | } | |||
1557 | ||||
1558 | Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") { | |||
1559 | assert(Cond2->getType()->isIntOrIntVectorTy(1))((void)0); | |||
1560 | return CreateSelect(Cond1, Cond2, | |||
1561 | ConstantInt::getNullValue(Cond2->getType()), Name); | |||
1562 | } | |||
1563 | ||||
1564 | Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") { | |||
1565 | assert(Cond2->getType()->isIntOrIntVectorTy(1))((void)0); | |||
1566 | return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()), | |||
1567 | Cond2, Name); | |||
1568 | } | |||
1569 | ||||
1570 | CallInst *CreateConstrainedFPBinOp( | |||
1571 | Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr, | |||
1572 | const Twine &Name = "", MDNode *FPMathTag = nullptr, | |||
1573 | Optional<RoundingMode> Rounding = None, | |||
1574 | Optional<fp::ExceptionBehavior> Except = None); | |||
1575 | ||||
1576 | Value *CreateNeg(Value *V, const Twine &Name = "", | |||
1577 | bool HasNUW = false, bool HasNSW = false) { | |||
1578 | if (auto *VC = dyn_cast<Constant>(V)) | |||
1579 | return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name); | |||
1580 | BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name); | |||
1581 | if (HasNUW) BO->setHasNoUnsignedWrap(); | |||
1582 | if (HasNSW) BO->setHasNoSignedWrap(); | |||
1583 | return BO; | |||
1584 | } | |||
1585 | ||||
1586 | Value *CreateNSWNeg(Value *V, const Twine &Name = "") { | |||
1587 | return CreateNeg(V, Name, false, true); | |||
1588 | } | |||
1589 | ||||
1590 | Value *CreateNUWNeg(Value *V, const Twine &Name = "") { | |||
1591 | return CreateNeg(V, Name, true, false); | |||
1592 | } | |||
1593 | ||||
1594 | Value *CreateFNeg(Value *V, const Twine &Name = "", | |||
1595 | MDNode *FPMathTag = nullptr) { | |||
1596 | if (auto *VC = dyn_cast<Constant>(V)) | |||
1597 | return Insert(Folder.CreateFNeg(VC), Name); | |||
1598 | return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF), | |||
1599 | Name); | |||
1600 | } | |||
1601 | ||||
1602 | /// Copy fast-math-flags from an instruction rather than using the builder's | |||
1603 | /// default FMF. | |||
1604 | Value *CreateFNegFMF(Value *V, Instruction *FMFSource, | |||
1605 | const Twine &Name = "") { | |||
1606 | if (auto *VC = dyn_cast<Constant>(V)) | |||
1607 | return Insert(Folder.CreateFNeg(VC), Name); | |||
1608 | return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr, | |||
1609 | FMFSource->getFastMathFlags()), | |||
1610 | Name); | |||
1611 | } | |||
1612 | ||||
1613 | Value *CreateNot(Value *V, const Twine &Name = "") { | |||
1614 | if (auto *VC = dyn_cast<Constant>(V)) | |||
1615 | return Insert(Folder.CreateNot(VC), Name); | |||
1616 | return Insert(BinaryOperator::CreateNot(V), Name); | |||
1617 | } | |||
1618 | ||||
1619 | Value *CreateUnOp(Instruction::UnaryOps Opc, | |||
1620 | Value *V, const Twine &Name = "", | |||
1621 | MDNode *FPMathTag = nullptr) { | |||
1622 | if (auto *VC = dyn_cast<Constant>(V)) | |||
1623 | return Insert(Folder.CreateUnOp(Opc, VC), Name); | |||
1624 | Instruction *UnOp = UnaryOperator::Create(Opc, V); | |||
1625 | if (isa<FPMathOperator>(UnOp)) | |||
1626 | setFPAttrs(UnOp, FPMathTag, FMF); | |||
1627 | return Insert(UnOp, Name); | |||
1628 | } | |||
1629 | ||||
1630 | /// Create either a UnaryOperator or BinaryOperator depending on \p Opc. | |||
1631 | /// Correct number of operands must be passed accordingly. | |||
1632 | Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops, | |||
1633 | const Twine &Name = "", MDNode *FPMathTag = nullptr); | |||
1634 | ||||
1635 | //===--------------------------------------------------------------------===// | |||
1636 | // Instruction creation methods: Memory Instructions | |||
1637 | //===--------------------------------------------------------------------===// | |||
1638 | ||||
1639 | AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace, | |||
1640 | Value *ArraySize = nullptr, const Twine &Name = "") { | |||
1641 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
1642 | Align AllocaAlign = DL.getPrefTypeAlign(Ty); | |||
1643 | return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name); | |||
1644 | } | |||
1645 | ||||
1646 | AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr, | |||
1647 | const Twine &Name = "") { | |||
1648 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
| ||||
1649 | Align AllocaAlign = DL.getPrefTypeAlign(Ty); | |||
1650 | unsigned AddrSpace = DL.getAllocaAddrSpace(); | |||
1651 | return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name); | |||
1652 | } | |||
1653 | ||||
1654 | /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of | |||
1655 | /// converting the string to 'bool' for the isVolatile parameter. | |||
1656 | LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) { | |||
1657 | return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name); | |||
1658 | } | |||
1659 | ||||
1660 | LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") { | |||
1661 | return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name); | |||
1662 | } | |||
1663 | ||||
1664 | LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile, | |||
1665 | const Twine &Name = "") { | |||
1666 | return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name); | |||
1667 | } | |||
1668 | ||||
1669 | // Deprecated [opaque pointer types] | |||
1670 | LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const char *Name) | |||
1671 | const char *Name),[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const char *Name) | |||
1672 | "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const char *Name) | |||
1673 | "loaded type instead")[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const char *Name) { | |||
1674 | return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name); | |||
1675 | } | |||
1676 | ||||
1677 | // Deprecated [opaque pointer types] | |||
1678 | LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") | |||
1679 | const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") | |||
1680 | "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") | |||
1681 | "loaded type instead")[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") { | |||
1682 | return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name); | |||
1683 | } | |||
1684 | ||||
1685 | // Deprecated [opaque pointer types] | |||
1686 | LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") | |||
1687 | bool isVolatile,[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") | |||
1688 | const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") | |||
1689 | "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") | |||
1690 | "loaded type instead")[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") { | |||
1691 | return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, isVolatile, | |||
1692 | Name); | |||
1693 | } | |||
1694 | ||||
1695 | StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) { | |||
1696 | return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile); | |||
1697 | } | |||
1698 | ||||
1699 | LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, | |||
1700 | const char *Name) { | |||
1701 | return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name); | |||
1702 | } | |||
1703 | ||||
1704 | LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, | |||
1705 | const Twine &Name = "") { | |||
1706 | return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name); | |||
1707 | } | |||
1708 | ||||
1709 | LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, | |||
1710 | bool isVolatile, const Twine &Name = "") { | |||
1711 | if (!Align) { | |||
1712 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
1713 | Align = DL.getABITypeAlign(Ty); | |||
1714 | } | |||
1715 | return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name); | |||
1716 | } | |||
1717 | ||||
1718 | // Deprecated [opaque pointer types] | |||
1719 | LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, const char *Name) | |||
1720 | MaybeAlign Align,[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, const char *Name) | |||
1721 | const char *Name),[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, const char *Name) | |||
1722 | "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, const char *Name) | |||
1723 | "loaded type instead")[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, const char *Name) { | |||
1724 | return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr, | |||
1725 | Align, Name); | |||
1726 | } | |||
1727 | // Deprecated [opaque pointer types] | |||
1728 | LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, const Twine &Name = "") | |||
1729 | MaybeAlign Align,[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, const Twine &Name = "") | |||
1730 | const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, const Twine &Name = "") | |||
1731 | "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, const Twine &Name = "") | |||
1732 | "loaded type instead")[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, const Twine &Name = "") { | |||
1733 | return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr, | |||
1734 | Align, Name); | |||
1735 | } | |||
1736 | // Deprecated [opaque pointer types] | |||
1737 | LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name = "") | |||
1738 | MaybeAlign Align,[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name = "") | |||
1739 | bool isVolatile,[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name = "") | |||
1740 | const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name = "") | |||
1741 | "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name = "") | |||
1742 | "loaded type instead")[[deprecated("Use the version that explicitly specifies the " "loaded type instead")]] LoadInst *CreateAlignedLoad(Value * Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name = "") { | |||
1743 | return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr, | |||
1744 | Align, isVolatile, Name); | |||
1745 | } | |||
1746 | ||||
1747 | StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, | |||
1748 | bool isVolatile = false) { | |||
1749 | if (!Align) { | |||
1750 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
1751 | Align = DL.getABITypeAlign(Val->getType()); | |||
1752 | } | |||
1753 | return Insert(new StoreInst(Val, Ptr, isVolatile, *Align)); | |||
1754 | } | |||
1755 | FenceInst *CreateFence(AtomicOrdering Ordering, | |||
1756 | SyncScope::ID SSID = SyncScope::System, | |||
1757 | const Twine &Name = "") { | |||
1758 | return Insert(new FenceInst(Context, Ordering, SSID), Name); | |||
1759 | } | |||
1760 | ||||
1761 | AtomicCmpXchgInst * | |||
1762 | CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align, | |||
1763 | AtomicOrdering SuccessOrdering, | |||
1764 | AtomicOrdering FailureOrdering, | |||
1765 | SyncScope::ID SSID = SyncScope::System) { | |||
1766 | if (!Align) { | |||
1767 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
1768 | Align = llvm::Align(DL.getTypeStoreSize(New->getType())); | |||
1769 | } | |||
1770 | ||||
1771 | return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering, | |||
1772 | FailureOrdering, SSID)); | |||
1773 | } | |||
1774 | ||||
1775 | AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, | |||
1776 | Value *Val, MaybeAlign Align, | |||
1777 | AtomicOrdering Ordering, | |||
1778 | SyncScope::ID SSID = SyncScope::System) { | |||
1779 | if (!Align) { | |||
1780 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
1781 | Align = llvm::Align(DL.getTypeStoreSize(Val->getType())); | |||
1782 | } | |||
1783 | ||||
1784 | return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID)); | |||
1785 | } | |||
1786 | ||||
1787 | LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead" )]] Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList , const Twine &Name = "") | |||
1788 | Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,[[deprecated("Use the version with explicit element type instead" )]] Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList , const Twine &Name = "") | |||
1789 | const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead" )]] Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList , const Twine &Name = "") | |||
1790 | "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead" )]] Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList , const Twine &Name = "") { | |||
1791 | return CreateGEP(Ptr->getType()->getScalarType()->getPointerElementType(), | |||
1792 | Ptr, IdxList, Name); | |||
1793 | } | |||
1794 | ||||
1795 | Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList, | |||
1796 | const Twine &Name = "") { | |||
1797 | if (auto *PC = dyn_cast<Constant>(Ptr)) { | |||
1798 | // Every index must be constant. | |||
1799 | size_t i, e; | |||
1800 | for (i = 0, e = IdxList.size(); i != e; ++i) | |||
1801 | if (!isa<Constant>(IdxList[i])) | |||
1802 | break; | |||
1803 | if (i == e) | |||
1804 | return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name); | |||
1805 | } | |||
1806 | return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name); | |||
1807 | } | |||
1808 | ||||
1809 | LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead" )]] Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &Name = "") | |||
1810 | Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,[[deprecated("Use the version with explicit element type instead" )]] Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &Name = "") | |||
1811 | const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead" )]] Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &Name = "") | |||
1812 | "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead" )]] Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &Name = "") { | |||
1813 | return CreateInBoundsGEP( | |||
1814 | Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList, | |||
1815 | Name); | |||
1816 | } | |||
1817 | ||||
1818 | Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList, | |||
1819 | const Twine &Name = "") { | |||
1820 | if (auto *PC = dyn_cast<Constant>(Ptr)) { | |||
1821 | // Every index must be constant. | |||
1822 | size_t i, e; | |||
1823 | for (i = 0, e = IdxList.size(); i != e; ++i) | |||
1824 | if (!isa<Constant>(IdxList[i])) | |||
1825 | break; | |||
1826 | if (i == e) | |||
1827 | return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList), | |||
1828 | Name); | |||
1829 | } | |||
1830 | return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name); | |||
1831 | } | |||
1832 | ||||
1833 | Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") { | |||
1834 | if (auto *PC = dyn_cast<Constant>(Ptr)) | |||
1835 | if (auto *IC = dyn_cast<Constant>(Idx)) | |||
1836 | return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name); | |||
1837 | return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name); | |||
1838 | } | |||
1839 | ||||
1840 | Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx, | |||
1841 | const Twine &Name = "") { | |||
1842 | if (auto *PC = dyn_cast<Constant>(Ptr)) | |||
1843 | if (auto *IC = dyn_cast<Constant>(Idx)) | |||
1844 | return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name); | |||
1845 | return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name); | |||
1846 | } | |||
1847 | ||||
1848 | LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") | |||
1849 | Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0,[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") | |||
1850 | const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") | |||
1851 | "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") { | |||
1852 | return CreateConstGEP1_32( | |||
1853 | Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0, | |||
1854 | Name); | |||
1855 | } | |||
1856 | ||||
1857 | Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, | |||
1858 | const Twine &Name = "") { | |||
1859 | Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0); | |||
1860 | ||||
1861 | if (auto *PC = dyn_cast<Constant>(Ptr)) | |||
1862 | return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name); | |||
1863 | ||||
1864 | return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name); | |||
1865 | } | |||
1866 | ||||
1867 | Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, | |||
1868 | const Twine &Name = "") { | |||
1869 | Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0); | |||
1870 | ||||
1871 | if (auto *PC = dyn_cast<Constant>(Ptr)) | |||
1872 | return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name); | |||
1873 | ||||
1874 | return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name); | |||
1875 | } | |||
1876 | ||||
1877 | Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1, | |||
1878 | const Twine &Name = "") { | |||
1879 | Value *Idxs[] = { | |||
1880 | ConstantInt::get(Type::getInt32Ty(Context), Idx0), | |||
1881 | ConstantInt::get(Type::getInt32Ty(Context), Idx1) | |||
1882 | }; | |||
1883 | ||||
1884 | if (auto *PC = dyn_cast<Constant>(Ptr)) | |||
1885 | return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name); | |||
1886 | ||||
1887 | return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name); | |||
1888 | } | |||
1889 | ||||
1890 | Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, | |||
1891 | unsigned Idx1, const Twine &Name = "") { | |||
1892 | Value *Idxs[] = { | |||
1893 | ConstantInt::get(Type::getInt32Ty(Context), Idx0), | |||
1894 | ConstantInt::get(Type::getInt32Ty(Context), Idx1) | |||
1895 | }; | |||
1896 | ||||
1897 | if (auto *PC = dyn_cast<Constant>(Ptr)) | |||
1898 | return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name); | |||
1899 | ||||
1900 | return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name); | |||
1901 | } | |||
1902 | ||||
1903 | Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0, | |||
1904 | const Twine &Name = "") { | |||
1905 | Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0); | |||
1906 | ||||
1907 | if (auto *PC = dyn_cast<Constant>(Ptr)) | |||
1908 | return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name); | |||
1909 | ||||
1910 | return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name); | |||
1911 | } | |||
1912 | ||||
1913 | LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") | |||
1914 | Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0,[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") | |||
1915 | const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") | |||
1916 | "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") { | |||
1917 | return CreateConstGEP1_64( | |||
1918 | Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0, | |||
1919 | Name); | |||
1920 | } | |||
1921 | ||||
1922 | Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0, | |||
1923 | const Twine &Name = "") { | |||
1924 | Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0); | |||
1925 | ||||
1926 | if (auto *PC = dyn_cast<Constant>(Ptr)) | |||
1927 | return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name); | |||
1928 | ||||
1929 | return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name); | |||
1930 | } | |||
1931 | ||||
1932 | LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0 , const Twine &Name = "") | |||
1933 | Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0 , const Twine &Name = "") | |||
1934 | const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0 , const Twine &Name = "") | |||
1935 | "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0 , const Twine &Name = "") { | |||
1936 | return CreateConstInBoundsGEP1_64( | |||
1937 | Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0, | |||
1938 | Name); | |||
1939 | } | |||
1940 | ||||
1941 | Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1, | |||
1942 | const Twine &Name = "") { | |||
1943 | Value *Idxs[] = { | |||
1944 | ConstantInt::get(Type::getInt64Ty(Context), Idx0), | |||
1945 | ConstantInt::get(Type::getInt64Ty(Context), Idx1) | |||
1946 | }; | |||
1947 | ||||
1948 | if (auto *PC = dyn_cast<Constant>(Ptr)) | |||
1949 | return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name); | |||
1950 | ||||
1951 | return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name); | |||
1952 | } | |||
1953 | ||||
1954 | LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1, const Twine &Name = "") | |||
1955 | Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1, const Twine &Name = "") | |||
1956 | const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1, const Twine &Name = "") | |||
1957 | "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1, const Twine &Name = "") { | |||
1958 | return CreateConstGEP2_64( | |||
1959 | Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0, | |||
1960 | Idx1, Name); | |||
1961 | } | |||
1962 | ||||
1963 | Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, | |||
1964 | uint64_t Idx1, const Twine &Name = "") { | |||
1965 | Value *Idxs[] = { | |||
1966 | ConstantInt::get(Type::getInt64Ty(Context), Idx0), | |||
1967 | ConstantInt::get(Type::getInt64Ty(Context), Idx1) | |||
1968 | }; | |||
1969 | ||||
1970 | if (auto *PC = dyn_cast<Constant>(Ptr)) | |||
1971 | return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name); | |||
1972 | ||||
1973 | return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name); | |||
1974 | } | |||
1975 | ||||
1976 | LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0 , uint64_t Idx1, const Twine &Name = "") | |||
1977 | Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0,[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0 , uint64_t Idx1, const Twine &Name = "") | |||
1978 | uint64_t Idx1, const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0 , uint64_t Idx1, const Twine &Name = "") | |||
1979 | "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead" )]] Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0 , uint64_t Idx1, const Twine &Name = "") { | |||
1980 | return CreateConstInBoundsGEP2_64( | |||
1981 | Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0, | |||
1982 | Idx1, Name); | |||
1983 | } | |||
1984 | ||||
1985 | Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx, | |||
1986 | const Twine &Name = "") { | |||
1987 | return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name); | |||
1988 | } | |||
1989 | ||||
1990 | LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead" )]] Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = "") | |||
1991 | Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead" )]] Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = "") | |||
1992 | "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead" )]] Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = "") { | |||
1993 | return CreateConstInBoundsGEP2_32( | |||
1994 | Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, 0, Idx, | |||
1995 | Name); | |||
1996 | } | |||
1997 | ||||
1998 | /// Same as CreateGlobalString, but return a pointer with "i8*" type | |||
1999 | /// instead of a pointer to array of i8. | |||
2000 | /// | |||
2001 | /// If no module is given via \p M, it is take from the insertion point basic | |||
2002 | /// block. | |||
2003 | Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "", | |||
2004 | unsigned AddressSpace = 0, | |||
2005 | Module *M = nullptr) { | |||
2006 | GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M); | |||
2007 | Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0); | |||
2008 | Constant *Indices[] = {Zero, Zero}; | |||
2009 | return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV, | |||
2010 | Indices); | |||
2011 | } | |||
2012 | ||||
2013 | //===--------------------------------------------------------------------===// | |||
2014 | // Instruction creation methods: Cast/Conversion Operators | |||
2015 | //===--------------------------------------------------------------------===// | |||
2016 | ||||
2017 | Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") { | |||
2018 | return CreateCast(Instruction::Trunc, V, DestTy, Name); | |||
2019 | } | |||
2020 | ||||
2021 | Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") { | |||
2022 | return CreateCast(Instruction::ZExt, V, DestTy, Name); | |||
2023 | } | |||
2024 | ||||
2025 | Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") { | |||
2026 | return CreateCast(Instruction::SExt, V, DestTy, Name); | |||
2027 | } | |||
2028 | ||||
2029 | /// Create a ZExt or Trunc from the integer value V to DestTy. Return | |||
2030 | /// the value untouched if the type of V is already DestTy. | |||
2031 | Value *CreateZExtOrTrunc(Value *V, Type *DestTy, | |||
2032 | const Twine &Name = "") { | |||
2033 | assert(V->getType()->isIntOrIntVectorTy() &&((void)0) | |||
2034 | DestTy->isIntOrIntVectorTy() &&((void)0) | |||
2035 | "Can only zero extend/truncate integers!")((void)0); | |||
2036 | Type *VTy = V->getType(); | |||
2037 | if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits()) | |||
2038 | return CreateZExt(V, DestTy, Name); | |||
2039 | if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits()) | |||
2040 | return CreateTrunc(V, DestTy, Name); | |||
2041 | return V; | |||
2042 | } | |||
2043 | ||||
2044 | /// Create a SExt or Trunc from the integer value V to DestTy. Return | |||
2045 | /// the value untouched if the type of V is already DestTy. | |||
2046 | Value *CreateSExtOrTrunc(Value *V, Type *DestTy, | |||
2047 | const Twine &Name = "") { | |||
2048 | assert(V->getType()->isIntOrIntVectorTy() &&((void)0) | |||
2049 | DestTy->isIntOrIntVectorTy() &&((void)0) | |||
2050 | "Can only sign extend/truncate integers!")((void)0); | |||
2051 | Type *VTy = V->getType(); | |||
2052 | if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits()) | |||
2053 | return CreateSExt(V, DestTy, Name); | |||
2054 | if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits()) | |||
2055 | return CreateTrunc(V, DestTy, Name); | |||
2056 | return V; | |||
2057 | } | |||
2058 | ||||
2059 | Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") { | |||
2060 | if (IsFPConstrained) | |||
2061 | return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui, | |||
2062 | V, DestTy, nullptr, Name); | |||
2063 | return CreateCast(Instruction::FPToUI, V, DestTy, Name); | |||
2064 | } | |||
2065 | ||||
2066 | Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") { | |||
2067 | if (IsFPConstrained) | |||
2068 | return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi, | |||
2069 | V, DestTy, nullptr, Name); | |||
2070 | return CreateCast(Instruction::FPToSI, V, DestTy, Name); | |||
2071 | } | |||
2072 | ||||
2073 | Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){ | |||
2074 | if (IsFPConstrained) | |||
2075 | return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp, | |||
2076 | V, DestTy, nullptr, Name); | |||
2077 | return CreateCast(Instruction::UIToFP, V, DestTy, Name); | |||
2078 | } | |||
2079 | ||||
2080 | Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){ | |||
2081 | if (IsFPConstrained) | |||
2082 | return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp, | |||
2083 | V, DestTy, nullptr, Name); | |||
2084 | return CreateCast(Instruction::SIToFP, V, DestTy, Name); | |||
2085 | } | |||
2086 | ||||
2087 | Value *CreateFPTrunc(Value *V, Type *DestTy, | |||
2088 | const Twine &Name = "") { | |||
2089 | if (IsFPConstrained) | |||
2090 | return CreateConstrainedFPCast( | |||
2091 | Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr, | |||
2092 | Name); | |||
2093 | return CreateCast(Instruction::FPTrunc, V, DestTy, Name); | |||
2094 | } | |||
2095 | ||||
2096 | Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") { | |||
2097 | if (IsFPConstrained) | |||
2098 | return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext, | |||
2099 | V, DestTy, nullptr, Name); | |||
2100 | return CreateCast(Instruction::FPExt, V, DestTy, Name); | |||
2101 | } | |||
2102 | ||||
2103 | Value *CreatePtrToInt(Value *V, Type *DestTy, | |||
2104 | const Twine &Name = "") { | |||
2105 | return CreateCast(Instruction::PtrToInt, V, DestTy, Name); | |||
2106 | } | |||
2107 | ||||
2108 | Value *CreateIntToPtr(Value *V, Type *DestTy, | |||
2109 | const Twine &Name = "") { | |||
2110 | return CreateCast(Instruction::IntToPtr, V, DestTy, Name); | |||
2111 | } | |||
2112 | ||||
2113 | Value *CreateBitCast(Value *V, Type *DestTy, | |||
2114 | const Twine &Name = "") { | |||
2115 | return CreateCast(Instruction::BitCast, V, DestTy, Name); | |||
2116 | } | |||
2117 | ||||
2118 | Value *CreateAddrSpaceCast(Value *V, Type *DestTy, | |||
2119 | const Twine &Name = "") { | |||
2120 | return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name); | |||
2121 | } | |||
2122 | ||||
2123 | Value *CreateZExtOrBitCast(Value *V, Type *DestTy, | |||
2124 | const Twine &Name = "") { | |||
2125 | if (V->getType() == DestTy) | |||
2126 | return V; | |||
2127 | if (auto *VC = dyn_cast<Constant>(V)) | |||
2128 | return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name); | |||
2129 | return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name); | |||
2130 | } | |||
2131 | ||||
2132 | Value *CreateSExtOrBitCast(Value *V, Type *DestTy, | |||
2133 | const Twine &Name = "") { | |||
2134 | if (V->getType() == DestTy) | |||
2135 | return V; | |||
2136 | if (auto *VC = dyn_cast<Constant>(V)) | |||
2137 | return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name); | |||
2138 | return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name); | |||
2139 | } | |||
2140 | ||||
2141 | Value *CreateTruncOrBitCast(Value *V, Type *DestTy, | |||
2142 | const Twine &Name = "") { | |||
2143 | if (V->getType() == DestTy) | |||
2144 | return V; | |||
2145 | if (auto *VC = dyn_cast<Constant>(V)) | |||
2146 | return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name); | |||
2147 | return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name); | |||
2148 | } | |||
2149 | ||||
2150 | Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, | |||
2151 | const Twine &Name = "") { | |||
2152 | if (V->getType() == DestTy) | |||
2153 | return V; | |||
2154 | if (auto *VC = dyn_cast<Constant>(V)) | |||
2155 | return Insert(Folder.CreateCast(Op, VC, DestTy), Name); | |||
2156 | return Insert(CastInst::Create(Op, V, DestTy), Name); | |||
2157 | } | |||
2158 | ||||
2159 | Value *CreatePointerCast(Value *V, Type *DestTy, | |||
2160 | const Twine &Name = "") { | |||
2161 | if (V->getType() == DestTy) | |||
2162 | return V; | |||
2163 | if (auto *VC = dyn_cast<Constant>(V)) | |||
2164 | return Insert(Folder.CreatePointerCast(VC, DestTy), Name); | |||
2165 | return Insert(CastInst::CreatePointerCast(V, DestTy), Name); | |||
2166 | } | |||
2167 | ||||
2168 | Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy, | |||
2169 | const Twine &Name = "") { | |||
2170 | if (V->getType() == DestTy) | |||
2171 | return V; | |||
2172 | ||||
2173 | if (auto *VC = dyn_cast<Constant>(V)) { | |||
2174 | return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy), | |||
2175 | Name); | |||
2176 | } | |||
2177 | ||||
2178 | return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy), | |||
2179 | Name); | |||
2180 | } | |||
2181 | ||||
2182 | Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned, | |||
2183 | const Twine &Name = "") { | |||
2184 | if (V->getType() == DestTy) | |||
2185 | return V; | |||
2186 | if (auto *VC = dyn_cast<Constant>(V)) | |||
2187 | return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name); | |||
2188 | return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name); | |||
2189 | } | |||
2190 | ||||
2191 | Value *CreateBitOrPointerCast(Value *V, Type *DestTy, | |||
2192 | const Twine &Name = "") { | |||
2193 | if (V->getType() == DestTy) | |||
2194 | return V; | |||
2195 | if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy()) | |||
2196 | return CreatePtrToInt(V, DestTy, Name); | |||
2197 | if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy()) | |||
2198 | return CreateIntToPtr(V, DestTy, Name); | |||
2199 | ||||
2200 | return CreateBitCast(V, DestTy, Name); | |||
2201 | } | |||
2202 | ||||
2203 | Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") { | |||
2204 | if (V->getType() == DestTy) | |||
2205 | return V; | |||
2206 | if (auto *VC = dyn_cast<Constant>(V)) | |||
2207 | return Insert(Folder.CreateFPCast(VC, DestTy), Name); | |||
2208 | return Insert(CastInst::CreateFPCast(V, DestTy), Name); | |||
2209 | } | |||
2210 | ||||
2211 | CallInst *CreateConstrainedFPCast( | |||
2212 | Intrinsic::ID ID, Value *V, Type *DestTy, | |||
2213 | Instruction *FMFSource = nullptr, const Twine &Name = "", | |||
2214 | MDNode *FPMathTag = nullptr, | |||
2215 | Optional<RoundingMode> Rounding = None, | |||
2216 | Optional<fp::ExceptionBehavior> Except = None); | |||
2217 | ||||
2218 | // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a | |||
2219 | // compile time error, instead of converting the string to bool for the | |||
2220 | // isSigned parameter. | |||
2221 | Value *CreateIntCast(Value *, Type *, const char *) = delete; | |||
2222 | ||||
2223 | //===--------------------------------------------------------------------===// | |||
2224 | // Instruction creation methods: Compare Instructions | |||
2225 | //===--------------------------------------------------------------------===// | |||
2226 | ||||
2227 | Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
2228 | return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name); | |||
2229 | } | |||
2230 | ||||
2231 | Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
2232 | return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name); | |||
2233 | } | |||
2234 | ||||
2235 | Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
2236 | return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name); | |||
2237 | } | |||
2238 | ||||
2239 | Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
2240 | return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name); | |||
2241 | } | |||
2242 | ||||
2243 | Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
2244 | return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name); | |||
2245 | } | |||
2246 | ||||
2247 | Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
2248 | return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name); | |||
2249 | } | |||
2250 | ||||
2251 | Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
2252 | return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name); | |||
2253 | } | |||
2254 | ||||
2255 | Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
2256 | return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name); | |||
2257 | } | |||
2258 | ||||
2259 | Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
2260 | return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name); | |||
2261 | } | |||
2262 | ||||
2263 | Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
2264 | return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name); | |||
2265 | } | |||
2266 | ||||
2267 | Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "", | |||
2268 | MDNode *FPMathTag = nullptr) { | |||
2269 | return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag); | |||
2270 | } | |||
2271 | ||||
2272 | Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "", | |||
2273 | MDNode *FPMathTag = nullptr) { | |||
2274 | return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag); | |||
2275 | } | |||
2276 | ||||
2277 | Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "", | |||
2278 | MDNode *FPMathTag = nullptr) { | |||
2279 | return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag); | |||
2280 | } | |||
2281 | ||||
2282 | Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "", | |||
2283 | MDNode *FPMathTag = nullptr) { | |||
2284 | return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag); | |||
2285 | } | |||
2286 | ||||
2287 | Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "", | |||
2288 | MDNode *FPMathTag = nullptr) { | |||
2289 | return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag); | |||
2290 | } | |||
2291 | ||||
2292 | Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "", | |||
2293 | MDNode *FPMathTag = nullptr) { | |||
2294 | return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag); | |||
2295 | } | |||
2296 | ||||
2297 | Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "", | |||
2298 | MDNode *FPMathTag = nullptr) { | |||
2299 | return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag); | |||
2300 | } | |||
2301 | ||||
2302 | Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "", | |||
2303 | MDNode *FPMathTag = nullptr) { | |||
2304 | return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag); | |||
2305 | } | |||
2306 | ||||
2307 | Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "", | |||
2308 | MDNode *FPMathTag = nullptr) { | |||
2309 | return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag); | |||
2310 | } | |||
2311 | ||||
2312 | Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "", | |||
2313 | MDNode *FPMathTag = nullptr) { | |||
2314 | return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag); | |||
2315 | } | |||
2316 | ||||
2317 | Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "", | |||
2318 | MDNode *FPMathTag = nullptr) { | |||
2319 | return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag); | |||
2320 | } | |||
2321 | ||||
2322 | Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "", | |||
2323 | MDNode *FPMathTag = nullptr) { | |||
2324 | return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag); | |||
2325 | } | |||
2326 | ||||
2327 | Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "", | |||
2328 | MDNode *FPMathTag = nullptr) { | |||
2329 | return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag); | |||
2330 | } | |||
2331 | ||||
2332 | Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "", | |||
2333 | MDNode *FPMathTag = nullptr) { | |||
2334 | return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag); | |||
2335 | } | |||
2336 | ||||
2337 | Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, | |||
2338 | const Twine &Name = "") { | |||
2339 | if (auto *LC = dyn_cast<Constant>(LHS)) | |||
2340 | if (auto *RC = dyn_cast<Constant>(RHS)) | |||
2341 | return Insert(Folder.CreateICmp(P, LC, RC), Name); | |||
2342 | return Insert(new ICmpInst(P, LHS, RHS), Name); | |||
2343 | } | |||
2344 | ||||
2345 | // Create a quiet floating-point comparison (i.e. one that raises an FP | |||
2346 | // exception only in the case where an input is a signaling NaN). | |||
2347 | // Note that this differs from CreateFCmpS only if IsFPConstrained is true. | |||
2348 | Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS, | |||
2349 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { | |||
2350 | return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false); | |||
2351 | } | |||
2352 | ||||
2353 | Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, | |||
2354 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { | |||
2355 | return CmpInst::isFPPredicate(Pred) | |||
2356 | ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag) | |||
2357 | : CreateICmp(Pred, LHS, RHS, Name); | |||
2358 | } | |||
2359 | ||||
2360 | // Create a signaling floating-point comparison (i.e. one that raises an FP | |||
2361 | // exception whenever an input is any NaN, signaling or quiet). | |||
2362 | // Note that this differs from CreateFCmp only if IsFPConstrained is true. | |||
2363 | Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS, | |||
2364 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { | |||
2365 | return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true); | |||
2366 | } | |||
2367 | ||||
2368 | private: | |||
2369 | // Helper routine to create either a signaling or a quiet FP comparison. | |||
2370 | Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS, | |||
2371 | const Twine &Name, MDNode *FPMathTag, | |||
2372 | bool IsSignaling); | |||
2373 | ||||
2374 | public: | |||
2375 | CallInst *CreateConstrainedFPCmp( | |||
2376 | Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R, | |||
2377 | const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None); | |||
2378 | ||||
2379 | //===--------------------------------------------------------------------===// | |||
2380 | // Instruction creation methods: Other Instructions | |||
2381 | //===--------------------------------------------------------------------===// | |||
2382 | ||||
2383 | PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues, | |||
2384 | const Twine &Name = "") { | |||
2385 | PHINode *Phi = PHINode::Create(Ty, NumReservedValues); | |||
2386 | if (isa<FPMathOperator>(Phi)) | |||
2387 | setFPAttrs(Phi, nullptr /* MDNode* */, FMF); | |||
2388 | return Insert(Phi, Name); | |||
2389 | } | |||
2390 | ||||
2391 | CallInst *CreateCall(FunctionType *FTy, Value *Callee, | |||
2392 | ArrayRef<Value *> Args = None, const Twine &Name = "", | |||
2393 | MDNode *FPMathTag = nullptr) { | |||
2394 | CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles); | |||
2395 | if (IsFPConstrained) | |||
2396 | setConstrainedFPCallAttr(CI); | |||
2397 | if (isa<FPMathOperator>(CI)) | |||
2398 | setFPAttrs(CI, FPMathTag, FMF); | |||
2399 | return Insert(CI, Name); | |||
2400 | } | |||
2401 | ||||
2402 | CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args, | |||
2403 | ArrayRef<OperandBundleDef> OpBundles, | |||
2404 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { | |||
2405 | CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles); | |||
2406 | if (IsFPConstrained) | |||
2407 | setConstrainedFPCallAttr(CI); | |||
2408 | if (isa<FPMathOperator>(CI)) | |||
2409 | setFPAttrs(CI, FPMathTag, FMF); | |||
2410 | return Insert(CI, Name); | |||
2411 | } | |||
2412 | ||||
2413 | CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None, | |||
2414 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { | |||
2415 | return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name, | |||
2416 | FPMathTag); | |||
2417 | } | |||
2418 | ||||
2419 | CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args, | |||
2420 | ArrayRef<OperandBundleDef> OpBundles, | |||
2421 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { | |||
2422 | return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, | |||
2423 | OpBundles, Name, FPMathTag); | |||
2424 | } | |||
2425 | ||||
2426 | CallInst *CreateConstrainedFPCall( | |||
2427 | Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "", | |||
2428 | Optional<RoundingMode> Rounding = None, | |||
2429 | Optional<fp::ExceptionBehavior> Except = None); | |||
2430 | ||||
2431 | Value *CreateSelect(Value *C, Value *True, Value *False, | |||
2432 | const Twine &Name = "", Instruction *MDFrom = nullptr); | |||
2433 | ||||
2434 | VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") { | |||
2435 | return Insert(new VAArgInst(List, Ty), Name); | |||
2436 | } | |||
2437 | ||||
2438 | Value *CreateExtractElement(Value *Vec, Value *Idx, | |||
2439 | const Twine &Name = "") { | |||
2440 | if (auto *VC = dyn_cast<Constant>(Vec)) | |||
2441 | if (auto *IC = dyn_cast<Constant>(Idx)) | |||
2442 | return Insert(Folder.CreateExtractElement(VC, IC), Name); | |||
2443 | return Insert(ExtractElementInst::Create(Vec, Idx), Name); | |||
2444 | } | |||
2445 | ||||
2446 | Value *CreateExtractElement(Value *Vec, uint64_t Idx, | |||
2447 | const Twine &Name = "") { | |||
2448 | return CreateExtractElement(Vec, getInt64(Idx), Name); | |||
2449 | } | |||
2450 | ||||
2451 | Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx, | |||
2452 | const Twine &Name = "") { | |||
2453 | if (auto *VC = dyn_cast<Constant>(Vec)) | |||
2454 | if (auto *NC = dyn_cast<Constant>(NewElt)) | |||
2455 | if (auto *IC = dyn_cast<Constant>(Idx)) | |||
2456 | return Insert(Folder.CreateInsertElement(VC, NC, IC), Name); | |||
2457 | return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name); | |||
2458 | } | |||
2459 | ||||
2460 | Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx, | |||
2461 | const Twine &Name = "") { | |||
2462 | return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name); | |||
2463 | } | |||
2464 | ||||
2465 | Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask, | |||
2466 | const Twine &Name = "") { | |||
2467 | SmallVector<int, 16> IntMask; | |||
2468 | ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask); | |||
2469 | return CreateShuffleVector(V1, V2, IntMask, Name); | |||
2470 | } | |||
2471 | ||||
2472 | LLVM_ATTRIBUTE_DEPRECATED(Value *CreateShuffleVector(Value *V1, Value *V2,[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector (Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine &Name = "") | |||
2473 | ArrayRef<uint32_t> Mask,[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector (Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine &Name = "") | |||
2474 | const Twine &Name = ""),[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector (Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine &Name = "") | |||
2475 | "Pass indices as 'int' instead")[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector (Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine &Name = "") { | |||
2476 | SmallVector<int, 16> IntMask; | |||
2477 | IntMask.assign(Mask.begin(), Mask.end()); | |||
2478 | return CreateShuffleVector(V1, V2, IntMask, Name); | |||
2479 | } | |||
2480 | ||||
2481 | /// See class ShuffleVectorInst for a description of the mask representation. | |||
2482 | Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask, | |||
2483 | const Twine &Name = "") { | |||
2484 | if (auto *V1C = dyn_cast<Constant>(V1)) | |||
2485 | if (auto *V2C = dyn_cast<Constant>(V2)) | |||
2486 | return Insert(Folder.CreateShuffleVector(V1C, V2C, Mask), Name); | |||
2487 | return Insert(new ShuffleVectorInst(V1, V2, Mask), Name); | |||
2488 | } | |||
2489 | ||||
2490 | /// Create a unary shuffle. The second vector operand of the IR instruction | |||
2491 | /// is poison. | |||
2492 | Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask, | |||
2493 | const Twine &Name = "") { | |||
2494 | return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name); | |||
2495 | } | |||
2496 | ||||
2497 | Value *CreateExtractValue(Value *Agg, | |||
2498 | ArrayRef<unsigned> Idxs, | |||
2499 | const Twine &Name = "") { | |||
2500 | if (auto *AggC = dyn_cast<Constant>(Agg)) | |||
2501 | return Insert(Folder.CreateExtractValue(AggC, Idxs), Name); | |||
2502 | return Insert(ExtractValueInst::Create(Agg, Idxs), Name); | |||
2503 | } | |||
2504 | ||||
2505 | Value *CreateInsertValue(Value *Agg, Value *Val, | |||
2506 | ArrayRef<unsigned> Idxs, | |||
2507 | const Twine &Name = "") { | |||
2508 | if (auto *AggC = dyn_cast<Constant>(Agg)) | |||
2509 | if (auto *ValC = dyn_cast<Constant>(Val)) | |||
2510 | return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name); | |||
2511 | return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name); | |||
2512 | } | |||
2513 | ||||
2514 | LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses, | |||
2515 | const Twine &Name = "") { | |||
2516 | return Insert(LandingPadInst::Create(Ty, NumClauses), Name); | |||
2517 | } | |||
2518 | ||||
2519 | Value *CreateFreeze(Value *V, const Twine &Name = "") { | |||
2520 | return Insert(new FreezeInst(V), Name); | |||
2521 | } | |||
2522 | ||||
2523 | //===--------------------------------------------------------------------===// | |||
2524 | // Utility creation methods | |||
2525 | //===--------------------------------------------------------------------===// | |||
2526 | ||||
2527 | /// Return an i1 value testing if \p Arg is null. | |||
2528 | Value *CreateIsNull(Value *Arg, const Twine &Name = "") { | |||
2529 | return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()), | |||
2530 | Name); | |||
2531 | } | |||
2532 | ||||
2533 | /// Return an i1 value testing if \p Arg is not null. | |||
2534 | Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") { | |||
2535 | return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()), | |||
2536 | Name); | |||
2537 | } | |||
2538 | ||||
2539 | /// Return the i64 difference between two pointer values, dividing out | |||
2540 | /// the size of the pointed-to objects. | |||
2541 | /// | |||
2542 | /// This is intended to implement C-style pointer subtraction. As such, the | |||
2543 | /// pointers must be appropriately aligned for their element types and | |||
2544 | /// pointing into the same object. | |||
2545 | Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = ""); | |||
2546 | ||||
2547 | /// Create a launder.invariant.group intrinsic call. If Ptr type is | |||
2548 | /// different from pointer to i8, it's casted to pointer to i8 in the same | |||
2549 | /// address space before call and casted back to Ptr type after call. | |||
2550 | Value *CreateLaunderInvariantGroup(Value *Ptr); | |||
2551 | ||||
2552 | /// \brief Create a strip.invariant.group intrinsic call. If Ptr type is | |||
2553 | /// different from pointer to i8, it's casted to pointer to i8 in the same | |||
2554 | /// address space before call and casted back to Ptr type after call. | |||
2555 | Value *CreateStripInvariantGroup(Value *Ptr); | |||
2556 | ||||
2557 | /// Return a vector value that contains the vector V reversed | |||
2558 | Value *CreateVectorReverse(Value *V, const Twine &Name = ""); | |||
2559 | ||||
2560 | /// Return a vector splice intrinsic if using scalable vectors, otherwise | |||
2561 | /// return a shufflevector. If the immediate is positive, a vector is | |||
2562 | /// extracted from concat(V1, V2), starting at Imm. If the immediate | |||
2563 | /// is negative, we extract -Imm elements from V1 and the remaining | |||
2564 | /// elements from V2. Imm is a signed integer in the range | |||
2565 | /// -VL <= Imm < VL (where VL is the runtime vector length of the | |||
2566 | /// source/result vector) | |||
2567 | Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm, | |||
2568 | const Twine &Name = ""); | |||
2569 | ||||
2570 | /// Return a vector value that contains \arg V broadcasted to \p | |||
2571 | /// NumElts elements. | |||
2572 | Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = ""); | |||
2573 | ||||
2574 | /// Return a vector value that contains \arg V broadcasted to \p | |||
2575 | /// EC elements. | |||
2576 | Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = ""); | |||
2577 | ||||
2578 | /// Return a value that has been extracted from a larger integer type. | |||
2579 | Value *CreateExtractInteger(const DataLayout &DL, Value *From, | |||
2580 | IntegerType *ExtractedTy, uint64_t Offset, | |||
2581 | const Twine &Name); | |||
2582 | ||||
2583 | Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base, | |||
2584 | unsigned Dimension, unsigned LastIndex, | |||
2585 | MDNode *DbgInfo); | |||
2586 | ||||
2587 | Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex, | |||
2588 | MDNode *DbgInfo); | |||
2589 | ||||
2590 | Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base, | |||
2591 | unsigned Index, unsigned FieldIndex, | |||
2592 | MDNode *DbgInfo); | |||
2593 | ||||
2594 | private: | |||
2595 | /// Helper function that creates an assume intrinsic call that | |||
2596 | /// represents an alignment assumption on the provided pointer \p PtrValue | |||
2597 | /// with offset \p OffsetValue and alignment value \p AlignValue. | |||
2598 | CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL, | |||
2599 | Value *PtrValue, Value *AlignValue, | |||
2600 | Value *OffsetValue); | |||
2601 | ||||
2602 | public: | |||
2603 | /// Create an assume intrinsic call that represents an alignment | |||
2604 | /// assumption on the provided pointer. | |||
2605 | /// | |||
2606 | /// An optional offset can be provided, and if it is provided, the offset | |||
2607 | /// must be subtracted from the provided pointer to get the pointer with the | |||
2608 | /// specified alignment. | |||
2609 | CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, | |||
2610 | unsigned Alignment, | |||
2611 | Value *OffsetValue = nullptr); | |||
2612 | ||||
2613 | /// Create an assume intrinsic call that represents an alignment | |||
2614 | /// assumption on the provided pointer. | |||
2615 | /// | |||
2616 | /// An optional offset can be provided, and if it is provided, the offset | |||
2617 | /// must be subtracted from the provided pointer to get the pointer with the | |||
2618 | /// specified alignment. | |||
2619 | /// | |||
2620 | /// This overload handles the condition where the Alignment is dependent | |||
2621 | /// on an existing value rather than a static value. | |||
2622 | CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, | |||
2623 | Value *Alignment, | |||
2624 | Value *OffsetValue = nullptr); | |||
2625 | }; | |||
2626 | ||||
2627 | /// This provides a uniform API for creating instructions and inserting | |||
2628 | /// them into a basic block: either at the end of a BasicBlock, or at a specific | |||
2629 | /// iterator location in a block. | |||
2630 | /// | |||
2631 | /// Note that the builder does not expose the full generality of LLVM | |||
2632 | /// instructions. For access to extra instruction properties, use the mutators | |||
2633 | /// (e.g. setVolatile) on the instructions after they have been | |||
2634 | /// created. Convenience state exists to specify fast-math flags and fp-math | |||
2635 | /// tags. | |||
2636 | /// | |||
2637 | /// The first template argument specifies a class to use for creating constants. | |||
2638 | /// This defaults to creating minimally folded constants. The second template | |||
2639 | /// argument allows clients to specify custom insertion hooks that are called on | |||
2640 | /// every newly created insertion. | |||
2641 | template <typename FolderTy = ConstantFolder, | |||
2642 | typename InserterTy = IRBuilderDefaultInserter> | |||
2643 | class IRBuilder : public IRBuilderBase { | |||
2644 | private: | |||
2645 | FolderTy Folder; | |||
2646 | InserterTy Inserter; | |||
2647 | ||||
2648 | public: | |||
2649 | IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(), | |||
2650 | MDNode *FPMathTag = nullptr, | |||
2651 | ArrayRef<OperandBundleDef> OpBundles = None) | |||
2652 | : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles), | |||
2653 | Folder(Folder), Inserter(Inserter) {} | |||
2654 | ||||
2655 | explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr, | |||
2656 | ArrayRef<OperandBundleDef> OpBundles = None) | |||
2657 | : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {} | |||
2658 | ||||
2659 | explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder, | |||
2660 | MDNode *FPMathTag = nullptr, | |||
2661 | ArrayRef<OperandBundleDef> OpBundles = None) | |||
2662 | : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter, | |||
2663 | FPMathTag, OpBundles), Folder(Folder) { | |||
2664 | SetInsertPoint(TheBB); | |||
2665 | } | |||
2666 | ||||
2667 | explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr, | |||
2668 | ArrayRef<OperandBundleDef> OpBundles = None) | |||
2669 | : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter, | |||
2670 | FPMathTag, OpBundles) { | |||
2671 | SetInsertPoint(TheBB); | |||
2672 | } | |||
2673 | ||||
2674 | explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr, | |||
2675 | ArrayRef<OperandBundleDef> OpBundles = None) | |||
2676 | : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter, | |||
2677 | FPMathTag, OpBundles) { | |||
2678 | SetInsertPoint(IP); | |||
2679 | } | |||
2680 | ||||
2681 | IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder, | |||
2682 | MDNode *FPMathTag = nullptr, | |||
2683 | ArrayRef<OperandBundleDef> OpBundles = None) | |||
2684 | : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter, | |||
2685 | FPMathTag, OpBundles), Folder(Folder) { | |||
2686 | SetInsertPoint(TheBB, IP); | |||
2687 | } | |||
2688 | ||||
2689 | IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, | |||
2690 | MDNode *FPMathTag = nullptr, | |||
2691 | ArrayRef<OperandBundleDef> OpBundles = None) | |||
2692 | : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter, | |||
2693 | FPMathTag, OpBundles) { | |||
2694 | SetInsertPoint(TheBB, IP); | |||
2695 | } | |||
2696 | ||||
2697 | /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard | |||
2698 | /// or FastMathFlagGuard instead. | |||
2699 | IRBuilder(const IRBuilder &) = delete; | |||
2700 | ||||
2701 | InserterTy &getInserter() { return Inserter; } | |||
2702 | }; | |||
2703 | ||||
2704 | // Create wrappers for C Binding types (see CBindingWrapping.h). | |||
2705 | DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)inline IRBuilder<> *unwrap(LLVMBuilderRef P) { return reinterpret_cast <IRBuilder<>*>(P); } inline LLVMBuilderRef wrap(const IRBuilder<> *P) { return reinterpret_cast<LLVMBuilderRef >(const_cast<IRBuilder<>*>(P)); } | |||
2706 | ||||
2707 | } // end namespace llvm | |||
2708 | ||||
2709 | #endif // LLVM_IR_IRBUILDER_H |