Bug Summary

File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR/IRBuilder.h
Warning: line 2676, column 23
Called C++ object pointer is null
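
The analyzer's path (steps 1-8 interleaved in the listing below) enters
deleteDeadLoop() assuming 'MSSA' and 'SE' are null, then assumes the
preheader's terminator is not a BranchInst, so the dyn_cast<BranchInst> at
source line 499 leaves 'OldBr' null. In this NDEBUG build the guarding
asserts compile to no-ops, and the null pointer is passed as parameter 'IP'
to the IRBuilder constructor (steps 7-8), producing the "Called C++ object
pointer is null" report at IRBuilder.h line 2676. A minimal sketch of one
conventional way to make the invariant explicit, assuming a loop preheader
really must end in a branch (illustrative only, not the upstream fix):

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    void buildAtPreheader(BasicBlock *Preheader, BasicBlock *Header,
                          BasicBlock *Exit) {
      // cast<> (unlike dyn_cast<>) has no null-returning path, so the
      // analyzer cannot carry a null 'OldBr' into IRBuilder's constructor;
      // in asserts builds it also enforces the invariant at runtime.
      auto *OldBr = cast<BranchInst>(Preheader->getTerminator());
      IRBuilder<> Builder(OldBr);
      Builder.CreateCondBr(Builder.getFalse(), Header, Exit);
      OldBr->eraseFromParent();
    }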

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name LoopUtils.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -D PIC -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -D_RET_PROTECTOR -ret-protector -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Transforms/Utils/LoopUtils.cpp

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Transforms/Utils/LoopUtils.cpp

1//===-- LoopUtils.cpp - Loop Utility functions -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines common loop utility functions.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/Transforms/Utils/LoopUtils.h"
14#include "llvm/ADT/DenseSet.h"
15#include "llvm/ADT/Optional.h"
16#include "llvm/ADT/PriorityWorklist.h"
17#include "llvm/ADT/ScopeExit.h"
18#include "llvm/ADT/SetVector.h"
19#include "llvm/ADT/SmallPtrSet.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/Analysis/AliasAnalysis.h"
22#include "llvm/Analysis/BasicAliasAnalysis.h"
23#include "llvm/Analysis/DomTreeUpdater.h"
24#include "llvm/Analysis/GlobalsModRef.h"
25#include "llvm/Analysis/InstructionSimplify.h"
26#include "llvm/Analysis/LoopAccessAnalysis.h"
27#include "llvm/Analysis/LoopInfo.h"
28#include "llvm/Analysis/LoopPass.h"
29#include "llvm/Analysis/MemorySSA.h"
30#include "llvm/Analysis/MemorySSAUpdater.h"
31#include "llvm/Analysis/MustExecute.h"
32#include "llvm/Analysis/ScalarEvolution.h"
33#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
34#include "llvm/Analysis/ScalarEvolutionExpressions.h"
35#include "llvm/Analysis/TargetTransformInfo.h"
36#include "llvm/Analysis/ValueTracking.h"
37#include "llvm/IR/DIBuilder.h"
38#include "llvm/IR/Dominators.h"
39#include "llvm/IR/Instructions.h"
40#include "llvm/IR/IntrinsicInst.h"
41#include "llvm/IR/MDBuilder.h"
42#include "llvm/IR/Module.h"
43#include "llvm/IR/Operator.h"
44#include "llvm/IR/PatternMatch.h"
45#include "llvm/IR/ValueHandle.h"
46#include "llvm/InitializePasses.h"
47#include "llvm/Pass.h"
48#include "llvm/Support/Debug.h"
49#include "llvm/Support/KnownBits.h"
50#include "llvm/Transforms/Utils/BasicBlockUtils.h"
51#include "llvm/Transforms/Utils/Local.h"
52#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
53
54using namespace llvm;
55using namespace llvm::PatternMatch;
56
57#define DEBUG_TYPE "loop-utils"
58
59static const char *LLVMLoopDisableNonforced = "llvm.loop.disable_nonforced";
60static const char *LLVMLoopDisableLICM = "llvm.licm.disable";
61
62bool llvm::formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI,
63 MemorySSAUpdater *MSSAU,
64 bool PreserveLCSSA) {
65 bool Changed = false;
66
67 // We re-use a vector for the in-loop predecessors.
68 SmallVector<BasicBlock *, 4> InLoopPredecessors;
69
70 auto RewriteExit = [&](BasicBlock *BB) {
71 assert(InLoopPredecessors.empty() &&
72 "Must start with an empty predecessors list!");
73 auto Cleanup = make_scope_exit([&] { InLoopPredecessors.clear(); });
74
75 // See if there are any non-loop predecessors of this exit block and
76 // keep track of the in-loop predecessors.
77 bool IsDedicatedExit = true;
78 for (auto *PredBB : predecessors(BB))
79 if (L->contains(PredBB)) {
80 if (isa<IndirectBrInst>(PredBB->getTerminator()))
81 // We cannot rewrite exiting edges from an indirectbr.
82 return false;
83 if (isa<CallBrInst>(PredBB->getTerminator()))
84 // We cannot rewrite exiting edges from a callbr.
85 return false;
86
87 InLoopPredecessors.push_back(PredBB);
88 } else {
89 IsDedicatedExit = false;
90 }
91
92 assert(!InLoopPredecessors.empty() && "Must have *some* loop predecessor!");
93
94 // Nothing to do if this is already a dedicated exit.
95 if (IsDedicatedExit)
96 return false;
97
98 auto *NewExitBB = SplitBlockPredecessors(
99 BB, InLoopPredecessors, ".loopexit", DT, LI, MSSAU, PreserveLCSSA);
100
101 if (!NewExitBB)
102 LLVM_DEBUG(
103 dbgs() << "WARNING: Can't create a dedicated exit block for loop: "
104 << *L << "\n");
105 else
106 LLVM_DEBUG(dbgs() << "LoopSimplify: Creating dedicated exit block "
107 << NewExitBB->getName() << "\n");
108 return true;
109 };
110
111 // Walk the exit blocks directly rather than building up a data structure for
112 // them, but only visit each one once.
113 SmallPtrSet<BasicBlock *, 4> Visited;
114 for (auto *BB : L->blocks())
115 for (auto *SuccBB : successors(BB)) {
116 // We're looking for exit blocks so skip in-loop successors.
117 if (L->contains(SuccBB))
118 continue;
119
120 // Visit each exit block exactly once.
121 if (!Visited.insert(SuccBB).second)
122 continue;
123
124 Changed |= RewriteExit(SuccBB);
125 }
126
127 return Changed;
128}
129
130/// Returns the instructions defined in the loop whose values are used outside of it.
131SmallVector<Instruction *, 8> llvm::findDefsUsedOutsideOfLoop(Loop *L) {
132 SmallVector<Instruction *, 8> UsedOutside;
133
134 for (auto *Block : L->getBlocks())
135 // FIXME: I believe that this could use copy_if if the Inst reference could
136 // be adapted into a pointer.
137 for (auto &Inst : *Block) {
138 auto Users = Inst.users();
139 if (any_of(Users, [&](User *U) {
140 auto *Use = cast<Instruction>(U);
141 return !L->contains(Use->getParent());
142 }))
143 UsedOutside.push_back(&Inst);
144 }
145
146 return UsedOutside;
147}
148
149void llvm::getLoopAnalysisUsage(AnalysisUsage &AU) {
150 // By definition, all loop passes need the LoopInfo analysis and the
151 // Dominator tree it depends on. Because they all participate in the loop
152 // pass manager, they must also preserve these.
153 AU.addRequired<DominatorTreeWrapperPass>();
154 AU.addPreserved<DominatorTreeWrapperPass>();
155 AU.addRequired<LoopInfoWrapperPass>();
156 AU.addPreserved<LoopInfoWrapperPass>();
157
158 // We must also preserve LoopSimplify and LCSSA. We locally access their IDs
159 // here because users shouldn't directly get them from this header.
160 extern char &LoopSimplifyID;
161 extern char &LCSSAID;
162 AU.addRequiredID(LoopSimplifyID);
163 AU.addPreservedID(LoopSimplifyID);
164 AU.addRequiredID(LCSSAID);
165 AU.addPreservedID(LCSSAID);
166 // This is used in the LPPassManager to perform LCSSA verification on passes
167 // which preserve LCSSA form.
168 AU.addRequired<LCSSAVerificationPass>();
169 AU.addPreserved<LCSSAVerificationPass>();
170
171 // Loop passes are designed to run inside of a loop pass manager which means
172 // that any function analyses they require must be required by the first loop
173 // pass in the manager (so that it is computed before the loop pass manager
174 // runs) and preserved by all loop passes in the manager. To make this
175 // reasonably robust, the set needed for most loop passes is maintained here.
176 // If your loop pass requires an analysis not listed here, you will need to
177 // carefully audit the loop pass manager nesting structure that results.
178 AU.addRequired<AAResultsWrapperPass>();
179 AU.addPreserved<AAResultsWrapperPass>();
180 AU.addPreserved<BasicAAWrapperPass>();
181 AU.addPreserved<GlobalsAAWrapperPass>();
182 AU.addPreserved<SCEVAAWrapperPass>();
183 AU.addRequired<ScalarEvolutionWrapperPass>();
184 AU.addPreserved<ScalarEvolutionWrapperPass>();
185 // FIXME: When all loop passes preserve MemorySSA, it can be required and
186 // preserved here instead of the individual handling in each pass.
187}
188
189/// Manually defined generic "LoopPass" dependency initialization. This is used
190/// to initialize the exact set of passes from above in \c
191/// getLoopAnalysisUsage. It can be used within a loop pass's initialization
192/// with:
193///
194/// INITIALIZE_PASS_DEPENDENCY(LoopPass)
195///
196/// As if "LoopPass" were a pass.
197void llvm::initializeLoopPassPass(PassRegistry &Registry) {
198 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
199 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
200 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
201 INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass)
202 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
203 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
204 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
205 INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
206 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
207 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
208}
209
210/// Create MDNode for input string.
211static MDNode *createStringMetadata(Loop *TheLoop, StringRef Name, unsigned V) {
212 LLVMContext &Context = TheLoop->getHeader()->getContext();
213 Metadata *MDs[] = {
214 MDString::get(Context, Name),
215 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(Context), V))};
216 return MDNode::get(Context, MDs);
217}
218
219/// Set the input string in the loop metadata, keeping other values intact.
220/// If the string is already present in the loop metadata, update its value
221/// if it differs.
222void llvm::addStringMetadataToLoop(Loop *TheLoop, const char *StringMD,
223 unsigned V) {
224 SmallVector<Metadata *, 4> MDs(1);
225 // If the loop already has metadata, retain it.
226 MDNode *LoopID = TheLoop->getLoopID();
227 if (LoopID) {
228 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
229 MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
230 // If it is of the form key = value, try to parse it.
231 if (Node->getNumOperands() == 2) {
232 MDString *S = dyn_cast<MDString>(Node->getOperand(0));
233 if (S && S->getString().equals(StringMD)) {
234 ConstantInt *IntMD =
235 mdconst::extract_or_null<ConstantInt>(Node->getOperand(1));
236 if (IntMD && IntMD->getSExtValue() == V)
237 // It is already in place. Do nothing.
238 return;
239 // We need to update the value, so just skip it here and it will
240 // be added after copying other existing nodes.
241 continue;
242 }
243 }
244 MDs.push_back(Node);
245 }
246 }
247 // Add new metadata.
248 MDs.push_back(createStringMetadata(TheLoop, StringMD, V));
249 // Replace current metadata node with new one.
250 LLVMContext &Context = TheLoop->getHeader()->getContext();
251 MDNode *NewLoopID = MDNode::get(Context, MDs);
252 // Set operand 0 to refer to the loop id itself.
253 NewLoopID->replaceOperandWith(0, NewLoopID);
254 TheLoop->setLoopID(NewLoopID);
255}
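
// Illustration (hypothetical values, following LLVM's loop metadata
// convention): after addStringMetadataToLoop(L, "llvm.loop.unroll.count", 4),
// the loop's latch terminator carries
//   !llvm.loop !0
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.unroll.count", i32 4}
// where operand 0 of !0 is the self-reference installed by
// replaceOperandWith(0, NewLoopID) above.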
256
257Optional<ElementCount>
258llvm::getOptionalElementCountLoopAttribute(const Loop *TheLoop) {
259 Optional<int> Width =
260 getOptionalIntLoopAttribute(TheLoop, "llvm.loop.vectorize.width");
261
262 if (Width.hasValue()) {
263 Optional<int> IsScalable = getOptionalIntLoopAttribute(
264 TheLoop, "llvm.loop.vectorize.scalable.enable");
265 return ElementCount::get(*Width, IsScalable.getValueOr(false));
266 }
267
268 return None;
269}
270
271Optional<MDNode *> llvm::makeFollowupLoopID(
272 MDNode *OrigLoopID, ArrayRef<StringRef> FollowupOptions,
273 const char *InheritOptionsExceptPrefix, bool AlwaysNew) {
274 if (!OrigLoopID) {
275 if (AlwaysNew)
276 return nullptr;
277 return None;
278 }
279
280 assert(OrigLoopID->getOperand(0) == OrigLoopID);
281
282 bool InheritAllAttrs = !InheritOptionsExceptPrefix;
283 bool InheritSomeAttrs =
284 InheritOptionsExceptPrefix && InheritOptionsExceptPrefix[0] != '\0';
285 SmallVector<Metadata *, 8> MDs;
286 MDs.push_back(nullptr);
287
288 bool Changed = false;
289 if (InheritAllAttrs || InheritSomeAttrs) {
290 for (const MDOperand &Existing : drop_begin(OrigLoopID->operands())) {
291 MDNode *Op = cast<MDNode>(Existing.get());
292
293 auto InheritThisAttribute = [InheritSomeAttrs,
294 InheritOptionsExceptPrefix](MDNode *Op) {
295 if (!InheritSomeAttrs)
296 return false;
297
298 // Skip malformed attribute metadata nodes.
299 if (Op->getNumOperands() == 0)
300 return true;
301 Metadata *NameMD = Op->getOperand(0).get();
302 if (!isa<MDString>(NameMD))
303 return true;
304 StringRef AttrName = cast<MDString>(NameMD)->getString();
305
306 // Do not inherit excluded attributes.
307 return !AttrName.startswith(InheritOptionsExceptPrefix);
308 };
309
310 if (InheritThisAttribute(Op))
311 MDs.push_back(Op);
312 else
313 Changed = true;
314 }
315 } else {
316 // Modified if we dropped at least one attribute.
317 Changed = OrigLoopID->getNumOperands() > 1;
318 }
319
320 bool HasAnyFollowup = false;
321 for (StringRef OptionName : FollowupOptions) {
322 MDNode *FollowupNode = findOptionMDForLoopID(OrigLoopID, OptionName);
323 if (!FollowupNode)
324 continue;
325
326 HasAnyFollowup = true;
327 for (const MDOperand &Option : drop_begin(FollowupNode->operands())) {
328 MDs.push_back(Option.get());
329 Changed = true;
330 }
331 }
332
333 // Attributes of the followup loop were not specified explicitly, so signal
334 // to the transformation pass to add suitable attributes.
335 if (!AlwaysNew && !HasAnyFollowup)
336 return None;
337
338 // If no attributes were added or removed, the previous loop ID can be reused.
339 if (!AlwaysNew && !Changed)
340 return OrigLoopID;
341
342 // An empty attribute list is equivalent to having no !llvm.loop metadata at all.
343 if (MDs.size() == 1)
344 return nullptr;
345
346 // Build the new loop ID.
347 MDTuple *FollowupLoopID = MDNode::get(OrigLoopID->getContext(), MDs);
348 FollowupLoopID->replaceOperandWith(0, FollowupLoopID);
349 return FollowupLoopID;
350}
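
// Illustration (hypothetical metadata, per the llvm.loop.<transform>.followup_<name>
// convention): given
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.unroll.count", i32 4}
//   !2 = !{!"llvm.loop.unroll.followup_unrolled", !3}
//   !3 = !{!"llvm.loop.isvectorized"}
// makeFollowupLoopID(!0, {"llvm.loop.unroll.followup_unrolled"},
// "llvm.loop.unroll.") excludes the unroll attributes, appends !3 from the
// followup option, and returns a fresh self-referential loop ID equivalent to
// distinct !{<self>, !3}.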
351
352bool llvm::hasDisableAllTransformsHint(const Loop *L) {
353 return getBooleanLoopAttribute(L, LLVMLoopDisableNonforced);
354}
355
356bool llvm::hasDisableLICMTransformsHint(const Loop *L) {
357 return getBooleanLoopAttribute(L, LLVMLoopDisableLICM);
358}
359
360TransformationMode llvm::hasUnrollTransformation(const Loop *L) {
361 if (getBooleanLoopAttribute(L, "llvm.loop.unroll.disable"))
362 return TM_SuppressedByUser;
363
364 Optional<int> Count =
365 getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count");
366 if (Count.hasValue())
367 return Count.getValue() == 1 ? TM_SuppressedByUser : TM_ForcedByUser;
368
369 if (getBooleanLoopAttribute(L, "llvm.loop.unroll.enable"))
370 return TM_ForcedByUser;
371
372 if (getBooleanLoopAttribute(L, "llvm.loop.unroll.full"))
373 return TM_ForcedByUser;
374
375 if (hasDisableAllTransformsHint(L))
376 return TM_Disable;
377
378 return TM_Unspecified;
379}
380
381TransformationMode llvm::hasUnrollAndJamTransformation(const Loop *L) {
382 if (getBooleanLoopAttribute(L, "llvm.loop.unroll_and_jam.disable"))
383 return TM_SuppressedByUser;
384
385 Optional<int> Count =
386 getOptionalIntLoopAttribute(L, "llvm.loop.unroll_and_jam.count");
387 if (Count.hasValue())
388 return Count.getValue() == 1 ? TM_SuppressedByUser : TM_ForcedByUser;
389
390 if (getBooleanLoopAttribute(L, "llvm.loop.unroll_and_jam.enable"))
391 return TM_ForcedByUser;
392
393 if (hasDisableAllTransformsHint(L))
394 return TM_Disable;
395
396 return TM_Unspecified;
397}
398
399TransformationMode llvm::hasVectorizeTransformation(const Loop *L) {
400 Optional<bool> Enable =
401 getOptionalBoolLoopAttribute(L, "llvm.loop.vectorize.enable");
402
403 if (Enable == false)
404 return TM_SuppressedByUser;
405
406 Optional<ElementCount> VectorizeWidth =
407 getOptionalElementCountLoopAttribute(L);
408 Optional<int> InterleaveCount =
409 getOptionalIntLoopAttribute(L, "llvm.loop.interleave.count");
410
411 // 'Forcing' vector width and interleave count to one effectively disables
412 // this transformation.
413 if (Enable == true && VectorizeWidth && VectorizeWidth->isScalar() &&
414 InterleaveCount == 1)
415 return TM_SuppressedByUser;
416
417 if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
418 return TM_Disable;
419
420 if (Enable == true)
421 return TM_ForcedByUser;
422
423 if ((VectorizeWidth && VectorizeWidth->isScalar()) && InterleaveCount == 1)
424 return TM_Disable;
425
426 if ((VectorizeWidth && VectorizeWidth->isVector()) || InterleaveCount > 1)
427 return TM_Enable;
428
429 if (hasDisableAllTransformsHint(L))
430 return TM_Disable;
431
432 return TM_Unspecified;
433}
434
435TransformationMode llvm::hasDistributeTransformation(const Loop *L) {
436 if (getBooleanLoopAttribute(L, "llvm.loop.distribute.enable"))
437 return TM_ForcedByUser;
438
439 if (hasDisableAllTransformsHint(L))
440 return TM_Disable;
441
442 return TM_Unspecified;
443}
444
445TransformationMode llvm::hasLICMVersioningTransformation(const Loop *L) {
446 if (getBooleanLoopAttribute(L, "llvm.loop.licm_versioning.disable"))
447 return TM_SuppressedByUser;
448
449 if (hasDisableAllTransformsHint(L))
450 return TM_Disable;
451
452 return TM_Unspecified;
453}
454
455/// Does a BFS from a given node to all of its children inside a given loop.
456/// The returned vector of nodes includes the starting point.
457SmallVector<DomTreeNode *, 16>
458llvm::collectChildrenInLoop(DomTreeNode *N, const Loop *CurLoop) {
459 SmallVector<DomTreeNode *, 16> Worklist;
460 auto AddRegionToWorklist = [&](DomTreeNode *DTN) {
461 // Only include subregions in the top level loop.
462 BasicBlock *BB = DTN->getBlock();
463 if (CurLoop->contains(BB))
464 Worklist.push_back(DTN);
465 };
466
467 AddRegionToWorklist(N);
468
469 for (size_t I = 0; I < Worklist.size(); I++) {
470 for (DomTreeNode *Child : Worklist[I]->children())
471 AddRegionToWorklist(Child);
472 }
473
474 return Worklist;
475}
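
// Note: no visited set is needed above because the dominator tree is a tree,
// so each node is reached at most once via its unique parent; Worklist
// doubles as both the BFS queue and the returned result vector.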
476
477void llvm::deleteDeadLoop(Loop *L, DominatorTree *DT, ScalarEvolution *SE,
478 LoopInfo *LI, MemorySSA *MSSA) {
479 assert((!DT || L->isLCSSAForm(*DT)) && "Expected LCSSA!");
480 auto *Preheader = L->getLoopPreheader();
481 assert(Preheader && "Preheader should exist!");
482
483 std::unique_ptr<MemorySSAUpdater> MSSAU;
484 if (MSSA)
1. Assuming 'MSSA' is null
2. Taking false branch
485 MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
486
487 // Now that we know the removal is safe, remove the loop by changing the
488 // branch from the preheader to go to the single exit block.
489 //
490 // Because we're deleting a large chunk of code at once, the sequence in which
491 // we remove things is very important to avoid invalidation issues.
492
493 // Tell ScalarEvolution that the loop is deleted. Do this before
494 // deleting the loop so that ScalarEvolution can look at the loop
495 // to determine what it needs to clean up.
496 if (SE)
3. Assuming 'SE' is null
4. Taking false branch
497 SE->forgetLoop(L);
498
499 auto *OldBr = dyn_cast<BranchInst>(Preheader->getTerminator());
5. Assuming the object is not a 'BranchInst'
6. 'OldBr' initialized to a null pointer value
500 assert(OldBr && "Preheader must end with a branch");
501 assert(OldBr->isUnconditional() && "Preheader must have a single successor");
502 // Connect the preheader to the exit block. Keep the old edge to the header
503 // around to perform the dominator tree update in two separate steps
504 // -- #1 insertion of the edge preheader -> exit and #2 deletion of the edge
505 // preheader -> header.
506 //
507 //
508 //  0. Preheader           1. Preheader        2. Preheader
509 //        |                     |  |                |
510 //        V                     |  V                |
511 //      Header <--\             | Header <--\       | Header <--\
512 //        |  |    |             | |  |    |         | |  |    |
513 //        |  V    |             | |  V    |         | |  V    |
514 //        | Body --/            | | Body --/        | | Body --/
515 //        V                     V V                 V V
516 //      Exit                   Exit                Exit
517 //
518 // By doing this in two separate steps we can perform the dominator tree
519 // update without using the batch update API.
520 //
521 // Even when the loop is never executed, we cannot remove the edge from the
522 // source block to the exit block. Consider the case where the unexecuted loop
523 // branches back to an outer loop. If we deleted the loop and removed the edge
524 // coming to this inner loop, this will break the outer loop structure (by
525 // deleting the backedge of the outer loop). If the outer loop is indeed a
526 // non-loop, it will be deleted in a future iteration of the loop deletion pass.
527 IRBuilder<> Builder(OldBr);
7. Passing null pointer value via 1st parameter 'IP'
8. Calling constructor for 'IRBuilder<llvm::ConstantFolder, llvm::IRBuilderDefaultInserter>'
528
529 auto *ExitBlock = L->getUniqueExitBlock();
530 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
531 if (ExitBlock) {
532 assert(ExitBlock && "Should have a unique exit block!");
533 assert(L->hasDedicatedExits() && "Loop should have dedicated exits!");
534
535 Builder.CreateCondBr(Builder.getFalse(), L->getHeader(), ExitBlock);
536 // Remove the old branch. The conditional branch becomes a new terminator.
537 OldBr->eraseFromParent();
538
539 // Rewrite phis in the exit block to get their inputs from the Preheader
540 // instead of the exiting block.
541 for (PHINode &P : ExitBlock->phis()) {
542 // Set the zero'th element of Phi to be from the preheader and remove all
543 // other incoming values. Given the loop has dedicated exits, all other
544 // incoming values must be from the exiting blocks.
545 int PredIndex = 0;
546 P.setIncomingBlock(PredIndex, Preheader);
547 // Removes all incoming values from all other exiting blocks (including
548 // duplicate values from an exiting block).
549 // Nuke all entries except the zero'th entry which is the preheader entry.
550 // NOTE! We need to remove Incoming Values in the reverse order as done
551 // below, to keep the indices valid for deletion (removeIncomingValues
552 // updates getNumIncomingValues and shifts all values down into the
553 // operand being deleted).
554 for (unsigned i = 0, e = P.getNumIncomingValues() - 1; i != e; ++i)
555 P.removeIncomingValue(e - i, false);
556
557 assert((P.getNumIncomingValues() == 1 &&
558 P.getIncomingBlock(PredIndex) == Preheader) &&
559 "Should have exactly one value and that's from the preheader!");
560 }
561
562 if (DT) {
563 DTU.applyUpdates({{DominatorTree::Insert, Preheader, ExitBlock}});
564 if (MSSA) {
565 MSSAU->applyUpdates({{DominatorTree::Insert, Preheader, ExitBlock}},
566 *DT);
567 if (VerifyMemorySSA)
568 MSSA->verifyMemorySSA();
569 }
570 }
571
572 // Disconnect the loop body by branching directly to its exit.
573 Builder.SetInsertPoint(Preheader->getTerminator());
574 Builder.CreateBr(ExitBlock);
575 // Remove the old branch.
576 Preheader->getTerminator()->eraseFromParent();
577 } else {
578 assert(L->hasNoExitBlocks() &&
579 "Loop should have either zero or one exit blocks.");
580
581 Builder.SetInsertPoint(OldBr);
582 Builder.CreateUnreachable();
583 Preheader->getTerminator()->eraseFromParent();
584 }
585
586 if (DT) {
587 DTU.applyUpdates({{DominatorTree::Delete, Preheader, L->getHeader()}});
588 if (MSSA) {
589 MSSAU->applyUpdates({{DominatorTree::Delete, Preheader, L->getHeader()}},
590 *DT);
591 SmallSetVector<BasicBlock *, 8> DeadBlockSet(L->block_begin(),
592 L->block_end());
593 MSSAU->removeBlocks(DeadBlockSet);
594 if (VerifyMemorySSA)
595 MSSA->verifyMemorySSA();
596 }
597 }
598
599 // Use a set to deduplicate and a vector to guarantee deterministic ordering.
600 llvm::SmallDenseSet<std::pair<DIVariable *, DIExpression *>, 4> DeadDebugSet;
601 llvm::SmallVector<DbgVariableIntrinsic *, 4> DeadDebugInst;
602
603 if (ExitBlock) {
604 // Given LCSSA form is satisfied, we should not have users of instructions
605 // within the dead loop outside of the loop. However, LCSSA doesn't take
606 // unreachable uses into account. We handle them here.
607 // We could do this after dropping all references (in which case all users
608 // inside the loop would already be eliminated, leaving less work), but per
609 // the API doc of User::dropAllReferences, the only valid operation after
610 // dropping references is deletion. So let's first substitute all uses of
611 // instructions from the loop with undef values of the corresponding type.
612 for (auto *Block : L->blocks())
613 for (Instruction &I : *Block) {
614 auto *Undef = UndefValue::get(I.getType());
615 for (Value::use_iterator UI = I.use_begin(), E = I.use_end();
616 UI != E;) {
617 Use &U = *UI;
618 ++UI;
619 if (auto *Usr = dyn_cast<Instruction>(U.getUser()))
620 if (L->contains(Usr->getParent()))
621 continue;
622 // If we have a DT then we can check that uses outside the loop occur only
623 // in unreachable blocks.
624 if (DT)
625 assert(!DT->isReachableFromEntry(U) &&
626 "Unexpected user in reachable block");
627 U.set(Undef);
628 }
629 auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I);
630 if (!DVI)
631 continue;
632 auto Key =
633 DeadDebugSet.find({DVI->getVariable(), DVI->getExpression()});
634 if (Key != DeadDebugSet.end())
635 continue;
636 DeadDebugSet.insert({DVI->getVariable(), DVI->getExpression()});
637 DeadDebugInst.push_back(DVI);
638 }
639
640 // After the loop has been deleted, all the values defined and modified
641 // inside the loop are going to be unavailable.
642 // Since debug values in the loop have been deleted, inserting an undef
643 // dbg.value truncates the range of any dbg.value before the loop where the
644 // loop used to be. This is particularly important for constant values.
645 DIBuilder DIB(*ExitBlock->getModule());
646 Instruction *InsertDbgValueBefore = ExitBlock->getFirstNonPHI();
647 assert(InsertDbgValueBefore &&
648 "There should be a non-PHI instruction in exit block, else these "
649 "instructions will have no parent.");
650 for (auto *DVI : DeadDebugInst)
651 DIB.insertDbgValueIntrinsic(UndefValue::get(Builder.getInt32Ty()),
652 DVI->getVariable(), DVI->getExpression(),
653 DVI->getDebugLoc(), InsertDbgValueBefore);
654 }
655
656 // Remove the block from the reference counting scheme, so that we can
657 // delete it freely later.
658 for (auto *Block : L->blocks())
659 Block->dropAllReferences();
660
661 if (MSSA && VerifyMemorySSA)
662 MSSA->verifyMemorySSA();
663
664 if (LI) {
665 // Erase the instructions and the blocks without having to worry
666 // about ordering because we already dropped the references.
667 // NOTE: This iteration is safe because erasing the block does not remove
668 // its entry from the loop's block list. We do that in the next section.
669 for (Loop::block_iterator LpI = L->block_begin(), LpE = L->block_end();
670 LpI != LpE; ++LpI)
671 (*LpI)->eraseFromParent();
672
673 // Finally, the blocks from loopinfo. This has to happen late because
674 // otherwise our loop iterators won't work.
675
676 SmallPtrSet<BasicBlock *, 8> blocks;
677 blocks.insert(L->block_begin(), L->block_end());
678 for (BasicBlock *BB : blocks)
679 LI->removeBlock(BB);
680
681 // The last step is to update LoopInfo now that we've eliminated this loop.
682 // Note: LoopInfo::erase removes the given loop and relinks its subloops
683 // with its parent, while removeLoop/removeChildLoop remove the given loop
684 // but do not relink its subloops, which is what we want here.
685 if (Loop *ParentLoop = L->getParentLoop()) {
686 Loop::iterator I = find(*ParentLoop, L);
687 assert(I != ParentLoop->end() && "Couldn't find loop");
688 ParentLoop->removeChildLoop(I);
689 } else {
690 Loop::iterator I = find(*LI, L);
691 assert(I != LI->end() && "Couldn't find loop");
692 LI->removeLoop(I);
693 }
694 LI->destroy(L);
695 }
696}
697
698static Loop *getOutermostLoop(Loop *L) {
699 while (Loop *Parent = L->getParentLoop())
700 L = Parent;
701 return L;
702}
703
704void llvm::breakLoopBackedge(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
705 LoopInfo &LI, MemorySSA *MSSA) {
706 auto *Latch = L->getLoopLatch();
707 assert(Latch && "multiple latches not yet supported");
708 auto *Header = L->getHeader();
709 Loop *OutermostLoop = getOutermostLoop(L);
710
711 SE.forgetLoop(L);
712
713 // Note: By splitting the backedge, and then explicitly making it unreachable
714 // we gracefully handle corner cases such as non-bottom tested loops and the
715 // like. We also have the benefit of being able to reuse existing well tested
716 // code. It might be worth special casing the common bottom tested case at
717 // some point to avoid code churn.
718
719 std::unique_ptr<MemorySSAUpdater> MSSAU;
720 if (MSSA)
721 MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
722
723 auto *BackedgeBB = SplitEdge(Latch, Header, &DT, &LI, MSSAU.get());
724
725 DomTreeUpdater DTU(&DT, DomTreeUpdater::UpdateStrategy::Eager);
726 (void)changeToUnreachable(BackedgeBB->getTerminator(),
727 /*PreserveLCSSA*/ true, &DTU, MSSAU.get());
728
729 // Erase (and destroy) this loop instance. Handles relinking sub-loops
730 // and blocks within the loop as needed.
731 LI.erase(L);
732
733 // If the loop we broke had a parent, then changeToUnreachable might have
734 // caused a block to be removed from the parent loop (see loop_nest_lcssa
735 // test case in zero-btc.ll for an example), thus changing the parent's
736 // exit blocks. If that happened, we need to rebuild LCSSA on the outermost
737 // loop which might have had a block removed.
738 if (OutermostLoop != L)
739 formLCSSARecursively(*OutermostLoop, DT, &LI, &SE);
740}
741
742
743/// Checks if \p L has a single exit through the latch block, except possibly
744/// for "deoptimizing" exits. Returns the branch instruction terminating the
745/// loop latch if the check succeeds, nullptr otherwise.
746static BranchInst *getExpectedExitLoopLatchBranch(Loop *L) {
747 BasicBlock *Latch = L->getLoopLatch();
748 if (!Latch)
749 return nullptr;
750
751 BranchInst *LatchBR = dyn_cast<BranchInst>(Latch->getTerminator());
752 if (!LatchBR || LatchBR->getNumSuccessors() != 2 || !L->isLoopExiting(Latch))
753 return nullptr;
754
755 assert((LatchBR->getSuccessor(0) == L->getHeader() ||
756 LatchBR->getSuccessor(1) == L->getHeader()) &&
757 "At least one edge out of the latch must go to the header");
758
759 SmallVector<BasicBlock *, 4> ExitBlocks;
760 L->getUniqueNonLatchExitBlocks(ExitBlocks);
761 if (any_of(ExitBlocks, [](const BasicBlock *EB) {
762 return !EB->getTerminatingDeoptimizeCall();
763 }))
764 return nullptr;
765
766 return LatchBR;
767}
768
769Optional<unsigned>
770llvm::getLoopEstimatedTripCount(Loop *L,
771 unsigned *EstimatedLoopInvocationWeight) {
772 // Support loops with an exiting latch where all other exits only
773 // deoptimize.
774 BranchInst *LatchBranch = getExpectedExitLoopLatchBranch(L);
775 if (!LatchBranch)
776 return None;
777
778 // To estimate the number of times the loop body was executed, we want to
779 // know the number of times the backedge was taken, vs. the number of times
780 // we exited the loop.
781 uint64_t BackedgeTakenWeight, LatchExitWeight;
782 if (!LatchBranch->extractProfMetadata(BackedgeTakenWeight, LatchExitWeight))
783 return None;
784
785 if (LatchBranch->getSuccessor(0) != L->getHeader())
786 std::swap(BackedgeTakenWeight, LatchExitWeight);
787
788 if (!LatchExitWeight)
789 return None;
790
791 if (EstimatedLoopInvocationWeight)
792 *EstimatedLoopInvocationWeight = LatchExitWeight;
793
794 // The estimated backedge taken count is the ratio of the backedge taken
795 // weight to the weight of the edge exiting the loop, rounded to nearest.
796 uint64_t BackedgeTakenCount =
797 llvm::divideNearest(BackedgeTakenWeight, LatchExitWeight);
798 // Estimated trip count is one plus estimated backedge taken count.
799 return BackedgeTakenCount + 1;
800}
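
// Worked example (illustrative numbers, not from the source): a latch branch
// with branch weights {backedge: 996, exit: 4} yields an estimated backedge
// taken count of divideNearest(996, 4) = 249 and hence an estimated trip
// count of 250; *EstimatedLoopInvocationWeight, if requested, is set to 4.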
801
802bool llvm::setLoopEstimatedTripCount(Loop *L, unsigned EstimatedTripCount,
803 unsigned EstimatedloopInvocationWeight) {
804 // Support loops with an exiting latch where all other exits only
805 // deoptimize.
806 BranchInst *LatchBranch = getExpectedExitLoopLatchBranch(L);
807 if (!LatchBranch)
808 return false;
809
810 // Calculate taken and exit weights.
811 unsigned LatchExitWeight = 0;
812 unsigned BackedgeTakenWeight = 0;
813
814 if (EstimatedTripCount > 0) {
815 LatchExitWeight = EstimatedloopInvocationWeight;
816 BackedgeTakenWeight = (EstimatedTripCount - 1) * LatchExitWeight;
817 }
818
819 // Swap if the back edge is taken when the condition is "false".
820 if (LatchBranch->getSuccessor(0) != L->getHeader())
821 std::swap(BackedgeTakenWeight, LatchExitWeight);
822
823 MDBuilder MDB(LatchBranch->getContext());
824
825 // Set/Update profile metadata.
826 LatchBranch->setMetadata(
827 LLVMContext::MD_prof,
828 MDB.createBranchWeights(BackedgeTakenWeight, LatchExitWeight));
829
830 return true;
831}
832
833bool llvm::hasIterationCountInvariantInParent(Loop *InnerLoop,
834 ScalarEvolution &SE) {
835 Loop *OuterL = InnerLoop->getParentLoop();
836 if (!OuterL)
837 return true;
838
839 // Get the backedge taken count for the inner loop
840 BasicBlock *InnerLoopLatch = InnerLoop->getLoopLatch();
841 const SCEV *InnerLoopBECountSC = SE.getExitCount(InnerLoop, InnerLoopLatch);
842 if (isa<SCEVCouldNotCompute>(InnerLoopBECountSC) ||
843 !InnerLoopBECountSC->getType()->isIntegerTy())
844 return false;
845
846 // Get whether count is invariant to the outer loop
847 ScalarEvolution::LoopDisposition LD =
848 SE.getLoopDisposition(InnerLoopBECountSC, OuterL);
849 if (LD != ScalarEvolution::LoopInvariant)
850 return false;
851
852 return true;
853}
854
855Value *llvm::createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left,
856 Value *Right) {
857 CmpInst::Predicate Pred;
858 switch (RK) {
859 default:
860 llvm_unreachable("Unknown min/max recurrence kind");
861 case RecurKind::UMin:
862 Pred = CmpInst::ICMP_ULT;
863 break;
864 case RecurKind::UMax:
865 Pred = CmpInst::ICMP_UGT;
866 break;
867 case RecurKind::SMin:
868 Pred = CmpInst::ICMP_SLT;
869 break;
870 case RecurKind::SMax:
871 Pred = CmpInst::ICMP_SGT;
872 break;
873 case RecurKind::FMin:
874 Pred = CmpInst::FCMP_OLT;
875 break;
876 case RecurKind::FMax:
877 Pred = CmpInst::FCMP_OGT;
878 break;
879 }
880
881 Value *Cmp = Builder.CreateCmp(Pred, Left, Right, "rdx.minmax.cmp");
882 Value *Select = Builder.CreateSelect(Cmp, Left, Right, "rdx.minmax.select");
883 return Select;
884}
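
// Illustration: for RecurKind::UMin the helper emits, in textual IR,
//   %rdx.minmax.cmp = icmp ult <ty> %Left, %Right
//   %rdx.minmax.select = select i1 %rdx.minmax.cmp, <ty> %Left, <ty> %Right
// i.e. a compare-plus-select idiom rather than a min/max intrinsic.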
885
886// Helper to generate an ordered reduction.
887Value *llvm::getOrderedReduction(IRBuilderBase &Builder, Value *Acc, Value *Src,
888 unsigned Op, RecurKind RdxKind,
889 ArrayRef<Value *> RedOps) {
890 unsigned VF = cast<FixedVectorType>(Src->getType())->getNumElements();
891
892 // Extract and apply reduction ops in ascending order:
893 // e.g. (...(((Acc + Scl[0]) + Scl[1]) + Scl[2]) + ...) + Scl[VF-1]
894 Value *Result = Acc;
895 for (unsigned ExtractIdx = 0; ExtractIdx != VF; ++ExtractIdx) {
896 Value *Ext =
897 Builder.CreateExtractElement(Src, Builder.getInt32(ExtractIdx));
898
899 if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
900 Result = Builder.CreateBinOp((Instruction::BinaryOps)Op, Result, Ext,
901 "bin.rdx");
902 } else {
903 assert(RecurrenceDescriptor::isMinMaxRecurrenceKind(RdxKind) &&
904 "Invalid min/max");
905 Result = createMinMaxOp(Builder, RdxKind, Result, Ext);
906 }
907
908 if (!RedOps.empty())
909 propagateIRFlags(Result, RedOps);
910 }
911
912 return Result;
913}
914
915// Helper to generate a log2 shuffle reduction.
916Value *llvm::getShuffleReduction(IRBuilderBase &Builder, Value *Src,
917 unsigned Op, RecurKind RdxKind,
918 ArrayRef<Value *> RedOps) {
919 unsigned VF = cast<FixedVectorType>(Src->getType())->getNumElements();
920 // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles
921 // and vector ops, reducing the set of values being computed by half each
922 // round.
923 assert(isPowerOf2_32(VF) &&
924 "Reduction emission only supported for pow2 vectors!");
925 Value *TmpVec = Src;
926 SmallVector<int, 32> ShuffleMask(VF);
927 for (unsigned i = VF; i != 1; i >>= 1) {
928 // Move the upper half of the vector to the lower half.
929 for (unsigned j = 0; j != i / 2; ++j)
930 ShuffleMask[j] = i / 2 + j;
931
932 // Fill the rest of the mask with undef.
933 std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), -1);
934
935 Value *Shuf = Builder.CreateShuffleVector(TmpVec, ShuffleMask, "rdx.shuf");
936
937 if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
938 // The builder propagates its fast-math-flags setting.
939 TmpVec = Builder.CreateBinOp((Instruction::BinaryOps)Op, TmpVec, Shuf,
940 "bin.rdx");
941 } else {
942 assert(RecurrenceDescriptor::isMinMaxRecurrenceKind(RdxKind) &&
943 "Invalid min/max");
944 TmpVec = createMinMaxOp(Builder, RdxKind, TmpVec, Shuf);
945 }
946 if (!RedOps.empty())
947 propagateIRFlags(TmpVec, RedOps);
948
949 // We may compute the reassociated scalar ops in a way that does not
950 // preserve nsw/nuw etc. Conservatively, drop those flags.
951 if (auto *ReductionInst = dyn_cast<Instruction>(TmpVec))
952 ReductionInst->dropPoisonGeneratingFlags();
953 }
954 // The result is in the first element of the vector.
955 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
956}
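
// Worked example for VF = 4 (illustrative): the first round shuffles with
// mask <2, 3, undef, undef> and combines lanes {0,2} and {1,3}; the second
// round shuffles with mask <1, undef, undef, undef> and combines lanes
// {0,1}; the scalar result is then read from lane 0.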
957
958Value *llvm::createSimpleTargetReduction(IRBuilderBase &Builder,
959 const TargetTransformInfo *TTI,
960 Value *Src, RecurKind RdxKind,
961 ArrayRef<Value *> RedOps) {
962 TargetTransformInfo::ReductionFlags RdxFlags;
963 RdxFlags.IsMaxOp = RdxKind == RecurKind::SMax || RdxKind == RecurKind::UMax ||
964 RdxKind == RecurKind::FMax;
965 RdxFlags.IsSigned = RdxKind == RecurKind::SMax || RdxKind == RecurKind::SMin;
966
967 auto *SrcVecEltTy = cast<VectorType>(Src->getType())->getElementType();
968 switch (RdxKind) {
969 case RecurKind::Add:
970 return Builder.CreateAddReduce(Src);
971 case RecurKind::Mul:
972 return Builder.CreateMulReduce(Src);
973 case RecurKind::And:
974 return Builder.CreateAndReduce(Src);
975 case RecurKind::Or:
976 return Builder.CreateOrReduce(Src);
977 case RecurKind::Xor:
978 return Builder.CreateXorReduce(Src);
979 case RecurKind::FAdd:
980 return Builder.CreateFAddReduce(ConstantFP::getNegativeZero(SrcVecEltTy),
981 Src);
982 case RecurKind::FMul:
983 return Builder.CreateFMulReduce(ConstantFP::get(SrcVecEltTy, 1.0), Src);
984 case RecurKind::SMax:
985 return Builder.CreateIntMaxReduce(Src, true);
986 case RecurKind::SMin:
987 return Builder.CreateIntMinReduce(Src, true);
988 case RecurKind::UMax:
989 return Builder.CreateIntMaxReduce(Src, false);
990 case RecurKind::UMin:
991 return Builder.CreateIntMinReduce(Src, false);
992 case RecurKind::FMax:
993 return Builder.CreateFPMaxReduce(Src);
994 case RecurKind::FMin:
995 return Builder.CreateFPMinReduce(Src);
996 default:
997 llvm_unreachable("Unhandled opcode");
998 }
999}
1000
1001Value *llvm::createTargetReduction(IRBuilderBase &B,
1002 const TargetTransformInfo *TTI,
1003 const RecurrenceDescriptor &Desc,
1004 Value *Src) {
1005 // TODO: Support in-order reductions based on the recurrence descriptor.
1006 // All ops in the reduction inherit fast-math-flags from the recurrence
1007 // descriptor.
1008 IRBuilderBase::FastMathFlagGuard FMFGuard(B);
1009 B.setFastMathFlags(Desc.getFastMathFlags());
1010 return createSimpleTargetReduction(B, TTI, Src, Desc.getRecurrenceKind());
1011}
1012
1013Value *llvm::createOrderedReduction(IRBuilderBase &B,
1014 const RecurrenceDescriptor &Desc,
1015 Value *Src, Value *Start) {
1016 assert(Desc.getRecurrenceKind() == RecurKind::FAdd &&
1017 "Unexpected reduction kind");
1018 assert(Src->getType()->isVectorTy() && "Expected a vector type");
1019 assert(!Start->getType()->isVectorTy() && "Expected a scalar type");
1020
1021 return B.CreateFAddReduce(Start, Src);
1022}
1023
1024void llvm::propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue) {
1025 auto *VecOp = dyn_cast<Instruction>(I);
1026 if (!VecOp)
1027 return;
1028 auto *Intersection = (OpValue == nullptr) ? dyn_cast<Instruction>(VL[0])
1029 : dyn_cast<Instruction>(OpValue);
1030 if (!Intersection)
1031 return;
1032 const unsigned Opcode = Intersection->getOpcode();
1033 VecOp->copyIRFlags(Intersection);
1034 for (auto *V : VL) {
1035 auto *Instr = dyn_cast<Instruction>(V);
1036 if (!Instr)
1037 continue;
1038 if (OpValue == nullptr || Opcode == Instr->getOpcode())
1039 VecOp->andIRFlags(V);
1040 }
1041}
1042
1043bool llvm::isKnownNegativeInLoop(const SCEV *S, const Loop *L,
1044 ScalarEvolution &SE) {
1045 const SCEV *Zero = SE.getZero(S->getType());
1046 return SE.isAvailableAtLoopEntry(S, L) &&
1047 SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, S, Zero);
1048}
1049
1050bool llvm::isKnownNonNegativeInLoop(const SCEV *S, const Loop *L,
1051 ScalarEvolution &SE) {
1052 const SCEV *Zero = SE.getZero(S->getType());
1053 return SE.isAvailableAtLoopEntry(S, L) &&
1054 SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGE, S, Zero);
1055}
1056
1057bool llvm::cannotBeMinInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
1058 bool Signed) {
1059 unsigned BitWidth = cast<IntegerType>(S->getType())->getBitWidth();
1060 APInt Min = Signed ? APInt::getSignedMinValue(BitWidth) :
1061 APInt::getMinValue(BitWidth);
1062 auto Predicate = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1063 return SE.isAvailableAtLoopEntry(S, L) &&
1064 SE.isLoopEntryGuardedByCond(L, Predicate, S,
1065 SE.getConstant(Min));
1066}
1067
1068bool llvm::cannotBeMaxInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
1069 bool Signed) {
1070 unsigned BitWidth = cast<IntegerType>(S->getType())->getBitWidth();
1071 APInt Max = Signed ? APInt::getSignedMaxValue(BitWidth) :
1072 APInt::getMaxValue(BitWidth);
1073 auto Predicate = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1074 return SE.isAvailableAtLoopEntry(S, L) &&
1075 SE.isLoopEntryGuardedByCond(L, Predicate, S,
1076 SE.getConstant(Max));
1077}
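
// Illustration: for a signed i8 SCEV S, cannotBeMinInLoop checks that loop
// entry is guarded by S > -128 and cannotBeMaxInLoop that it is guarded by
// S < 127; the unsigned variants use the bounds 0 and 255 with unsigned
// predicates.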
1078
1079//===----------------------------------------------------------------------===//
1080// rewriteLoopExitValues - Optimize IV users outside the loop.
1081// As a side effect, reduces the amount of IV processing within the loop.
1082//===----------------------------------------------------------------------===//
1083
1084// Return true if the SCEV expansion generated by the rewriter can replace the
1085// original value. SCEV guarantees that it produces the same value, but the way
1086// it is produced may be illegal IR. Ideally, this function will only be
1087// called for verification.
1088static bool isValidRewrite(ScalarEvolution *SE, Value *FromVal, Value *ToVal) {
1089 // If an SCEV expression subsumed multiple pointers, its expansion could
1090 // reassociate the GEP changing the base pointer. This is illegal because the
1091 // final address produced by a GEP chain must be inbounds relative to its
1092 // underlying object. Otherwise basic alias analysis, among other things,
1093 // could fail in a dangerous way. Ultimately, SCEV will be improved to avoid
1094 // producing an expression involving multiple pointers. Until then, we must
1095 // bail out here.
1096 //
1097 // Retrieve the pointer operand of the GEP. Don't use getUnderlyingObject
1098 // because it understands lcssa phis while SCEV does not.
1099 Value *FromPtr = FromVal;
1100 Value *ToPtr = ToVal;
1101 if (auto *GEP = dyn_cast<GEPOperator>(FromVal))
1102 FromPtr = GEP->getPointerOperand();
1103
1104 if (auto *GEP = dyn_cast<GEPOperator>(ToVal))
1105 ToPtr = GEP->getPointerOperand();
1106
1107 if (FromPtr != FromVal || ToPtr != ToVal) {
1108 // Quickly check the common case
1109 if (FromPtr == ToPtr)
1110 return true;
1111
1112 // SCEV may have rewritten an expression that produces the GEP's pointer
1113 // operand. That's ok as long as the pointer operand has the same base
1114 // pointer. Unlike getUnderlyingObject(), getPointerBase() will find the
1115 // base of a recurrence. This handles the case in which SCEV expansion
1116 // converts a pointer type recurrence into a nonrecurrent pointer base
1117 // indexed by an integer recurrence.
1118
1119 // If the GEP base pointer is a vector of pointers, abort.
1120 if (!FromPtr->getType()->isPointerTy() || !ToPtr->getType()->isPointerTy())
1121 return false;
1122
1123 const SCEV *FromBase = SE->getPointerBase(SE->getSCEV(FromPtr));
1124 const SCEV *ToBase = SE->getPointerBase(SE->getSCEV(ToPtr));
1125 if (FromBase == ToBase)
1126 return true;
1127
1128 LLVM_DEBUG(dbgs() << "rewriteLoopExitValues: GEP rewrite bail out "
1129 << *FromBase << " != " << *ToBase << "\n");
1130
1131 return false;
1132 }
1133 return true;
1134}
1135
1136static bool hasHardUserWithinLoop(const Loop *L, const Instruction *I) {
1137 SmallPtrSet<const Instruction *, 8> Visited;
1138 SmallVector<const Instruction *, 8> WorkList;
1139 Visited.insert(I);
1140 WorkList.push_back(I);
1141 while (!WorkList.empty()) {
1142 const Instruction *Curr = WorkList.pop_back_val();
1143 // This use is outside the loop, nothing to do.
1144 if (!L->contains(Curr))
1145 continue;
1146 // Is this a "hard" use which will not be eliminated easily?
1147 if (Curr->mayHaveSideEffects())
1148 return true;
1149 // Otherwise, add all its users to worklist.
1150 for (auto U : Curr->users()) {
1151 auto *UI = cast<Instruction>(U);
1152 if (Visited.insert(UI).second)
1153 WorkList.push_back(UI);
1154 }
1155 }
1156 return false;
1157}
1158
1159// Collect information about PHI nodes which can be transformed in
1160// rewriteLoopExitValues.
1161struct RewritePhi {
1162 PHINode *PN; // For which PHI node is this replacement?
1163 unsigned Ith; // For which incoming value?
1164 const SCEV *ExpansionSCEV; // The SCEV of the incoming value we are rewriting.
1165 Instruction *ExpansionPoint; // Where we'd like to expand that SCEV.
1166 bool HighCost; // Is this expansion high-cost?
1167
1168 Value *Expansion = nullptr;
1169 bool ValidRewrite = false;
1170
1171 RewritePhi(PHINode *P, unsigned I, const SCEV *Val, Instruction *ExpansionPt,
1172 bool H)
1173 : PN(P), Ith(I), ExpansionSCEV(Val), ExpansionPoint(ExpansionPt),
1174 HighCost(H) {}
1175};
1176
1177// Check whether it is possible to delete the loop after rewriting exit
1178// value. If it is possible, ignore ReplaceExitValue and do rewriting
1179// aggressively.
1180static bool canLoopBeDeleted(Loop *L, SmallVector<RewritePhi, 8> &RewritePhiSet) {
1181 BasicBlock *Preheader = L->getLoopPreheader();
1182 // If there is no preheader, the loop will not be deleted.
1183 if (!Preheader)
1184 return false;
1185
1186 // In the LoopDeletion pass, a Loop can be deleted even when
1187 // ExitingBlocks.size() > 1; we skip the multiple-ExitingBlocks case here
1188 // for simplicity. TODO: If we see a testcase where multiple ExitingBlocks
1189 // can be deleted after exit value rewriting, we can enhance the logic here.
1190 SmallVector<BasicBlock *, 4> ExitingBlocks;
1191 L->getExitingBlocks(ExitingBlocks);
1192 SmallVector<BasicBlock *, 8> ExitBlocks;
1193 L->getUniqueExitBlocks(ExitBlocks);
1194 if (ExitBlocks.size() != 1 || ExitingBlocks.size() != 1)
1195 return false;
1196
1197 BasicBlock *ExitBlock = ExitBlocks[0];
1198 BasicBlock::iterator BI = ExitBlock->begin();
1199 while (PHINode *P = dyn_cast<PHINode>(BI)) {
1200 Value *Incoming = P->getIncomingValueForBlock(ExitingBlocks[0]);
1201
1202 // If the Incoming value of P is found in RewritePhiSet, we know it
1203 // could be rewritten to use a loop invariant value in transformation
1204 // phase later. Skip it in the loop invariant check below.
1205 bool found = false;
1206 for (const RewritePhi &Phi : RewritePhiSet) {
1207 if (!Phi.ValidRewrite)
1208 continue;
1209 unsigned i = Phi.Ith;
1210 if (Phi.PN == P && (Phi.PN)->getIncomingValue(i) == Incoming) {
1211 found = true;
1212 break;
1213 }
1214 }
1215
1216 Instruction *I;
1217 if (!found && (I = dyn_cast<Instruction>(Incoming)))
1218 if (!L->hasLoopInvariantOperands(I))
1219 return false;
1220
1221 ++BI;
1222 }
1223
1224 for (auto *BB : L->blocks())
1225 if (llvm::any_of(*BB, [](Instruction &I) {
1226 return I.mayHaveSideEffects();
1227 }))
1228 return false;
1229
1230 return true;
1231}
1232
1233int llvm::rewriteLoopExitValues(Loop *L, LoopInfo *LI, TargetLibraryInfo *TLI,
1234 ScalarEvolution *SE,
1235 const TargetTransformInfo *TTI,
1236 SCEVExpander &Rewriter, DominatorTree *DT,
1237 ReplaceExitVal ReplaceExitValue,
1238 SmallVector<WeakTrackingVH, 16> &DeadInsts) {
1239 // Check a pre-condition.
1240 assert(L->isRecursivelyLCSSAForm(*DT, *LI) &&
1241 "Indvars did not preserve LCSSA!");
1242
1243 SmallVector<BasicBlock*, 8> ExitBlocks;
1244 L->getUniqueExitBlocks(ExitBlocks);
1245
1246 SmallVector<RewritePhi, 8> RewritePhiSet;
1247 // Find all values that are computed inside the loop, but used outside of it.
1248 // Because of LCSSA, these values will only occur in LCSSA PHI Nodes. Scan
1249 // the exit blocks of the loop to find them.
1250 for (BasicBlock *ExitBB : ExitBlocks) {
1251 // If there are no PHI nodes in this exit block, then no values defined
1252 // inside the loop are used on this path, skip it.
1253 PHINode *PN = dyn_cast<PHINode>(ExitBB->begin());
1254 if (!PN) continue;
1255
1256 unsigned NumPreds = PN->getNumIncomingValues();
1257
1258 // Iterate over all of the PHI nodes.
1259 BasicBlock::iterator BBI = ExitBB->begin();
1260 while ((PN = dyn_cast<PHINode>(BBI++))) {
1261 if (PN->use_empty())
1262 continue; // dead use, don't replace it
1263
1264 if (!SE->isSCEVable(PN->getType()))
1265 continue;
1266
1267 // It's necessary to tell ScalarEvolution about this explicitly so that
1268 // it can walk the def-use list and forget all SCEVs, as it may not be
1269 // watching the PHI itself. Once the new exit value is in place, there
1270 // may not be a def-use connection between the loop and every instruction
1271 // which got a SCEVAddRecExpr for that loop.
1272 SE->forgetValue(PN);
1273
1274 // Iterate over all of the values in all the PHI nodes.
1275 for (unsigned i = 0; i != NumPreds; ++i) {
1276 // If the value being merged in is not integer or is not defined
1277 // in the loop, skip it.
1278 Value *InVal = PN->getIncomingValue(i);
1279 if (!isa<Instruction>(InVal))
1280 continue;
1281
1282 // If this pred is for a subloop, not L itself, skip it.
1283 if (LI->getLoopFor(PN->getIncomingBlock(i)) != L)
1284 continue; // The Block is in a subloop, skip it.
1285
1286 // Check that InVal is defined in the loop.
1287 Instruction *Inst = cast<Instruction>(InVal);
1288 if (!L->contains(Inst))
1289 continue;
1290
1291 // Okay, this instruction has a user outside of the current loop
1292 // and varies predictably *inside* the loop. Evaluate the value it
1293 // contains when the loop exits, if possible. We prefer to start with
1294 // expressions which are true for all exits (so as to maximize
1295 // expression reuse by the SCEVExpander), but resort to per-exit
1296 // evaluation if that fails.
1297 const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop());
1298 if (isa<SCEVCouldNotCompute>(ExitValue) ||
1299 !SE->isLoopInvariant(ExitValue, L) ||
1300 !isSafeToExpand(ExitValue, *SE)) {
1301 // TODO: This should probably be sunk into SCEV in some way; maybe a
1302 // getSCEVForExit(SCEV*, L, ExitingBB)? It can be generalized for
1303 // most SCEV expressions and other recurrence types (e.g. shift
1304 // recurrences). Is there existing code we can reuse?
1305 const SCEV *ExitCount = SE->getExitCount(L, PN->getIncomingBlock(i));
1306 if (isa<SCEVCouldNotCompute>(ExitCount))
1307 continue;
1308 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Inst)))
1309 if (AddRec->getLoop() == L)
1310 ExitValue = AddRec->evaluateAtIteration(ExitCount, *SE);
1311 if (isa<SCEVCouldNotCompute>(ExitValue) ||
1312 !SE->isLoopInvariant(ExitValue, L) ||
1313 !isSafeToExpand(ExitValue, *SE))
1314 continue;
1315 }
1316
1317 // Computing the value outside of the loop brings no benefit if it is
1318 // definitely used inside the loop in a way which cannot be optimized
1319 // away. Avoid doing so unless we know we have a value which computes
1320 // the ExitValue already. TODO: This should be merged into SCEV
1321 // expander to leverage its knowledge of existing expressions.
1322 if (ReplaceExitValue != AlwaysRepl && !isa<SCEVConstant>(ExitValue) &&
1323 !isa<SCEVUnknown>(ExitValue) && hasHardUserWithinLoop(L, Inst))
1324 continue;
1325
1326 // Check if expansions of this SCEV would count as being high cost.
1327 bool HighCost = Rewriter.isHighCostExpansion(
1328 ExitValue, L, SCEVCheapExpansionBudget, TTI, Inst);
1329
1330 // Note that we must not perform expansions until after
1331 // we query *all* the costs, because if we perform a temporary expansion
1332 // in between, one that we might not intend to keep, said expansion
1333 // *may* affect the cost calculation of the next SCEVs we'll query,
1334 // and the next SCEV may erroneously get a smaller cost.
1335
1336 // Collect all the candidate PHINodes to be rewritten.
1337 RewritePhiSet.emplace_back(PN, i, ExitValue, Inst, HighCost);
1338 }
1339 }
1340 }
1341
1342 // Now that we've done preliminary filtering and billed all the SCEVs,
1343 // we can perform the last sanity check: the expansion must be valid.
1344 for (RewritePhi &Phi : RewritePhiSet) {
1345 Phi.Expansion = Rewriter.expandCodeFor(Phi.ExpansionSCEV, Phi.PN->getType(),
1346 Phi.ExpansionPoint);
1347
1348 LLVM_DEBUG(dbgs() << "rewriteLoopExitValues: AfterLoopVal = "do { } while (false)
1349 << *(Phi.Expansion) << '\n'do { } while (false)
1350 << " LoopVal = " << *(Phi.ExpansionPoint) << "\n")do { } while (false);
1351
1352 // FIXME: isValidRewrite() is a hack. It should be an assert, eventually.
1353 Phi.ValidRewrite = isValidRewrite(SE, Phi.ExpansionPoint, Phi.Expansion);
1354 if (!Phi.ValidRewrite) {
1355 DeadInsts.push_back(Phi.Expansion);
1356 continue;
1357 }
1358
1359#ifndef NDEBUG1
1360 // If we reuse an instruction from a loop which is neither L nor one of
1361 // its containing loops, we end up breaking LCSSA form for this loop by
1362 // creating a new use of its instruction.
1363 if (auto *ExitInsn = dyn_cast<Instruction>(Phi.Expansion))
1364 if (auto *EVL = LI->getLoopFor(ExitInsn->getParent()))
1365 if (EVL != L)
1366 assert(EVL->contains(L) && "LCSSA breach detected!")((void)0);
1367#endif
1368 }
1369
1370 // TODO: after isValidRewrite() is an assertion, evaluate whether
1371 // it is beneficial to change how we calculate high-cost:
1372 // if we have a SCEV 'A' which we know we will expand, should we calculate
1373 // the cost of other SCEVs after expanding SCEV 'A',
1374 // thus potentially giving a cost bonus to those other SCEVs?
1375
1376 bool LoopCanBeDel = canLoopBeDeleted(L, RewritePhiSet);
1377 int NumReplaced = 0;
1378
1379 // Transformation.
1380 for (const RewritePhi &Phi : RewritePhiSet) {
1381 if (!Phi.ValidRewrite)
1382 continue;
1383
1384 PHINode *PN = Phi.PN;
1385 Value *ExitVal = Phi.Expansion;
1386
1387 // Only do the rewrite when the ExitValue can be expanded cheaply.
1388 // If LoopCanBeDel is true, rewrite exit value aggressively.
1389 if (ReplaceExitValue == OnlyCheapRepl && !LoopCanBeDel && Phi.HighCost) {
1390 DeadInsts.push_back(ExitVal);
1391 continue;
1392 }
1393
1394 NumReplaced++;
1395 Instruction *Inst = cast<Instruction>(PN->getIncomingValue(Phi.Ith));
1396 PN->setIncomingValue(Phi.Ith, ExitVal);
1397
1398 // If this instruction is dead now, queue it for later deletion. Deleting it
1399 // immediately would invalidate iterators.
1400 if (isInstructionTriviallyDead(Inst, TLI))
1401 DeadInsts.push_back(Inst);
1402
1403 // Replace PN with ExitVal if that is legal and does not break LCSSA.
1404 if (PN->getNumIncomingValues() == 1 &&
1405 LI->replacementPreservesLCSSAForm(PN, ExitVal)) {
1406 PN->replaceAllUsesWith(ExitVal);
1407 PN->eraseFromParent();
1408 }
1409 }
1410
1411 // The insertion point instruction may have been deleted; clear it out
1412 // so that the rewriter doesn't trip over it later.
1413 Rewriter.clearInsertPoint();
1414 return NumReplaced;
1415}
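
To make the overall effect of rewriteLoopExitValues concrete, a hedged before/after sketch at the C++ level (the function names and the assumption n > 0 are illustrative, not from this report):

// Before: the only use of the loop is the value of i at the exit,
// which lives in an LCSSA exit PHI.
int countUp(int n) {   // assumes n > 0
  int i = 0;
  do {
    ++i;
  } while (i < n);
  return i;            // exit PHI for i
}

// After: SCEV evaluates i at loop exit to n, so the exit PHI's incoming
// value is replaced with the loop-invariant expression; the loop body is
// now dead and a later pass (e.g. LoopDeletion) can remove it.
int countUpRewritten(int n) {
  return n;
}
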
1416
1417/// Set weights for \p UnrolledLoop and \p RemainderLoop based on weights for
1418/// \p OrigLoop.
1419void llvm::setProfileInfoAfterUnrolling(Loop *OrigLoop, Loop *UnrolledLoop,
1420 Loop *RemainderLoop, uint64_t UF) {
1421 assert(UF > 0 && "Zero unrolled factor is not supported")((void)0);
1422 assert(UnrolledLoop != RemainderLoop &&((void)0)
1423 "Unrolled and Remainder loops are expected to be distinct")((void)0);
1424
1425 // Get number of iterations in the original scalar loop.
1426 unsigned OrigLoopInvocationWeight = 0;
1427 Optional<unsigned> OrigAverageTripCount =
1428 getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
1429 if (!OrigAverageTripCount)
1430 return;
1431
1432 // Calculate number of iterations in unrolled loop.
1433 unsigned UnrolledAverageTripCount = *OrigAverageTripCount / UF;
1434 // Calculate number of iterations for remainder loop.
1435 unsigned RemainderAverageTripCount = *OrigAverageTripCount % UF;
1436
1437 setLoopEstimatedTripCount(UnrolledLoop, UnrolledAverageTripCount,
1438 OrigLoopInvocationWeight);
1439 setLoopEstimatedTripCount(RemainderLoop, RemainderAverageTripCount,
1440 OrigLoopInvocationWeight);
1441}
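
Worked example: if profiling estimated *OrigAverageTripCount = 10 and the loop was unrolled with UF = 4, the unrolled loop is assigned 10 / 4 = 2 iterations per invocation and the remainder loop 10 % 4 = 2, and both inherit OrigLoopInvocationWeight unchanged.
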
1442
1443/// Utility that implements appending of loops onto a worklist.
1444 /// Loops are added in preorder (analogous to reverse postorder for trees),
1445/// and the worklist is processed LIFO.
1446template <typename RangeT>
1447void llvm::appendReversedLoopsToWorklist(
1448 RangeT &&Loops, SmallPriorityWorklist<Loop *, 4> &Worklist) {
1449 // We use an internal worklist to build up the preorder traversal without
1450 // recursion.
1451 SmallVector<Loop *, 4> PreOrderLoops, PreOrderWorklist;
1452
1453 // We walk the initial sequence of loops in reverse because we generally want
1454 // to visit defs before uses and the worklist is LIFO.
1455 for (Loop *RootL : Loops) {
1456 assert(PreOrderLoops.empty() && "Must start with an empty preorder walk.")((void)0);
1457 assert(PreOrderWorklist.empty() &&((void)0)
1458 "Must start with an empty preorder walk worklist.")((void)0);
1459 PreOrderWorklist.push_back(RootL);
1460 do {
1461 Loop *L = PreOrderWorklist.pop_back_val();
1462 PreOrderWorklist.append(L->begin(), L->end());
1463 PreOrderLoops.push_back(L);
1464 } while (!PreOrderWorklist.empty());
1465
1466 Worklist.insert(std::move(PreOrderLoops));
1467 PreOrderLoops.clear();
1468 }
1469}
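
Worked example of the traversal order: for a root loop R with immediate subloops A and B (in program order), where A contains A1, the inner stack yields PreOrderLoops = [R, B, A, A1]. Because the priority worklist is popped LIFO, the processing order is A1, A, B, R: children before parents, and A before B.
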
1470
1471template <typename RangeT>
1472void llvm::appendLoopsToWorklist(RangeT &&Loops,
1473 SmallPriorityWorklist<Loop *, 4> &Worklist) {
1474 appendReversedLoopsToWorklist(reverse(Loops), Worklist);
1475}
1476
1477template void llvm::appendLoopsToWorklist<ArrayRef<Loop *> &>(
1478 ArrayRef<Loop *> &Loops, SmallPriorityWorklist<Loop *, 4> &Worklist);
1479
1480template void
1481llvm::appendLoopsToWorklist<Loop &>(Loop &L,
1482 SmallPriorityWorklist<Loop *, 4> &Worklist);
1483
1484void llvm::appendLoopsToWorklist(LoopInfo &LI,
1485 SmallPriorityWorklist<Loop *, 4> &Worklist) {
1486 appendReversedLoopsToWorklist(LI, Worklist);
1487}
1488
1489Loop *llvm::cloneLoop(Loop *L, Loop *PL, ValueToValueMapTy &VM,
1490 LoopInfo *LI, LPPassManager *LPM) {
1491 Loop &New = *LI->AllocateLoop();
1492 if (PL)
1493 PL->addChildLoop(&New);
1494 else
1495 LI->addTopLevelLoop(&New);
1496
1497 if (LPM)
1498 LPM->addLoop(New);
1499
1500 // Add all of the blocks in L to the new loop.
1501 for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
1502 I != E; ++I)
1503 if (LI->getLoopFor(*I) == L)
1504 New.addBasicBlockToLoop(cast<BasicBlock>(VM[*I]), *LI);
1505
1506 // Add all of the subloops to the new loop.
1507 for (Loop *I : *L)
1508 cloneLoop(I, &New, VM, LI, LPM);
1509
1510 return &New;
1511}
1512
1513/// IR Values for the lower and upper bounds of a pointer evolution. We
1514/// need to use value-handles because SCEV expansion can invalidate previously
1515/// expanded values. Thus expansion of a pointer can invalidate the bounds for
1516/// a previous one.
1517struct PointerBounds {
1518 TrackingVH<Value> Start;
1519 TrackingVH<Value> End;
1520};
1521
1522/// Expand code for the lower and upper bound of the pointer group \p CG
1523/// in \p TheLoop. \return the values for the bounds.
1524static PointerBounds expandBounds(const RuntimeCheckingPtrGroup *CG,
1525 Loop *TheLoop, Instruction *Loc,
1526 SCEVExpander &Exp) {
1527 LLVMContext &Ctx = Loc->getContext();
1528 Type *PtrArithTy = Type::getInt8PtrTy(Ctx, CG->AddressSpace);
1529
1530 Value *Start = nullptr, *End = nullptr;
1531 LLVM_DEBUG(dbgs() << "LAA: Adding RT check for range:\n")do { } while (false);
1532 Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc);
1533 End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc);
1534 LLVM_DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High << "\n")do { } while (false);
1535 return {Start, End};
1536}
1537
1538/// Turns a collection of checks into a collection of expanded upper and
1539/// lower bounds for both pointers in the check.
1540static SmallVector<std::pair<PointerBounds, PointerBounds>, 4>
1541expandBounds(const SmallVectorImpl<RuntimePointerCheck> &PointerChecks, Loop *L,
1542 Instruction *Loc, SCEVExpander &Exp) {
1543 SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds;
1544
1545 // Here we're relying on the SCEV Expander's cache to only emit code for the
1546 // same bounds once.
1547 transform(PointerChecks, std::back_inserter(ChecksWithBounds),
1548 [&](const RuntimePointerCheck &Check) {
1549 PointerBounds First = expandBounds(Check.first, L, Loc, Exp),
1550 Second = expandBounds(Check.second, L, Loc, Exp);
1551 return std::make_pair(First, Second);
1552 });
1553
1554 return ChecksWithBounds;
1555}
1556
1557std::pair<Instruction *, Instruction *> llvm::addRuntimeChecks(
1558 Instruction *Loc, Loop *TheLoop,
1559 const SmallVectorImpl<RuntimePointerCheck> &PointerChecks,
1560 SCEVExpander &Exp) {
1561 // TODO: Move noalias annotation code from LoopVersioning here and share with LV if possible.
1562 // TODO: Pass RtPtrChecking instead of PointerChecks and SE separately, if possible
1563 auto ExpandedChecks = expandBounds(PointerChecks, TheLoop, Loc, Exp);
1564
1565 LLVMContext &Ctx = Loc->getContext();
1566 Instruction *FirstInst = nullptr;
1567 IRBuilder<> ChkBuilder(Loc);
1568 // Our instructions might fold to a constant.
1569 Value *MemoryRuntimeCheck = nullptr;
1570
1571 // FIXME: this helper is currently a duplicate of the one in
1572 // LoopVectorize.cpp.
1573 auto GetFirstInst = [](Instruction *FirstInst, Value *V,
1574 Instruction *Loc) -> Instruction * {
1575 if (FirstInst)
1576 return FirstInst;
1577 if (Instruction *I = dyn_cast<Instruction>(V))
1578 return I->getParent() == Loc->getParent() ? I : nullptr;
1579 return nullptr;
1580 };
1581
1582 for (const auto &Check : ExpandedChecks) {
1583 const PointerBounds &A = Check.first, &B = Check.second;
1584 // Check if two pointers (A and B) conflict, where a conflict is computed as:
1585 // start(A) < end(B) && start(B) < end(A)
1586 unsigned AS0 = A.Start->getType()->getPointerAddressSpace();
1587 unsigned AS1 = B.Start->getType()->getPointerAddressSpace();
1588
1589 assert((AS0 == B.End->getType()->getPointerAddressSpace()) &&((void)0)
1590 (AS1 == A.End->getType()->getPointerAddressSpace()) &&((void)0)
1591 "Trying to bounds check pointers with different address spaces")((void)0);
1592
1593 Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
1594 Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);
1595
1596 Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc");
1597 Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc");
1598 Value *End0 = ChkBuilder.CreateBitCast(A.End, PtrArithTy1, "bc");
1599 Value *End1 = ChkBuilder.CreateBitCast(B.End, PtrArithTy0, "bc");
1600
1601 // [A|B].Start points to the first accessed byte under base [A|B].
1602 // [A|B].End points to the last accessed byte, plus one.
1603 // There is no conflict when the intervals are disjoint:
1604 // NoConflict = (B.Start >= A.End) || (A.Start >= B.End)
1605 //
1606 // bound0 = (B.Start < A.End)
1607 // bound1 = (A.Start < B.End)
1608 // IsConflict = bound0 & bound1
1609 Value *Cmp0 = ChkBuilder.CreateICmpULT(Start0, End1, "bound0");
1610 FirstInst = GetFirstInst(FirstInst, Cmp0, Loc);
1611 Value *Cmp1 = ChkBuilder.CreateICmpULT(Start1, End0, "bound1");
1612 FirstInst = GetFirstInst(FirstInst, Cmp1, Loc);
1613 Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
1614 FirstInst = GetFirstInst(FirstInst, IsConflict, Loc);
1615 if (MemoryRuntimeCheck) {
1616 IsConflict =
1617 ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
1618 FirstInst = GetFirstInst(FirstInst, IsConflict, Loc);
1619 }
1620 MemoryRuntimeCheck = IsConflict;
1621 }
1622
1623 if (!MemoryRuntimeCheck)
1624 return std::make_pair(nullptr, nullptr);
1625
1626 // We have to do this trickery because the IRBuilder might fold the check to a
1627 // constant expression in which case there is no Instruction anchored in
1628 // the block.
1629 Instruction *Check =
1630 BinaryOperator::CreateAnd(MemoryRuntimeCheck, ConstantInt::getTrue(Ctx));
1631 ChkBuilder.Insert(Check, "memcheck.conflict");
1632 FirstInst = GetFirstInst(FirstInst, Check, Loc);
1633 return std::make_pair(FirstInst, Check);
1634}
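
Worked example of the conflict predicate (addresses are illustrative): for A = [0x100, 0x140) and B = [0x120, 0x160), bound0 = (0x120 < 0x140) and bound1 = (0x100 < 0x160) are both true, so IsConflict is true. Moving B to [0x140, 0x180) makes bound0 = (0x140 < 0x140) false, proving the intervals disjoint. The per-pair checks are OR'ed into conflict.rdx, so a single i1 value guards the whole set of pointer pairs.
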
1635
1636Optional<IVConditionInfo> llvm::hasPartialIVCondition(Loop &L,
1637 unsigned MSSAThreshold,
1638 MemorySSA &MSSA,
1639 AAResults &AA) {
1640 auto *TI = dyn_cast<BranchInst>(L.getHeader()->getTerminator());
1641 if (!TI || !TI->isConditional())
1642 return {};
1643
1644 auto *CondI = dyn_cast<CmpInst>(TI->getCondition());
1645 // The case with the condition outside the loop should already be handled
1646 // earlier.
1647 if (!CondI || !L.contains(CondI))
1648 return {};
1649
1650 SmallVector<Instruction *> InstToDuplicate;
1651 InstToDuplicate.push_back(CondI);
1652
1653 SmallVector<Value *, 4> WorkList;
1654 WorkList.append(CondI->op_begin(), CondI->op_end());
1655
1656 SmallVector<MemoryAccess *, 4> AccessesToCheck;
1657 SmallVector<MemoryLocation, 4> AccessedLocs;
1658 while (!WorkList.empty()) {
1659 Instruction *I = dyn_cast<Instruction>(WorkList.pop_back_val());
1660 if (!I || !L.contains(I))
1661 continue;
1662
1663 // TODO: support additional instructions.
1664 if (!isa<LoadInst>(I) && !isa<GetElementPtrInst>(I))
1665 return {};
1666
1667 // Do not duplicate volatile and atomic loads.
1668 if (auto *LI = dyn_cast<LoadInst>(I))
1669 if (LI->isVolatile() || LI->isAtomic())
1670 return {};
1671
1672 InstToDuplicate.push_back(I);
1673 if (MemoryAccess *MA = MSSA.getMemoryAccess(I)) {
1674 if (auto *MemUse = dyn_cast_or_null<MemoryUse>(MA)) {
1675 // Queue the defining access for the alias checks below.
1676 AccessesToCheck.push_back(MemUse->getDefiningAccess());
1677 AccessedLocs.push_back(MemoryLocation::get(I));
1678 } else {
1679 // MemoryDefs may clobber the location or may be atomic memory
1680 // operations. Bail out.
1681 return {};
1682 }
1683 }
1684 WorkList.append(I->op_begin(), I->op_end());
1685 }
1686
1687 if (InstToDuplicate.empty())
1688 return {};
1689
1690 SmallVector<BasicBlock *, 4> ExitingBlocks;
1691 L.getExitingBlocks(ExitingBlocks);
1692 auto HasNoClobbersOnPath =
1693 [&L, &AA, &AccessedLocs, &ExitingBlocks, &InstToDuplicate,
1694 MSSAThreshold](BasicBlock *Succ, BasicBlock *Header,
1695 SmallVector<MemoryAccess *, 4> AccessesToCheck)
1696 -> Optional<IVConditionInfo> {
1697 IVConditionInfo Info;
1698 // First, collect all blocks in the loop that are on a path from Succ
1699 // to the header.
1700 SmallVector<BasicBlock *, 4> WorkList;
1701 WorkList.push_back(Succ);
1702 WorkList.push_back(Header);
1703 SmallPtrSet<BasicBlock *, 4> Seen;
1704 Seen.insert(Header);
1705 Info.PathIsNoop &=
1706 all_of(*Header, [](Instruction &I) { return !I.mayHaveSideEffects(); });
1707
1708 while (!WorkList.empty()) {
1709 BasicBlock *Current = WorkList.pop_back_val();
1710 if (!L.contains(Current))
1711 continue;
1712 const auto &SeenIns = Seen.insert(Current);
1713 if (!SeenIns.second)
1714 continue;
1715
1716 Info.PathIsNoop &= all_of(
1717 *Current, [](Instruction &I) { return !I.mayHaveSideEffects(); });
1718 WorkList.append(succ_begin(Current), succ_end(Current));
1719 }
1720
1721 // Require at least 2 blocks on a path through the loop. This skips
1722 // paths that directly exit the loop.
1723 if (Seen.size() < 2)
1724 return {};
1725
1726 // Next, check if there are any MemoryDefs on the path through
1727 // the loop (i.e. in the Seen set) that may-alias any of the locations in
1728 // AccessedLocs. If that is the case, they may modify the condition and
1729 // partial unswitching is not possible.
1730 SmallPtrSet<MemoryAccess *, 4> SeenAccesses;
1731 while (!AccessesToCheck.empty()) {
1732 MemoryAccess *Current = AccessesToCheck.pop_back_val();
1733 auto SeenI = SeenAccesses.insert(Current);
1734 if (!SeenI.second || !Seen.contains(Current->getBlock()))
1735 continue;
1736
1737 // Bail out if we have exceeded the threshold.
1738 if (SeenAccesses.size() >= MSSAThreshold)
1739 return {};
1740
1741 // MemoryUse are read-only accesses.
1742 if (isa<MemoryUse>(Current))
1743 continue;
1744
1745 // For a MemoryDef, check if it aliases any of the locations feeding
1746 // the original condition.
1747 if (auto *CurrentDef = dyn_cast<MemoryDef>(Current)) {
1748 if (any_of(AccessedLocs, [&AA, CurrentDef](MemoryLocation &Loc) {
1749 return isModSet(
1750 AA.getModRefInfo(CurrentDef->getMemoryInst(), Loc));
1751 }))
1752 return {};
1753 }
1754
1755 for (Use &U : Current->uses())
1756 AccessesToCheck.push_back(cast<MemoryAccess>(U.getUser()));
1757 }
1758
1759 // We could also allow loops with known trip counts without mustprogress,
1760 // but ScalarEvolution may not be available.
1761 Info.PathIsNoop &= isMustProgress(&L);
1762
1763 // If the path is considered a no-op so far, check if it reaches a
1764 // single exit block without any phis. This ensures no values from the
1765 // loop are used outside of the loop.
1766 if (Info.PathIsNoop) {
1767 for (auto *Exiting : ExitingBlocks) {
1768 if (!Seen.contains(Exiting))
1769 continue;
1770 for (auto *Succ : successors(Exiting)) {
1771 if (L.contains(Succ))
1772 continue;
1773
1774 Info.PathIsNoop &= llvm::empty(Succ->phis()) &&
1775 (!Info.ExitForPath || Info.ExitForPath == Succ);
1776 if (!Info.PathIsNoop)
1777 break;
1778 assert((!Info.ExitForPath || Info.ExitForPath == Succ) &&((void)0)
1779 "cannot have multiple exit blocks")((void)0);
1780 Info.ExitForPath = Succ;
1781 }
1782 }
1783 }
1784 if (!Info.ExitForPath)
1785 Info.PathIsNoop = false;
1786
1787 Info.InstToDuplicate = InstToDuplicate;
1788 return Info;
1789 };
1790
1791 // If we branch to the same successor, partial unswitching will not be
1792 // beneficial.
1793 if (TI->getSuccessor(0) == TI->getSuccessor(1))
1794 return {};
1795
1796 if (auto Info = HasNoClobbersOnPath(TI->getSuccessor(0), L.getHeader(),
1797 AccessesToCheck)) {
1798 Info->KnownValue = ConstantInt::getTrue(TI->getContext());
1799 return Info;
1800 }
1801 if (auto Info = HasNoClobbersOnPath(TI->getSuccessor(1), L.getHeader(),
1802 AccessesToCheck)) {
1803 Info->KnownValue = ConstantInt::getFalse(TI->getContext());
1804 return Info;
1805 }
1806
1807 return {};
1808}
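
A hedged C++-level sketch of the shape hasPartialIVCondition looks for (names are illustrative; the controlling compare must be fed only by duplicatable loads/GEPs, and MemorySSA must show that nothing on the chosen path clobbers the loaded locations):

// If no MemoryDef on the path may-alias *p, the header condition is
// effectively invariant for a whole invocation of the loop, so the loop
// can be partially unswitched on the value of *p > 0.
void saturate(int *a, const int *p, int n) {
  for (int i = 0; i < n; ++i) {
    if (*p > 0)   // the load of *p feeds the controlling compare
      a[i] = 1;   // assumed not to alias *p (what the MemorySSA walk checks)
    else
      a[i] = 2;
  }
}
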

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR/IRBuilder.h

1//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the IRBuilder class, which is used as a convenient way
10// to create LLVM instructions with a consistent and simplified interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_IRBUILDER_H
15#define LLVM_IR_IRBUILDER_H
16
17#include "llvm-c/Types.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/StringRef.h"
22#include "llvm/ADT/Twine.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/Constant.h"
25#include "llvm/IR/ConstantFolder.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/DebugInfoMetadata.h"
29#include "llvm/IR/DebugLoc.h"
30#include "llvm/IR/DerivedTypes.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/GlobalVariable.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/IntrinsicInst.h"
37#include "llvm/IR/LLVMContext.h"
38#include "llvm/IR/Module.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/IR/Type.h"
41#include "llvm/IR/Value.h"
42#include "llvm/IR/ValueHandle.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/CBindingWrapping.h"
45#include "llvm/Support/Casting.h"
46#include <cassert>
47#include <cstddef>
48#include <cstdint>
49#include <functional>
50#include <utility>
51
52namespace llvm {
53
54class APInt;
55class MDNode;
56class Use;
57
58/// This provides the default implementation of the IRBuilder
59/// 'InsertHelper' method that is called whenever an instruction is created by
60/// IRBuilder and needs to be inserted.
61///
62/// By default, this inserts the instruction at the insertion point.
63class IRBuilderDefaultInserter {
64public:
65 virtual ~IRBuilderDefaultInserter();
66
67 virtual void InsertHelper(Instruction *I, const Twine &Name,
68 BasicBlock *BB,
69 BasicBlock::iterator InsertPt) const {
70 if (BB) BB->getInstList().insert(InsertPt, I);
71 I->setName(Name);
72 }
73};
74
75/// Provides an 'InsertHelper' that calls a user-provided callback after
76/// performing the default insertion.
77class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
78 std::function<void(Instruction *)> Callback;
79
80public:
81 virtual ~IRBuilderCallbackInserter();
82
83 IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
84 : Callback(std::move(Callback)) {}
85
86 void InsertHelper(Instruction *I, const Twine &Name,
87 BasicBlock *BB,
88 BasicBlock::iterator InsertPt) const override {
89 IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
90 Callback(I);
91 }
92};
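
A minimal usage sketch (not from this report; BB, L and R are assumed to come from surrounding code): the callback observes every instruction the builder actually inserts, which is useful for passes that must track or later erase what they created.

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

Value *emitAddAndRecord(BasicBlock *BB, Value *L, Value *R,
                        SmallVectorImpl<Instruction *> &NewInsts) {
  IRBuilder<ConstantFolder, IRBuilderCallbackInserter> B(
      BB->getContext(), ConstantFolder(),
      IRBuilderCallbackInserter(
          [&NewInsts](Instruction *I) { NewInsts.push_back(I); }));
  B.SetInsertPoint(BB);
  // If the add constant-folds, nothing is inserted and the callback
  // never fires, matching the default-insertion-then-callback contract.
  return B.CreateAdd(L, R, "sum");
}
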
93
94/// Common base class shared among various IRBuilders.
95class IRBuilderBase {
96 /// Pairs of (metadata kind, MDNode *) that should be added to all newly
97 /// created instructions, like !dbg metadata.
98 SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy;
99
100 /// Add or update an entry (Kind, MD) in MetadataToCopy, if \p MD is not
101 /// null. If \p MD is null, remove the entry with \p Kind.
102 void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) {
103 if (!MD) {
104 erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) {
105 return KV.first == Kind;
106 });
107 return;
108 }
109
110 for (auto &KV : MetadataToCopy)
111 if (KV.first == Kind) {
112 KV.second = MD;
113 return;
114 }
115
116 MetadataToCopy.emplace_back(Kind, MD);
117 }
118
119protected:
120 BasicBlock *BB;
121 BasicBlock::iterator InsertPt;
122 LLVMContext &Context;
123 const IRBuilderFolder &Folder;
124 const IRBuilderDefaultInserter &Inserter;
125
126 MDNode *DefaultFPMathTag;
127 FastMathFlags FMF;
128
129 bool IsFPConstrained;
130 fp::ExceptionBehavior DefaultConstrainedExcept;
131 RoundingMode DefaultConstrainedRounding;
132
133 ArrayRef<OperandBundleDef> DefaultOperandBundles;
134
135public:
136 IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
137 const IRBuilderDefaultInserter &Inserter,
138 MDNode *FPMathTag, ArrayRef<OperandBundleDef> OpBundles)
139 : Context(context), Folder(Folder), Inserter(Inserter),
140 DefaultFPMathTag(FPMathTag), IsFPConstrained(false),
141 DefaultConstrainedExcept(fp::ebStrict),
142 DefaultConstrainedRounding(RoundingMode::Dynamic),
143 DefaultOperandBundles(OpBundles) {
144 ClearInsertionPoint();
145 }
146
147 /// Insert and return the specified instruction.
148 template<typename InstTy>
149 InstTy *Insert(InstTy *I, const Twine &Name = "") const {
150 Inserter.InsertHelper(I, Name, BB, InsertPt);
151 AddMetadataToInst(I);
152 return I;
153 }
154
155 /// No-op overload to handle constants.
156 Constant *Insert(Constant *C, const Twine& = "") const {
157 return C;
158 }
159
160 Value *Insert(Value *V, const Twine &Name = "") const {
161 if (Instruction *I = dyn_cast<Instruction>(V))
162 return Insert(I, Name);
163 assert(isa<Constant>(V))((void)0);
164 return V;
165 }
166
167 //===--------------------------------------------------------------------===//
168 // Builder configuration methods
169 //===--------------------------------------------------------------------===//
170
171 /// Clear the insertion point: created instructions will not be
172 /// inserted into a block.
173 void ClearInsertionPoint() {
174 BB = nullptr;
175 InsertPt = BasicBlock::iterator();
176 }
177
178 BasicBlock *GetInsertBlock() const { return BB; }
179 BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
180 LLVMContext &getContext() const { return Context; }
181
182 /// This specifies that created instructions should be appended to the
183 /// end of the specified block.
184 void SetInsertPoint(BasicBlock *TheBB) {
185 BB = TheBB;
186 InsertPt = BB->end();
187 }
188
189 /// This specifies that created instructions should be inserted before
190 /// the specified instruction.
191 void SetInsertPoint(Instruction *I) {
192 BB = I->getParent();
193 InsertPt = I->getIterator();
194 assert(InsertPt != BB->end() && "Can't read debug loc from end()")((void)0);
195 SetCurrentDebugLocation(I->getDebugLoc());
196 }
197
198 /// This specifies that created instructions should be inserted at the
199 /// specified point.
200 void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
201 BB = TheBB;
202 InsertPt = IP;
203 if (IP != TheBB->end())
204 SetCurrentDebugLocation(IP->getDebugLoc());
205 }
206
207 /// Set location information used by debugging information.
208 void SetCurrentDebugLocation(DebugLoc L) {
209 AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
210 }
211
212 /// Collect metadata with IDs \p MetadataKinds from \p Src which should be
213 /// added to all created instructions. Entries present in MetadataToCopy but
214 /// not on \p Src will be dropped from MetadataToCopy.
215 void CollectMetadataToCopy(Instruction *Src,
216 ArrayRef<unsigned> MetadataKinds) {
217 for (unsigned K : MetadataKinds)
218 AddOrRemoveMetadataToCopy(K, Src->getMetadata(K));
219 }
220
221 /// Get location information used by debugging information.
222 DebugLoc getCurrentDebugLocation() const {
223 for (auto &KV : MetadataToCopy)
224 if (KV.first == LLVMContext::MD_dbg)
225 return {cast<DILocation>(KV.second)};
226
227 return {};
228 }
229
230 /// If this builder has a current debug location, set it on the
231 /// specified instruction.
232 void SetInstDebugLocation(Instruction *I) const {
233 for (const auto &KV : MetadataToCopy)
234 if (KV.first == LLVMContext::MD_dbg) {
235 I->setDebugLoc(DebugLoc(KV.second));
236 return;
237 }
238 }
239
240 /// Add all entries in MetadataToCopy to \p I.
241 void AddMetadataToInst(Instruction *I) const {
242 for (auto &KV : MetadataToCopy)
243 I->setMetadata(KV.first, KV.second);
244 }
245
246 /// Get the return type of the current function that we're emitting
247 /// into.
248 Type *getCurrentFunctionReturnType() const;
249
250 /// InsertPoint - A saved insertion point.
251 class InsertPoint {
252 BasicBlock *Block = nullptr;
253 BasicBlock::iterator Point;
254
255 public:
256 /// Creates a new insertion point which doesn't point to anything.
257 InsertPoint() = default;
258
259 /// Creates a new insertion point at the given location.
260 InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
261 : Block(InsertBlock), Point(InsertPoint) {}
262
263 /// Returns true if this insert point is set.
264 bool isSet() const { return (Block != nullptr); }
265
266 BasicBlock *getBlock() const { return Block; }
267 BasicBlock::iterator getPoint() const { return Point; }
268 };
269
270 /// Returns the current insert point.
271 InsertPoint saveIP() const {
272 return InsertPoint(GetInsertBlock(), GetInsertPoint());
273 }
274
275 /// Returns the current insert point, clearing it in the process.
276 InsertPoint saveAndClearIP() {
277 InsertPoint IP(GetInsertBlock(), GetInsertPoint());
278 ClearInsertionPoint();
279 return IP;
280 }
281
282 /// Sets the current insert point to a previously-saved location.
283 void restoreIP(InsertPoint IP) {
284 if (IP.isSet())
285 SetInsertPoint(IP.getBlock(), IP.getPoint());
286 else
287 ClearInsertionPoint();
288 }
289
290 /// Get the floating point math metadata being used.
291 MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
292
293 /// Get the flags to be applied to created floating point ops
294 FastMathFlags getFastMathFlags() const { return FMF; }
295
296 FastMathFlags &getFastMathFlags() { return FMF; }
297
298 /// Clear the fast-math flags.
299 void clearFastMathFlags() { FMF.clear(); }
300
301 /// Set the floating point math metadata to be used.
302 void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
303
304 /// Set the fast-math flags to be used with generated fp-math operators
305 void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }
306
307 /// Enable/Disable use of constrained floating point math. When
308 /// enabled the CreateF<op>() calls instead create constrained
309 /// floating point intrinsic calls. Fast math flags are unaffected
310 /// by this setting.
311 void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }
312
313 /// Query for the use of constrained floating point math
314 bool getIsFPConstrained() { return IsFPConstrained; }
315
316 /// Set the exception handling to be used with constrained floating point
317 void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
318#ifndef NDEBUG1
319 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(NewExcept);
320 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!")((void)0);
321#endif
322 DefaultConstrainedExcept = NewExcept;
323 }
324
325 /// Set the rounding mode handling to be used with constrained floating point
326 void setDefaultConstrainedRounding(RoundingMode NewRounding) {
327#ifndef NDEBUG1
328 Optional<StringRef> RoundingStr = RoundingModeToStr(NewRounding);
329 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!")((void)0);
330#endif
331 DefaultConstrainedRounding = NewRounding;
332 }
333
334 /// Get the exception handling used with constrained floating point
335 fp::ExceptionBehavior getDefaultConstrainedExcept() {
336 return DefaultConstrainedExcept;
337 }
338
339 /// Get the rounding mode handling used with constrained floating point
340 RoundingMode getDefaultConstrainedRounding() {
341 return DefaultConstrainedRounding;
342 }
343
344 void setConstrainedFPFunctionAttr() {
345 assert(BB && "Must have a basic block to set any function attributes!")((void)0);
346
347 Function *F = BB->getParent();
348 if (!F->hasFnAttribute(Attribute::StrictFP)) {
349 F->addFnAttr(Attribute::StrictFP);
350 }
351 }
352
353 void setConstrainedFPCallAttr(CallBase *I) {
354 I->addAttribute(AttributeList::FunctionIndex, Attribute::StrictFP);
355 }
356
357 void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
358 DefaultOperandBundles = OpBundles;
359 }
360
361 //===--------------------------------------------------------------------===//
362 // RAII helpers.
363 //===--------------------------------------------------------------------===//
364
365 // RAII object that stores the current insertion point and restores it
366 // when the object is destroyed. This includes the debug location.
367 class InsertPointGuard {
368 IRBuilderBase &Builder;
369 AssertingVH<BasicBlock> Block;
370 BasicBlock::iterator Point;
371 DebugLoc DbgLoc;
372
373 public:
374 InsertPointGuard(IRBuilderBase &B)
375 : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
376 DbgLoc(B.getCurrentDebugLocation()) {}
377
378 InsertPointGuard(const InsertPointGuard &) = delete;
379 InsertPointGuard &operator=(const InsertPointGuard &) = delete;
380
381 ~InsertPointGuard() {
382 Builder.restoreIP(InsertPoint(Block, Point));
383 Builder.SetCurrentDebugLocation(DbgLoc);
384 }
385 };
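
A short usage sketch (the helper is hypothetical; Builder, Preheader and Cond are assumed from surrounding code): the guard lets a pass briefly emit elsewhere and then return to exactly where it was, debug location included.

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

void emitPreheaderAssume(IRBuilderBase &Builder, BasicBlock *Preheader,
                         Value *Cond) {
  IRBuilderBase::InsertPointGuard Guard(Builder); // saves block, point, dbg loc
  Builder.SetInsertPoint(Preheader->getTerminator());
  Builder.CreateAssumption(Cond);
  // Guard's destructor restores the original insertion point here.
}
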
386
387 // RAII object that stores the current fast math settings and restores
388 // them when the object is destroyed.
389 class FastMathFlagGuard {
390 IRBuilderBase &Builder;
391 FastMathFlags FMF;
392 MDNode *FPMathTag;
393 bool IsFPConstrained;
394 fp::ExceptionBehavior DefaultConstrainedExcept;
395 RoundingMode DefaultConstrainedRounding;
396
397 public:
398 FastMathFlagGuard(IRBuilderBase &B)
399 : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
400 IsFPConstrained(B.IsFPConstrained),
401 DefaultConstrainedExcept(B.DefaultConstrainedExcept),
402 DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}
403
404 FastMathFlagGuard(const FastMathFlagGuard &) = delete;
405 FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
406
407 ~FastMathFlagGuard() {
408 Builder.FMF = FMF;
409 Builder.DefaultFPMathTag = FPMathTag;
410 Builder.IsFPConstrained = IsFPConstrained;
411 Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
412 Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
413 }
414 };
415
416 // RAII object that stores the current default operand bundles and restores
417 // them when the object is destroyed.
418 class OperandBundlesGuard {
419 IRBuilderBase &Builder;
420 ArrayRef<OperandBundleDef> DefaultOperandBundles;
421
422 public:
423 OperandBundlesGuard(IRBuilderBase &B)
424 : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}
425
426 OperandBundlesGuard(const OperandBundlesGuard &) = delete;
427 OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;
428
429 ~OperandBundlesGuard() {
430 Builder.DefaultOperandBundles = DefaultOperandBundles;
431 }
432 };
433
434
435 //===--------------------------------------------------------------------===//
436 // Miscellaneous creation methods.
437 //===--------------------------------------------------------------------===//
438
439 /// Make a new global variable with initializer type i8*
440 ///
441 /// Make a new global variable with an initializer that has array of i8 type
442 /// filled in with the null terminated string value specified. The new global
443 /// variable will be marked mergable with any others of the same contents. If
444 /// Name is specified, it is the name of the global variable created.
445 ///
446 /// If no module is given via \p M, it is taken from the insertion point basic
447 /// block.
448 GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
449 unsigned AddressSpace = 0,
450 Module *M = nullptr);
451
452 /// Get a constant value representing either true or false.
453 ConstantInt *getInt1(bool V) {
454 return ConstantInt::get(getInt1Ty(), V);
455 }
456
457 /// Get the constant value for i1 true.
458 ConstantInt *getTrue() {
459 return ConstantInt::getTrue(Context);
460 }
461
462 /// Get the constant value for i1 false.
463 ConstantInt *getFalse() {
464 return ConstantInt::getFalse(Context);
465 }
466
467 /// Get a constant 8-bit value.
468 ConstantInt *getInt8(uint8_t C) {
469 return ConstantInt::get(getInt8Ty(), C);
470 }
471
472 /// Get a constant 16-bit value.
473 ConstantInt *getInt16(uint16_t C) {
474 return ConstantInt::get(getInt16Ty(), C);
475 }
476
477 /// Get a constant 32-bit value.
478 ConstantInt *getInt32(uint32_t C) {
479 return ConstantInt::get(getInt32Ty(), C);
480 }
481
482 /// Get a constant 64-bit value.
483 ConstantInt *getInt64(uint64_t C) {
484 return ConstantInt::get(getInt64Ty(), C);
485 }
486
487 /// Get a constant N-bit value, zero extended or truncated from
488 /// a 64-bit value.
489 ConstantInt *getIntN(unsigned N, uint64_t C) {
490 return ConstantInt::get(getIntNTy(N), C);
491 }
492
493 /// Get a constant integer value.
494 ConstantInt *getInt(const APInt &AI) {
495 return ConstantInt::get(Context, AI);
496 }
497
498 //===--------------------------------------------------------------------===//
499 // Type creation methods
500 //===--------------------------------------------------------------------===//
501
502 /// Fetch the type representing a single bit
503 IntegerType *getInt1Ty() {
504 return Type::getInt1Ty(Context);
505 }
506
507 /// Fetch the type representing an 8-bit integer.
508 IntegerType *getInt8Ty() {
509 return Type::getInt8Ty(Context);
510 }
511
512 /// Fetch the type representing a 16-bit integer.
513 IntegerType *getInt16Ty() {
514 return Type::getInt16Ty(Context);
515 }
516
517 /// Fetch the type representing a 32-bit integer.
518 IntegerType *getInt32Ty() {
519 return Type::getInt32Ty(Context);
520 }
521
522 /// Fetch the type representing a 64-bit integer.
523 IntegerType *getInt64Ty() {
524 return Type::getInt64Ty(Context);
525 }
526
527 /// Fetch the type representing a 128-bit integer.
528 IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
529
530 /// Fetch the type representing an N-bit integer.
531 IntegerType *getIntNTy(unsigned N) {
532 return Type::getIntNTy(Context, N);
533 }
534
535 /// Fetch the type representing a 16-bit floating point value.
536 Type *getHalfTy() {
537 return Type::getHalfTy(Context);
538 }
539
540 /// Fetch the type representing a 16-bit brain floating point value.
541 Type *getBFloatTy() {
542 return Type::getBFloatTy(Context);
543 }
544
545 /// Fetch the type representing a 32-bit floating point value.
546 Type *getFloatTy() {
547 return Type::getFloatTy(Context);
548 }
549
550 /// Fetch the type representing a 64-bit floating point value.
551 Type *getDoubleTy() {
552 return Type::getDoubleTy(Context);
553 }
554
555 /// Fetch the type representing void.
556 Type *getVoidTy() {
557 return Type::getVoidTy(Context);
558 }
559
560 /// Fetch the type representing a pointer to an 8-bit integer value.
561 PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
562 return Type::getInt8PtrTy(Context, AddrSpace);
563 }
564
565 /// Fetch the type representing a pointer to an integer value.
566 IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
567 return DL.getIntPtrType(Context, AddrSpace);
568 }
569
570 //===--------------------------------------------------------------------===//
571 // Intrinsic creation methods
572 //===--------------------------------------------------------------------===//
573
574 /// Create and insert a memset to the specified pointer and the
575 /// specified value.
576 ///
577 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
578 /// specified, it will be added to the instruction. Likewise with alias.scope
579 /// and noalias tags.
580 CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size,
581 MaybeAlign Align, bool isVolatile = false,
582 MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
583 MDNode *NoAliasTag = nullptr) {
584 return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
585 TBAATag, ScopeTag, NoAliasTag);
586 }
587
588 CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align,
589 bool isVolatile = false, MDNode *TBAATag = nullptr,
590 MDNode *ScopeTag = nullptr,
591 MDNode *NoAliasTag = nullptr);
592
593 /// Create and insert an element unordered-atomic memset of the region of
594 /// memory starting at the given pointer to the given value.
595 ///
596 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
597 /// specified, it will be added to the instruction. Likewise with alias.scope
598 /// and noalias tags.
599 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
600 uint64_t Size, Align Alignment,
601 uint32_t ElementSize,
602 MDNode *TBAATag = nullptr,
603 MDNode *ScopeTag = nullptr,
604 MDNode *NoAliasTag = nullptr) {
605 return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
606 Align(Alignment), ElementSize,
607 TBAATag, ScopeTag, NoAliasTag);
608 }
609
610 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
611 Value *Size, Align Alignment,
612 uint32_t ElementSize,
613 MDNode *TBAATag = nullptr,
614 MDNode *ScopeTag = nullptr,
615 MDNode *NoAliasTag = nullptr);
616
617 /// Create and insert a memcpy between the specified pointers.
618 ///
619 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
620 /// specified, it will be added to the instruction. Likewise with alias.scope
621 /// and noalias tags.
622 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
623 MaybeAlign SrcAlign, uint64_t Size,
624 bool isVolatile = false, MDNode *TBAATag = nullptr,
625 MDNode *TBAAStructTag = nullptr,
626 MDNode *ScopeTag = nullptr,
627 MDNode *NoAliasTag = nullptr) {
628 return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
629 isVolatile, TBAATag, TBAAStructTag, ScopeTag,
630 NoAliasTag);
631 }
632
633 CallInst *CreateMemTransferInst(
634 Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
635 MaybeAlign SrcAlign, Value *Size, bool isVolatile = false,
636 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
637 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
638
639 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
640 MaybeAlign SrcAlign, Value *Size,
641 bool isVolatile = false, MDNode *TBAATag = nullptr,
642 MDNode *TBAAStructTag = nullptr,
643 MDNode *ScopeTag = nullptr,
644 MDNode *NoAliasTag = nullptr) {
645 return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
646 SrcAlign, Size, isVolatile, TBAATag,
647 TBAAStructTag, ScopeTag, NoAliasTag);
648 }
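
A usage sketch for the constant-size overload above (B, Dst and Src are assumed from surrounding code; the sizes and alignments are illustrative): copying 64 bytes between 16-byte-aligned pointers emits a single llvm.memcpy call.

// B is an IRBuilder<> positioned at the desired insertion point;
// Dst and Src are pointer-typed Values.
CallInst *Copy = B.CreateMemCpy(Dst, /*DstAlign=*/MaybeAlign(16),
                                Src, /*SrcAlign=*/MaybeAlign(16),
                                /*Size=*/64);
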
649
650 CallInst *
651 CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src,
652 MaybeAlign SrcAlign, Value *Size, bool IsVolatile = false,
653 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
654 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
655
656 /// Create and insert an element unordered-atomic memcpy between the
657 /// specified pointers.
658 ///
659 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
660 ///
661 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
662 /// specified, it will be added to the instruction. Likewise with alias.scope
663 /// and noalias tags.
664 CallInst *CreateElementUnorderedAtomicMemCpy(
665 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
666 uint32_t ElementSize, MDNode *TBAATag = nullptr,
667 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
668 MDNode *NoAliasTag = nullptr);
669
670 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
671 MaybeAlign SrcAlign, uint64_t Size,
672 bool isVolatile = false, MDNode *TBAATag = nullptr,
673 MDNode *ScopeTag = nullptr,
674 MDNode *NoAliasTag = nullptr) {
675 return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
676 isVolatile, TBAATag, ScopeTag, NoAliasTag);
677 }
678
679 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
680 MaybeAlign SrcAlign, Value *Size,
681 bool isVolatile = false, MDNode *TBAATag = nullptr,
682 MDNode *ScopeTag = nullptr,
683 MDNode *NoAliasTag = nullptr);
684
685 /// \brief Create and insert an element unordered-atomic memmove between the
686 /// specified pointers.
687 ///
688 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers,
689 /// respectively.
690 ///
691 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
692 /// specified, it will be added to the instruction. Likewise with alias.scope
693 /// and noalias tags.
694 CallInst *CreateElementUnorderedAtomicMemMove(
695 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
696 uint32_t ElementSize, MDNode *TBAATag = nullptr,
697 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
698 MDNode *NoAliasTag = nullptr);
699
700 /// Create a vector fadd reduction intrinsic of the source vector.
701 /// The first parameter is a scalar accumulator value for ordered reductions.
702 CallInst *CreateFAddReduce(Value *Acc, Value *Src);
703
704 /// Create a vector fmul reduction intrinsic of the source vector.
705 /// The first parameter is a scalar accumulator value for ordered reductions.
706 CallInst *CreateFMulReduce(Value *Acc, Value *Src);
707
708 /// Create a vector int add reduction intrinsic of the source vector.
709 CallInst *CreateAddReduce(Value *Src);
710
711 /// Create a vector int mul reduction intrinsic of the source vector.
712 CallInst *CreateMulReduce(Value *Src);
713
714 /// Create a vector int AND reduction intrinsic of the source vector.
715 CallInst *CreateAndReduce(Value *Src);
716
717 /// Create a vector int OR reduction intrinsic of the source vector.
718 CallInst *CreateOrReduce(Value *Src);
719
720 /// Create a vector int XOR reduction intrinsic of the source vector.
721 CallInst *CreateXorReduce(Value *Src);
722
723 /// Create a vector integer max reduction intrinsic of the source
724 /// vector.
725 CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);
726
727 /// Create a vector integer min reduction intrinsic of the source
728 /// vector.
729 CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);
730
731 /// Create a vector float max reduction intrinsic of the source
732 /// vector.
733 CallInst *CreateFPMaxReduce(Value *Src);
734
735 /// Create a vector float min reduction intrinsic of the source
736 /// vector.
737 CallInst *CreateFPMinReduce(Value *Src);
738
739 /// Create a lifetime.start intrinsic.
740 ///
741 /// If the pointer isn't i8* it will be converted.
742 CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
743
744 /// Create a lifetime.end intrinsic.
745 ///
746 /// If the pointer isn't i8* it will be converted.
747 CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
748
749 /// Create a call to invariant.start intrinsic.
750 ///
751 /// If the pointer isn't i8* it will be converted.
752 CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
753
754 /// Create a call to Masked Load intrinsic
755 CallInst *CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask,
756 Value *PassThru = nullptr, const Twine &Name = "");
757
758 /// Create a call to Masked Store intrinsic
759 CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
760 Value *Mask);
761
762 /// Create a call to Masked Gather intrinsic
763 CallInst *CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment,
764 Value *Mask = nullptr, Value *PassThru = nullptr,
765 const Twine &Name = "");
766
767 /// Create a call to Masked Scatter intrinsic
768 CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
769 Value *Mask = nullptr);
770
771 /// Create an assume intrinsic call that allows the optimizer to
772 /// assume that the provided condition will be true.
773 ///
774 /// The optional argument \p OpBundles specifies operand bundles that are
775 /// added to the call instruction.
776 CallInst *CreateAssumption(Value *Cond,
777 ArrayRef<OperandBundleDef> OpBundles = llvm::None);
778
779 /// Create a llvm.experimental.noalias.scope.decl intrinsic call.
780 Instruction *CreateNoAliasScopeDeclaration(Value *Scope);
781 Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) {
782 return CreateNoAliasScopeDeclaration(
783 MetadataAsValue::get(Context, ScopeTag));
784 }
785
786 /// Create a call to the experimental.gc.statepoint intrinsic to
787 /// start a new statepoint sequence.
788 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
789 Value *ActualCallee,
790 ArrayRef<Value *> CallArgs,
791 Optional<ArrayRef<Value *>> DeoptArgs,
792 ArrayRef<Value *> GCArgs,
793 const Twine &Name = "");
794
795 /// Create a call to the experimental.gc.statepoint intrinsic to
796 /// start a new statepoint sequence.
797 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
798 Value *ActualCallee, uint32_t Flags,
799 ArrayRef<Value *> CallArgs,
800 Optional<ArrayRef<Use>> TransitionArgs,
801 Optional<ArrayRef<Use>> DeoptArgs,
802 ArrayRef<Value *> GCArgs,
803 const Twine &Name = "");
804
805 /// Convenience function for the common case when CallArgs are filled
806 /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
807 /// .get()'ed to get the Value pointer.
808 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
809 Value *ActualCallee, ArrayRef<Use> CallArgs,
810 Optional<ArrayRef<Value *>> DeoptArgs,
811 ArrayRef<Value *> GCArgs,
812 const Twine &Name = "");
813
814 /// Create an invoke to the experimental.gc.statepoint intrinsic to
815 /// start a new statepoint sequence.
816 InvokeInst *
817 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
818 Value *ActualInvokee, BasicBlock *NormalDest,
819 BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
820 Optional<ArrayRef<Value *>> DeoptArgs,
821 ArrayRef<Value *> GCArgs, const Twine &Name = "");
822
823 /// Create an invoke to the experimental.gc.statepoint intrinsic to
824 /// start a new statepoint sequence.
825 InvokeInst *CreateGCStatepointInvoke(
826 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
827 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
828 ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
829 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
830 const Twine &Name = "");
831
832 // Convenience function for the common case when CallArgs are filled in using
833 // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
834 // get the Value *.
835 InvokeInst *
836 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
837 Value *ActualInvokee, BasicBlock *NormalDest,
838 BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
839 Optional<ArrayRef<Value *>> DeoptArgs,
840 ArrayRef<Value *> GCArgs, const Twine &Name = "");
841
842 /// Create a call to the experimental.gc.result intrinsic to extract
843 /// the result from a call wrapped in a statepoint.
844 CallInst *CreateGCResult(Instruction *Statepoint,
845 Type *ResultType,
846 const Twine &Name = "");
847
848 /// Create a call to the experimental.gc.relocate intrinsics to
849 /// project the relocated value of one pointer from the statepoint.
850 CallInst *CreateGCRelocate(Instruction *Statepoint,
851 int BaseOffset,
852 int DerivedOffset,
853 Type *ResultType,
854 const Twine &Name = "");
855
856 /// Create a call to the experimental.gc.pointer.base intrinsic to get the
857 /// base pointer for the specified derived pointer.
858 CallInst *CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name = "");
859
860 /// Create a call to the experimental.gc.get.pointer.offset intrinsic to get
861 /// the offset of the specified derived pointer from its base.
862 CallInst *CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name = "");
863
864 /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale
865 /// will be the same type as that of \p Scaling.
866 Value *CreateVScale(Constant *Scaling, const Twine &Name = "");
867
868 /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...>
869 Value *CreateStepVector(Type *DstType, const Twine &Name = "");
870
871 /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
872 /// type.
873 CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
874 Instruction *FMFSource = nullptr,
875 const Twine &Name = "");
876
877 /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
878 /// first type.
879 CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
880 Instruction *FMFSource = nullptr,
881 const Twine &Name = "");
882
883 /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If
884 /// \p FMFSource is provided, copy fast-math-flags from that instruction to
885 /// the intrinsic.
886 CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
887 ArrayRef<Value *> Args,
888 Instruction *FMFSource = nullptr,
889 const Twine &Name = "");
890
891 /// Create call to the minnum intrinsic.
892 CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
893 return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
894 }
895
896 /// Create call to the maxnum intrinsic.
897 CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
898 return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
899 }
900
901 /// Create call to the minimum intrinsic.
902 CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
903 return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
904 }
905
906 /// Create call to the maximum intrinsic.
907 CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
908 return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
909 }
910
911 /// Create a call to the arithmetic_fence intrinsic.
912 CallInst *CreateArithmeticFence(Value *Val, Type *DstType,
913 const Twine &Name = "") {
914 return CreateIntrinsic(Intrinsic::arithmetic_fence, DstType, Val, nullptr,
915 Name);
916 }
917
918 /// Create a call to the experimental.vector.extract intrinsic.
919 CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
920 const Twine &Name = "") {
921 return CreateIntrinsic(Intrinsic::experimental_vector_extract,
922 {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr,
923 Name);
924 }
925
926 /// Create a call to the experimental.vector.insert intrinsic.
927 CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec,
928 Value *Idx, const Twine &Name = "") {
929 return CreateIntrinsic(Intrinsic::experimental_vector_insert,
930 {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx},
931 nullptr, Name);
932 }
933
934private:
935 /// Create a call to a masked intrinsic with given Id.
936 CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
937 ArrayRef<Type *> OverloadedTypes,
938 const Twine &Name = "");
939
940 Value *getCastedInt8PtrValue(Value *Ptr);
941
942 //===--------------------------------------------------------------------===//
943 // Instruction creation methods: Terminators
944 //===--------------------------------------------------------------------===//
945
946private:
947 /// Helper to add branch weight and unpredictable metadata onto an
948 /// instruction.
949 /// \returns The annotated instruction.
950 template <typename InstTy>
951 InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
952 if (Weights)
953 I->setMetadata(LLVMContext::MD_prof, Weights);
954 if (Unpredictable)
955 I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
956 return I;
957 }
958
959public:
960 /// Create a 'ret void' instruction.
961 ReturnInst *CreateRetVoid() {
962 return Insert(ReturnInst::Create(Context));
963 }
964
965 /// Create a 'ret <val>' instruction.
966 ReturnInst *CreateRet(Value *V) {
967 return Insert(ReturnInst::Create(Context, V));
968 }
969
970 /// Create a sequence of N insertvalue instructions,
971 /// with one Value from the retVals array each, that build an aggregate
972 /// return value one value at a time, and a ret instruction to return
973 /// the resulting aggregate value.
974 ///
975 /// This is a convenience function for code that uses aggregate return values
976 /// as a vehicle for having multiple return values.
977 ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
978 Value *V = UndefValue::get(getCurrentFunctionReturnType());
979 for (unsigned i = 0; i != N; ++i)
980 V = CreateInsertValue(V, retVals[i], i, "mrv");
981 return Insert(ReturnInst::Create(Context, V));
982 }
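// --- Example (editor annotation; not part of IRBuilder.h) ---------------
// Sketch of returning two values through an aggregate; assumes the current
// function's return type is {i32, i32} and `A0`, `A1` are i32 Values
// (hypothetical names).
Value *Rets[] = {A0, A1};
B.CreateAggregateRet(Rets, 2);
// ------------------------------------------------------------------------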
983
984 /// Create an unconditional 'br label X' instruction.
985 BranchInst *CreateBr(BasicBlock *Dest) {
986 return Insert(BranchInst::Create(Dest));
987 }
988
989 /// Create a conditional 'br Cond, TrueDest, FalseDest'
990 /// instruction.
991 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
992 MDNode *BranchWeights = nullptr,
993 MDNode *Unpredictable = nullptr) {
994 return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
995 BranchWeights, Unpredictable));
996 }
997
998 /// Create a conditional 'br Cond, TrueDest, FalseDest'
999 /// instruction. Copy branch metadata if available.
1000 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
1001 Instruction *MDSrc) {
1002 BranchInst *Br = BranchInst::Create(True, False, Cond);
1003 if (MDSrc) {
1004 unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
1005 LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
1006 Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
1007 }
1008 return Insert(Br);
1009 }
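// --- Example (editor annotation; not part of IRBuilder.h) ---------------
// Sketch of a conditional branch annotated with profile weights; assumes an
// i1 Value `Cond`, blocks `ThenBB`/`ElseBB`, and uses llvm::MDBuilder from
// llvm/IR/MDBuilder.h (all hypothetical setup).
MDNode *Weights =
    MDBuilder(B.getContext()).createBranchWeights(/*True*/ 90, /*False*/ 10);
B.CreateCondBr(Cond, ThenBB, ElseBB, Weights);
// ------------------------------------------------------------------------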
1010
1011 /// Create a switch instruction with the specified value, default dest,
1012 /// and with a hint for the number of cases that will be added (for efficient
1013 /// allocation).
1014 SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
1015 MDNode *BranchWeights = nullptr,
1016 MDNode *Unpredictable = nullptr) {
1017 return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
1018 BranchWeights, Unpredictable));
1019 }
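// --- Example (editor annotation; not part of IRBuilder.h) ---------------
// Sketch of building a two-case switch; cases are added on the returned
// SwitchInst. Assumes an i32 Value `V` and blocks `DefaultBB`, `Case0BB`,
// `Case1BB` (hypothetical names).
SwitchInst *SI = B.CreateSwitch(V, DefaultBB, /*NumCases=*/2);
SI->addCase(B.getInt32(0), Case0BB);
SI->addCase(B.getInt32(1), Case1BB);
// ------------------------------------------------------------------------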
1020
1021 /// Create an indirect branch instruction with the specified address
1022 /// operand, with an optional hint for the number of destinations that will be
1023 /// added (for efficient allocation).
1024 IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
1025 return Insert(IndirectBrInst::Create(Addr, NumDests));
1026 }
1027
1028 /// Create an invoke instruction.
1029 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1030 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1031 ArrayRef<Value *> Args,
1032 ArrayRef<OperandBundleDef> OpBundles,
1033 const Twine &Name = "") {
1034 InvokeInst *II =
1035 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles);
1036 if (IsFPConstrained)
1037 setConstrainedFPCallAttr(II);
1038 return Insert(II, Name);
1039 }
1040 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1041 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1042 ArrayRef<Value *> Args = None,
1043 const Twine &Name = "") {
1044 InvokeInst *II =
1045 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args);
1046 if (IsFPConstrained)
1047 setConstrainedFPCallAttr(II);
1048 return Insert(II, Name);
1049 }
1050
1051 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1052 BasicBlock *UnwindDest, ArrayRef<Value *> Args,
1053 ArrayRef<OperandBundleDef> OpBundles,
1054 const Twine &Name = "") {
1055 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1056 NormalDest, UnwindDest, Args, OpBundles, Name);
1057 }
1058
1059 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1060 BasicBlock *UnwindDest,
1061 ArrayRef<Value *> Args = None,
1062 const Twine &Name = "") {
1063 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1064 NormalDest, UnwindDest, Args, Name);
1065 }
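// --- Example (editor annotation; not part of IRBuilder.h) ---------------
// Sketch of an invoke through a FunctionCallee; control continues at
// `ContBB` on normal return and at `LPadBB` on unwind. Assumes `Callee`,
// an argument `Arg`, and the two blocks exist (hypothetical names).
InvokeInst *II = B.CreateInvoke(Callee, ContBB, LPadBB, {Arg});
// ------------------------------------------------------------------------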
1066
1067 /// \brief Create a callbr instruction.
1068 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1069 BasicBlock *DefaultDest,
1070 ArrayRef<BasicBlock *> IndirectDests,
1071 ArrayRef<Value *> Args = None,
1072 const Twine &Name = "") {
1073 return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
1074 Args), Name);
1075 }
1076 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1077 BasicBlock *DefaultDest,
1078 ArrayRef<BasicBlock *> IndirectDests,
1079 ArrayRef<Value *> Args,
1080 ArrayRef<OperandBundleDef> OpBundles,
1081 const Twine &Name = "") {
1082 return Insert(
1083 CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
1084 OpBundles), Name);
1085 }
1086
1087 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1088 ArrayRef<BasicBlock *> IndirectDests,
1089 ArrayRef<Value *> Args = None,
1090 const Twine &Name = "") {
1091 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1092 DefaultDest, IndirectDests, Args, Name);
1093 }
1094 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1095 ArrayRef<BasicBlock *> IndirectDests,
1096 ArrayRef<Value *> Args,
1097 ArrayRef<OperandBundleDef> OpBundles,
1098 const Twine &Name = "") {
1099 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1100 DefaultDest, IndirectDests, Args, OpBundles, Name);
1101 }
1102
1103 ResumeInst *CreateResume(Value *Exn) {
1104 return Insert(ResumeInst::Create(Exn));
1105 }
1106
1107 CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
1108 BasicBlock *UnwindBB = nullptr) {
1109 return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
1110 }
1111
1112 CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
1113 unsigned NumHandlers,
1114 const Twine &Name = "") {
1115 return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
1116 Name);
1117 }
1118
1119 CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
1120 const Twine &Name = "") {
1121 return Insert(CatchPadInst::Create(ParentPad, Args), Name);
1122 }
1123
1124 CleanupPadInst *CreateCleanupPad(Value *ParentPad,
1125 ArrayRef<Value *> Args = None,
1126 const Twine &Name = "") {
1127 return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
1128 }
1129
1130 CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
1131 return Insert(CatchReturnInst::Create(CatchPad, BB));
1132 }
1133
1134 UnreachableInst *CreateUnreachable() {
1135 return Insert(new UnreachableInst(Context));
1136 }
1137
1138 //===--------------------------------------------------------------------===//
1139 // Instruction creation methods: Binary Operators
1140 //===--------------------------------------------------------------------===//
1141private:
1142 BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
1143 Value *LHS, Value *RHS,
1144 const Twine &Name,
1145 bool HasNUW, bool HasNSW) {
1146 BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
1147 if (HasNUW) BO->setHasNoUnsignedWrap();
1148 if (HasNSW) BO->setHasNoSignedWrap();
1149 return BO;
1150 }
1151
1152 Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
1153 FastMathFlags FMF) const {
1154 if (!FPMD)
1155 FPMD = DefaultFPMathTag;
1156 if (FPMD)
1157 I->setMetadata(LLVMContext::MD_fpmath, FPMD);
1158 I->setFastMathFlags(FMF);
1159 return I;
1160 }
1161
1162 Value *foldConstant(Instruction::BinaryOps Opc, Value *L,
1163 Value *R, const Twine &Name) const {
1164 auto *LC = dyn_cast<Constant>(L);
1165 auto *RC = dyn_cast<Constant>(R);
1166 return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr;
1167 }
1168
1169 Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) {
1170 RoundingMode UseRounding = DefaultConstrainedRounding;
1171
1172 if (Rounding.hasValue())
1173 UseRounding = Rounding.getValue();
1174
1175 Optional<StringRef> RoundingStr = RoundingModeToStr(UseRounding);
1176 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
1177 auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue());
1178
1179 return MetadataAsValue::get(Context, RoundingMDS);
1180 }
1181
1182 Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) {
1183 fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;
1184
1185 if (Except.hasValue())
1186 UseExcept = Except.getValue();
1187
1188 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(UseExcept);
1189 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
1190 auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue());
1191
1192 return MetadataAsValue::get(Context, ExceptMDS);
1193 }
1194
1195 Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
1195 assert(CmpInst::isFPPredicate(Predicate) &&
1196 Predicate != CmpInst::FCMP_FALSE &&
1197 Predicate != CmpInst::FCMP_TRUE &&
1198 "Invalid constrained FP comparison predicate!");
1200
1201 StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
1202 auto *PredicateMDS = MDString::get(Context, PredicateStr);
1203
1204 return MetadataAsValue::get(Context, PredicateMDS);
1205 }
1206
1207public:
1208 Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
1209 bool HasNUW = false, bool HasNSW = false) {
1210 if (auto *LC = dyn_cast<Constant>(LHS))
1211 if (auto *RC = dyn_cast<Constant>(RHS))
1212 return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
1213 return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
1214 HasNUW, HasNSW);
1215 }
1216
1217 Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1218 return CreateAdd(LHS, RHS, Name, false, true);
1219 }
1220
1221 Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1222 return CreateAdd(LHS, RHS, Name, true, false);
1223 }
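// --- Example (editor annotation; not part of IRBuilder.h) ---------------
// Sketch contrasting the constant-folding fast path with instruction
// emission; assumes an i32 Value `X` (hypothetical name).
Value *Five = B.CreateAdd(B.getInt32(2), B.getInt32(3)); // folds to i32 5
Value *Inc = B.CreateNSWAdd(X, B.getInt32(1), "inc");    // emits `add nsw`
// ------------------------------------------------------------------------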
1224
1225 Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
1226 bool HasNUW = false, bool HasNSW = false) {
1227 if (auto *LC = dyn_cast<Constant>(LHS))
1228 if (auto *RC = dyn_cast<Constant>(RHS))
1229 return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
1230 return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
1231 HasNUW, HasNSW);
1232 }
1233
1234 Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1235 return CreateSub(LHS, RHS, Name, false, true);
1236 }
1237
1238 Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1239 return CreateSub(LHS, RHS, Name, true, false);
1240 }
1241
1242 Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
1243 bool HasNUW = false, bool HasNSW = false) {
1244 if (auto *LC = dyn_cast<Constant>(LHS))
1245 if (auto *RC = dyn_cast<Constant>(RHS))
1246 return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
1247 return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
1248 HasNUW, HasNSW);
1249 }
1250
1251 Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1252 return CreateMul(LHS, RHS, Name, false, true);
1253 }
1254
1255 Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1256 return CreateMul(LHS, RHS, Name, true, false);
1257 }
1258
1259 Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1260 bool isExact = false) {
1261 if (auto *LC = dyn_cast<Constant>(LHS))
1262 if (auto *RC = dyn_cast<Constant>(RHS))
1263 return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
1264 if (!isExact)
1265 return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
1266 return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
1267 }
1268
1269 Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1270 return CreateUDiv(LHS, RHS, Name, true);
1271 }
1272
1273 Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1274 bool isExact = false) {
1275 if (auto *LC = dyn_cast<Constant>(LHS))
1276 if (auto *RC = dyn_cast<Constant>(RHS))
1277 return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
1278 if (!isExact)
1279 return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
1280 return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
1281 }
1282
1283 Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1284 return CreateSDiv(LHS, RHS, Name, true);
1285 }
1286
1287 Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
1288 if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V;
1289 return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
1290 }
1291
1292 Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
1293 if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V;
1294 return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
1295 }
1296
1297 Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
1298 bool HasNUW = false, bool HasNSW = false) {
1299 if (auto *LC = dyn_cast<Constant>(LHS))
1300 if (auto *RC = dyn_cast<Constant>(RHS))
1301 return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
1302 return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
1303 HasNUW, HasNSW);
1304 }
1305
1306 Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
1307 bool HasNUW = false, bool HasNSW = false) {
1308 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1309 HasNUW, HasNSW);
1310 }
1311
1312 Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
1313 bool HasNUW = false, bool HasNSW = false) {
1314 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1315 HasNUW, HasNSW);
1316 }
1317
1318 Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
1319 bool isExact = false) {
1320 if (auto *LC = dyn_cast<Constant>(LHS))
1321 if (auto *RC = dyn_cast<Constant>(RHS))
1322 return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
1323 if (!isExact)
1324 return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
1325 return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
1326 }
1327
1328 Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1329 bool isExact = false) {
1330 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1331 }
1332
1333 Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1334 bool isExact = false) {
1335 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1336 }
1337
1338 Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
1339 bool isExact = false) {
1340 if (auto *LC = dyn_cast<Constant>(LHS))
1341 if (auto *RC = dyn_cast<Constant>(RHS))
1342 return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
1343 if (!isExact)
1344 return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
1345 return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
1346 }
1347
1348 Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1349 bool isExact = false) {
1350 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1351 }
1352
1353 Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1354 bool isExact = false) {
1355 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1356 }
1357
1358 Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
1359 if (auto *RC = dyn_cast<Constant>(RHS)) {
1360 if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
1361 return LHS; // LHS & -1 -> LHS
1362 if (auto *LC = dyn_cast<Constant>(LHS))
1363 return Insert(Folder.CreateAnd(LC, RC), Name);
1364 }
1365 return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
1366 }
1367
1368 Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1369 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1370 }
1371
1372 Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1373 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1374 }
1375
1376 Value *CreateAnd(ArrayRef<Value*> Ops) {
1377 assert(!Ops.empty());
1378 Value *Accum = Ops[0];
1379 for (unsigned i = 1; i < Ops.size(); i++)
1380 Accum = CreateAnd(Accum, Ops[i]);
1381 return Accum;
1382 }
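// --- Example (editor annotation; not part of IRBuilder.h) ---------------
// Sketch of combining several masks with the ArrayRef overload above, which
// simply chains two-operand CreateAnd calls; assumes i32 Values `M0`, `M1`,
// `M2` (hypothetical names).
Value *Mask = B.CreateAnd({M0, M1, M2});
// ------------------------------------------------------------------------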
1383
1384 Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
1385 if (auto *RC = dyn_cast<Constant>(RHS)) {
1386 if (RC->isNullValue())
1387 return LHS; // LHS | 0 -> LHS
1388 if (auto *LC = dyn_cast<Constant>(LHS))
1389 return Insert(Folder.CreateOr(LC, RC), Name);
1390 }
1391 return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
1392 }
1393
1394 Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1395 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1396 }
1397
1398 Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1399 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1400 }
1401
1402 Value *CreateOr(ArrayRef<Value*> Ops) {
1403 assert(!Ops.empty());
1404 Value *Accum = Ops[0];
1405 for (unsigned i = 1; i < Ops.size(); i++)
1406 Accum = CreateOr(Accum, Ops[i]);
1407 return Accum;
1408 }
1409
1410 Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
1411 if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V;
1412 return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
1413 }
1414
1415 Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1416 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1417 }
1418
1419 Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1420 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1421 }
1422
1423 Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
1424 MDNode *FPMD = nullptr) {
1425 if (IsFPConstrained)
1426 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1427 L, R, nullptr, Name, FPMD);
1428
1429 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1430 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
1431 return Insert(I, Name);
1432 }
1433
1434 /// Copy fast-math-flags from an instruction rather than using the builder's
1435 /// default FMF.
1436 Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
1437 const Twine &Name = "") {
1438 if (IsFPConstrained)
1439 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1440 L, R, FMFSource, Name);
1441
1442 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1443 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr,
1444 FMFSource->getFastMathFlags());
1445 return Insert(I, Name);
1446 }
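// --- Example (editor annotation; not part of IRBuilder.h) ---------------
// Sketch contrasting the builder's default FMF with flags copied from an
// existing instruction; assumes float Values `X`, `Y` and an Instruction
// `Src` carrying fast-math flags (hypothetical names).
B.setFastMathFlags(FastMathFlags::getFast());
Value *F1 = B.CreateFAdd(X, Y);         // uses the builder's default FMF
Value *F2 = B.CreateFAddFMF(X, Y, Src); // copies Src's FMF instead
// ------------------------------------------------------------------------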
1447
1448 Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
1449 MDNode *FPMD = nullptr) {
1450 if (IsFPConstrained)
1451 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1452 L, R, nullptr, Name, FPMD);
1453
1454 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1455 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
1456 return Insert(I, Name);
1457 }
1458
1459 /// Copy fast-math-flags from an instruction rather than using the builder's
1460 /// default FMF.
1461 Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
1462 const Twine &Name = "") {
1463 if (IsFPConstrained)
1464 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1465 L, R, FMFSource, Name);
1466
1467 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1468 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr,
1469 FMFSource->getFastMathFlags());
1470 return Insert(I, Name);
1471 }
1472
1473 Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
1474 MDNode *FPMD = nullptr) {
1475 if (IsFPConstrained)
1476 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1477 L, R, nullptr, Name, FPMD);
1478
1479 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1480 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
1481 return Insert(I, Name);
1482 }
1483
1484 /// Copy fast-math-flags from an instruction rather than using the builder's
1485 /// default FMF.
1486 Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
1487 const Twine &Name = "") {
1488 if (IsFPConstrained)
1489 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1490 L, R, FMFSource, Name);
1491
1492 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1493 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr,
1494 FMFSource->getFastMathFlags());
1495 return Insert(I, Name);
1496 }
1497
1498 Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
1499 MDNode *FPMD = nullptr) {
1500 if (IsFPConstrained)
1501 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1502 L, R, nullptr, Name, FPMD);
1503
1504 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1505 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
1506 return Insert(I, Name);
1507 }
1508
1509 /// Copy fast-math-flags from an instruction rather than using the builder's
1510 /// default FMF.
1511 Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
1512 const Twine &Name = "") {
1513 if (IsFPConstrained)
1514 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1515 L, R, FMFSource, Name);
1516
1517 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1518 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr,
1519 FMFSource->getFastMathFlags());
1520 return Insert(I, Name);
1521 }
1522
1523 Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
1524 MDNode *FPMD = nullptr) {
1525 if (IsFPConstrained)
1526 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1527 L, R, nullptr, Name, FPMD);
1528
1529 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1530 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
1531 return Insert(I, Name);
1532 }
1533
1534 /// Copy fast-math-flags from an instruction rather than using the builder's
1535 /// default FMF.
1536 Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
1537 const Twine &Name = "") {
1538 if (IsFPConstrained)
1539 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1540 L, R, FMFSource, Name);
1541
1542 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1543 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr,
1544 FMFSource->getFastMathFlags());
1545 return Insert(I, Name);
1546 }
1547
1548 Value *CreateBinOp(Instruction::BinaryOps Opc,
1549 Value *LHS, Value *RHS, const Twine &Name = "",
1550 MDNode *FPMathTag = nullptr) {
1551 if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
1552 Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
1553 if (isa<FPMathOperator>(BinOp))
1554 setFPAttrs(BinOp, FPMathTag, FMF);
1555 return Insert(BinOp, Name);
1556 }
1557
1558 Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1559 assert(Cond2->getType()->isIntOrIntVectorTy(1));
1560 return CreateSelect(Cond1, Cond2,
1561 ConstantInt::getNullValue(Cond2->getType()), Name);
1562 }
1563
1564 Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1565 assert(Cond2->getType()->isIntOrIntVectorTy(1));
1566 return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()),
1567 Cond2, Name);
1568 }
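// --- Example (editor annotation; not part of IRBuilder.h) ---------------
// Sketch of a poison-safe short-circuit AND: this emits
// select(Cond1, Cond2, false) rather than a plain `and`, so Cond2 cannot
// leak poison when Cond1 is false. Assumes i1 Values `Cond1`, `Cond2`.
Value *Both = B.CreateLogicalAnd(Cond1, Cond2, "both");
// ------------------------------------------------------------------------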
1569
1570 CallInst *CreateConstrainedFPBinOp(
1571 Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
1572 const Twine &Name = "", MDNode *FPMathTag = nullptr,
1573 Optional<RoundingMode> Rounding = None,
1574 Optional<fp::ExceptionBehavior> Except = None);
1575
1576 Value *CreateNeg(Value *V, const Twine &Name = "",
1577 bool HasNUW = false, bool HasNSW = false) {
1578 if (auto *VC = dyn_cast<Constant>(V))
1579 return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
1580 BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
1581 if (HasNUW) BO->setHasNoUnsignedWrap();
1582 if (HasNSW) BO->setHasNoSignedWrap();
1583 return BO;
1584 }
1585
1586 Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
1587 return CreateNeg(V, Name, false, true);
1588 }
1589
1590 Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
1591 return CreateNeg(V, Name, true, false);
1592 }
1593
1594 Value *CreateFNeg(Value *V, const Twine &Name = "",
1595 MDNode *FPMathTag = nullptr) {
1596 if (auto *VC = dyn_cast<Constant>(V))
1597 return Insert(Folder.CreateFNeg(VC), Name);
1598 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
1599 Name);
1600 }
1601
1602 /// Copy fast-math-flags from an instruction rather than using the builder's
1603 /// default FMF.
1604 Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
1605 const Twine &Name = "") {
1606 if (auto *VC = dyn_cast<Constant>(V))
1607 return Insert(Folder.CreateFNeg(VC), Name);
1608 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
1609 FMFSource->getFastMathFlags()),
1610 Name);
1611 }
1612
1613 Value *CreateNot(Value *V, const Twine &Name = "") {
1614 if (auto *VC = dyn_cast<Constant>(V))
1615 return Insert(Folder.CreateNot(VC), Name);
1616 return Insert(BinaryOperator::CreateNot(V), Name);
1617 }
1618
1619 Value *CreateUnOp(Instruction::UnaryOps Opc,
1620 Value *V, const Twine &Name = "",
1621 MDNode *FPMathTag = nullptr) {
1622 if (auto *VC = dyn_cast<Constant>(V))
1623 return Insert(Folder.CreateUnOp(Opc, VC), Name);
1624 Instruction *UnOp = UnaryOperator::Create(Opc, V);
1625 if (isa<FPMathOperator>(UnOp))
1626 setFPAttrs(UnOp, FPMathTag, FMF);
1627 return Insert(UnOp, Name);
1628 }
1629
1630 /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
1631 /// The correct number of operands must be passed accordingly.
1632 Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
1633 const Twine &Name = "", MDNode *FPMathTag = nullptr);
1634
1635 //===--------------------------------------------------------------------===//
1636 // Instruction creation methods: Memory Instructions
1637 //===--------------------------------------------------------------------===//
1638
1639 AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
1640 Value *ArraySize = nullptr, const Twine &Name = "") {
1641 const DataLayout &DL = BB->getModule()->getDataLayout();
1642 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1643 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1644 }
1645
1646 AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
1647 const Twine &Name = "") {
1648 const DataLayout &DL = BB->getModule()->getDataLayout();
1649 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1650 unsigned AddrSpace = DL.getAllocaAddrSpace();
1651 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1652 }
1653
1654 /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
1655 /// converting the string to 'bool' for the isVolatile parameter.
1656 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
1657 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1658 }
1659
1660 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
1661 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1662 }
1663
1664 LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
1665 const Twine &Name = "") {
1666 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
1667 }
1668
1669 // Deprecated [opaque pointer types]
1670 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1671 const char *Name),
1672 "Use the version that explicitly specifies the "
1673 "loaded type instead") {
1674 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1675 }
1676
1677 // Deprecated [opaque pointer types]
1678 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1679 const Twine &Name = ""),
1680 "Use the version that explicitly specifies the "
1681 "loaded type instead") {
1682 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1683 }
1684
1685 // Deprecated [opaque pointer types]
1686 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1687 bool isVolatile,
1688 const Twine &Name = ""),
1689 "Use the version that explicitly specifies the "
1690 "loaded type instead") {
1691 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, isVolatile,
1692 Name);
1693 }
1694
1695 StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
1696 return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
1697 }
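// --- Example (editor annotation; not part of IRBuilder.h) ---------------
// Sketch of a stack-slot round trip using the non-deprecated, explicitly
// typed load; assumes an i64 Value `V` (hypothetical name).
AllocaInst *Slot = B.CreateAlloca(B.getInt64Ty(), nullptr, "slot");
B.CreateStore(V, Slot);
Value *Reloaded = B.CreateLoad(B.getInt64Ty(), Slot, "reload");
// ------------------------------------------------------------------------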
1698
1699 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1700 const char *Name) {
1701 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1702 }
1703
1704 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1705 const Twine &Name = "") {
1706 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1707 }
1708
1709 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1710 bool isVolatile, const Twine &Name = "") {
1711 if (!Align) {
1712 const DataLayout &DL = BB->getModule()->getDataLayout();
1713 Align = DL.getABITypeAlign(Ty);
1714 }
1715 return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
1716 }
1717
1718 // Deprecated [opaque pointer types]
1719 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1720 MaybeAlign Align,
1721 const char *Name),
1722 "Use the version that explicitly specifies the "
1723 "loaded type instead") {
1724 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1725 Align, Name);
1726 }
1727 // Deprecated [opaque pointer types]
1728 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1729 MaybeAlign Align,
1730 const Twine &Name = ""),
1731 "Use the version that explicitly specifies the "
1732 "loaded type instead") {
1733 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1734 Align, Name);
1735 }
1736 // Deprecated [opaque pointer types]
1737 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1738 MaybeAlign Align,
1739 bool isVolatile,
1740 const Twine &Name = ""),
1741 "Use the version that explicitly specifies the "
1742 "loaded type instead") {
1743 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1744 Align, isVolatile, Name);
1745 }
1746
1747 StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
1748 bool isVolatile = false) {
1749 if (!Align) {
1750 const DataLayout &DL = BB->getModule()->getDataLayout();
1751 Align = DL.getABITypeAlign(Val->getType());
1752 }
1753 return Insert(new StoreInst(Val, Ptr, isVolatile, *Align));
1754 }
1755 FenceInst *CreateFence(AtomicOrdering Ordering,
1756 SyncScope::ID SSID = SyncScope::System,
1757 const Twine &Name = "") {
1758 return Insert(new FenceInst(Context, Ordering, SSID), Name);
1759 }
1760
1761 AtomicCmpXchgInst *
1762 CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align,
1763 AtomicOrdering SuccessOrdering,
1764 AtomicOrdering FailureOrdering,
1765 SyncScope::ID SSID = SyncScope::System) {
1766 if (!Align) {
1767 const DataLayout &DL = BB->getModule()->getDataLayout();
1768 Align = llvm::Align(DL.getTypeStoreSize(New->getType()));
1769 }
1770
1771 return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering,
1772 FailureOrdering, SSID));
1773 }
1774
1775 AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr,
1776 Value *Val, MaybeAlign Align,
1777 AtomicOrdering Ordering,
1778 SyncScope::ID SSID = SyncScope::System) {
1779 if (!Align) {
1780 const DataLayout &DL = BB->getModule()->getDataLayout();
1781 Align = llvm::Align(DL.getTypeStoreSize(Val->getType()));
1782 }
1783
1784 return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID));
1785 }
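// --- Example (editor annotation; not part of IRBuilder.h) ---------------
// Sketch of an atomic increment; when MaybeAlign() is passed, the alignment
// is deduced from the store size of the value type, as shown above. Assumes
// a pointer Value `Counter` to an i32 (hypothetical name).
B.CreateAtomicRMW(AtomicRMWInst::Add, Counter, B.getInt32(1), MaybeAlign(),
                  AtomicOrdering::SequentiallyConsistent);
// ------------------------------------------------------------------------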
1786
1787 LLVM_ATTRIBUTE_DEPRECATED(
1788 Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1789 const Twine &Name = ""),
1790 "Use the version with explicit element type instead") {
1791 return CreateGEP(Ptr->getType()->getScalarType()->getPointerElementType(),
1792 Ptr, IdxList, Name);
1793 }
1794
1795 Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1796 const Twine &Name = "") {
1797 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1798 // Every index must be constant.
1799 size_t i, e;
1800 for (i = 0, e = IdxList.size(); i != e; ++i)
1801 if (!isa<Constant>(IdxList[i]))
1802 break;
1803 if (i == e)
1804 return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
1805 }
1806 return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
1807 }
1808
1809 LLVM_ATTRIBUTE_DEPRECATED(
1810 Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1811 const Twine &Name = ""),
1812 "Use the version with explicit element type instead") {
1813 return CreateInBoundsGEP(
1814 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
1815 Name);
1816 }
1817
1818 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1819 const Twine &Name = "") {
1820 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1821 // Every index must be constant.
1822 size_t i, e;
1823 for (i = 0, e = IdxList.size(); i != e; ++i)
1824 if (!isa<Constant>(IdxList[i]))
1825 break;
1826 if (i == e)
1827 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
1828 Name);
1829 }
1830 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
1831 }
1832
1833 Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
1834 if (auto *PC = dyn_cast<Constant>(Ptr))
1835 if (auto *IC = dyn_cast<Constant>(Idx))
1836 return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
1837 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1838 }
1839
1840 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
1841 const Twine &Name = "") {
1842 if (auto *PC = dyn_cast<Constant>(Ptr))
1843 if (auto *IC = dyn_cast<Constant>(Idx))
1844 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
1845 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1846 }
1847
1848 LLVM_ATTRIBUTE_DEPRECATED(
1849 Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0,
1850 const Twine &Name = ""),
1851 "Use the version with explicit element type instead") {
1852 return CreateConstGEP1_32(
1853 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1854 Name);
1855 }
1856
1857 Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1858 const Twine &Name = "") {
1859 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1860
1861 if (auto *PC = dyn_cast<Constant>(Ptr))
1862 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1863
1864 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1865 }
1866
1867 Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1868 const Twine &Name = "") {
1869 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1870
1871 if (auto *PC = dyn_cast<Constant>(Ptr))
1872 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1873
1874 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1875 }
1876
1877 Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
1878 const Twine &Name = "") {
1879 Value *Idxs[] = {
1880 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1881 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1882 };
1883
1884 if (auto *PC = dyn_cast<Constant>(Ptr))
1885 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1886
1887 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1888 }
1889
1890 Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
1891 unsigned Idx1, const Twine &Name = "") {
1892 Value *Idxs[] = {
1893 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1894 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1895 };
1896
1897 if (auto *PC = dyn_cast<Constant>(Ptr))
1898 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1899
1900 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1901 }
1902
1903 Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1904 const Twine &Name = "") {
1905 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1906
1907 if (auto *PC = dyn_cast<Constant>(Ptr))
1908 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1909
1910 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1911 }
1912
1913 LLVM_ATTRIBUTE_DEPRECATED(
1914 Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0,
1915 const Twine &Name = ""),
1916 "Use the version with explicit element type instead") {
1917 return CreateConstGEP1_64(
1918 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1919 Name);
1920 }
1921
1922 Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1923 const Twine &Name = "") {
1924 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1925
1926 if (auto *PC = dyn_cast<Constant>(Ptr))
1927 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1928
1929 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1930 }
1931
1932 LLVM_ATTRIBUTE_DEPRECATED(
1933 Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
1934 const Twine &Name = ""),
1935 "Use the version with explicit element type instead") {
1936 return CreateConstInBoundsGEP1_64(
1937 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1938 Name);
1939 }
1940
1941 Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1942 const Twine &Name = "") {
1943 Value *Idxs[] = {
1944 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1945 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1946 };
1947
1948 if (auto *PC = dyn_cast<Constant>(Ptr))
1949 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1950
1951 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1952 }
1953
1954 LLVM_ATTRIBUTE_DEPRECATED(
1955 Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1956 const Twine &Name = ""),
1957 "Use the version with explicit element type instead") {
1958 return CreateConstGEP2_64(
1959 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1960 Idx1, Name);
1961 }
1962
1963 Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1964 uint64_t Idx1, const Twine &Name = "") {
1965 Value *Idxs[] = {
1966 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1967 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1968 };
1969
1970 if (auto *PC = dyn_cast<Constant>(Ptr))
1971 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1972
1973 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1974 }
1975
1976 LLVM_ATTRIBUTE_DEPRECATED(
1977 Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0,
1978 uint64_t Idx1, const Twine &Name = ""),
1979 "Use the version with explicit element type instead") {
1980 return CreateConstInBoundsGEP2_64(
1981 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1982 Idx1, Name);
1983 }
1984
1985 Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
1986 const Twine &Name = "") {
1987 return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
1988 }
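// --- Example (editor annotation; not part of IRBuilder.h) ---------------
// Sketch of addressing the second field of a struct in memory; assumes a
// StructType `PairTy` = {i32, i32} and a pointer Value `P` to a PairTy
// (hypothetical names).
Value *FieldPtr = B.CreateStructGEP(PairTy, P, 1, "field1");
// ------------------------------------------------------------------------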
1989
1990 LLVM_ATTRIBUTE_DEPRECATED(
1991 Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = ""),
1992 "Use the version with explicit element type instead") {
1993 return CreateConstInBoundsGEP2_32(
1994 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, 0, Idx,
1995 Name);
1996 }
1997
1998 /// Same as CreateGlobalString, but returns a pointer with "i8*" type
1999 /// instead of a pointer to array of i8.
2000 ///
2001 /// If no module is given via \p M, it is taken from the insertion point's basic
2002 /// block.
2003 Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
2004 unsigned AddressSpace = 0,
2005 Module *M = nullptr) {
2006 GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
2007 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
2008 Constant *Indices[] = {Zero, Zero};
2009 return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
2010 Indices);
2011 }
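// --- Example (editor annotation; not part of IRBuilder.h) ---------------
// Sketch of materializing a string literal as an i8* constant; the global
// is created in the module of the current insertion point.
Constant *Msg = B.CreateGlobalStringPtr("hello\n");
// ------------------------------------------------------------------------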
2012
2013 //===--------------------------------------------------------------------===//
2014 // Instruction creation methods: Cast/Conversion Operators
2015 //===--------------------------------------------------------------------===//
2016
2017 Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
2018 return CreateCast(Instruction::Trunc, V, DestTy, Name);
2019 }
2020
2021 Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
2022 return CreateCast(Instruction::ZExt, V, DestTy, Name);
2023 }
2024
2025 Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
2026 return CreateCast(Instruction::SExt, V, DestTy, Name);
2027 }
2028
2029 /// Create a ZExt or Trunc from the integer value V to DestTy. Return
2030 /// the value untouched if the type of V is already DestTy.
2031 Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
2032 const Twine &Name = "") {
2033 assert(V->getType()->isIntOrIntVectorTy() &&
2034 DestTy->isIntOrIntVectorTy() &&
2035 "Can only zero extend/truncate integers!");
2036 Type *VTy = V->getType();
2037 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2038 return CreateZExt(V, DestTy, Name);
2039 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2040 return CreateTrunc(V, DestTy, Name);
2041 return V;
2042 }
2043
2044 /// Create a SExt or Trunc from the integer value V to DestTy. Return
2045 /// the value untouched if the type of V is already DestTy.
2046 Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
2047 const Twine &Name = "") {
2048 assert(V->getType()->isIntOrIntVectorTy() &&
2049 DestTy->isIntOrIntVectorTy() &&
2050 "Can only sign extend/truncate integers!");
2051 Type *VTy = V->getType();
2052 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2053 return CreateSExt(V, DestTy, Name);
2054 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2055 return CreateTrunc(V, DestTy, Name);
2056 return V;
2057 }
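// --- Example (editor annotation; not part of IRBuilder.h) ---------------
// Sketch of normalizing an integer Value to i32, widening with zext or
// narrowing with trunc as needed, and passing i32 inputs through unchanged;
// assumes an integer-typed Value `X` (hypothetical name).
Value *AsI32 = B.CreateZExtOrTrunc(X, B.getInt32Ty());
// ------------------------------------------------------------------------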
2058
2059 Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
2060 if (IsFPConstrained)
2061 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
2062 V, DestTy, nullptr, Name);
2063 return CreateCast(Instruction::FPToUI, V, DestTy, Name);
2064 }
2065
2066 Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
2067 if (IsFPConstrained)
2068 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
2069 V, DestTy, nullptr, Name);
2070 return CreateCast(Instruction::FPToSI, V, DestTy, Name);
2071 }
2072
2073 Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2074 if (IsFPConstrained)
2075 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
2076 V, DestTy, nullptr, Name);
2077 return CreateCast(Instruction::UIToFP, V, DestTy, Name);
2078 }
2079
2080 Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2081 if (IsFPConstrained)
2082 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
2083 V, DestTy, nullptr, Name);
2084 return CreateCast(Instruction::SIToFP, V, DestTy, Name);
2085 }
2086
2087 Value *CreateFPTrunc(Value *V, Type *DestTy,
2088 const Twine &Name = "") {
2089 if (IsFPConstrained)
2090 return CreateConstrainedFPCast(
2091 Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
2092 Name);
2093 return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
2094 }
2095
2096 Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
2097 if (IsFPConstrained)
2098 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
2099 V, DestTy, nullptr, Name);
2100 return CreateCast(Instruction::FPExt, V, DestTy, Name);
2101 }
2102
2103 Value *CreatePtrToInt(Value *V, Type *DestTy,
2104 const Twine &Name = "") {
2105 return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
2106 }
2107
2108 Value *CreateIntToPtr(Value *V, Type *DestTy,
2109 const Twine &Name = "") {
2110 return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
2111 }
2112
2113 Value *CreateBitCast(Value *V, Type *DestTy,
2114 const Twine &Name = "") {
2115 return CreateCast(Instruction::BitCast, V, DestTy, Name);
2116 }
2117
2118 Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
2119 const Twine &Name = "") {
2120 return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
2121 }
2122
2123 Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
2124 const Twine &Name = "") {
2125 if (V->getType() == DestTy)
2126 return V;
2127 if (auto *VC = dyn_cast<Constant>(V))
2128 return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
2129 return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
2130 }
2131
2132 Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
2133 const Twine &Name = "") {
2134 if (V->getType() == DestTy)
2135 return V;
2136 if (auto *VC = dyn_cast<Constant>(V))
2137 return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
2138 return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
2139 }
2140
2141 Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
2142 const Twine &Name = "") {
2143 if (V->getType() == DestTy)
2144 return V;
2145 if (auto *VC = dyn_cast<Constant>(V))
2146 return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
2147 return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
2148 }
2149
2150 Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
2151 const Twine &Name = "") {
2152 if (V->getType() == DestTy)
2153 return V;
2154 if (auto *VC = dyn_cast<Constant>(V))
2155 return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
2156 return Insert(CastInst::Create(Op, V, DestTy), Name);
2157 }
2158
2159 Value *CreatePointerCast(Value *V, Type *DestTy,
2160 const Twine &Name = "") {
2161 if (V->getType() == DestTy)
2162 return V;
2163 if (auto *VC = dyn_cast<Constant>(V))
2164 return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
2165 return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
2166 }
2167
2168 Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
2169 const Twine &Name = "") {
2170 if (V->getType() == DestTy)
2171 return V;
2172
2173 if (auto *VC = dyn_cast<Constant>(V)) {
2174 return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
2175 Name);
2176 }
2177
2178 return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
2179 Name);
2180 }
2181
2182 Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
2183 const Twine &Name = "") {
2184 if (V->getType() == DestTy)
2185 return V;
2186 if (auto *VC = dyn_cast<Constant>(V))
2187 return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
2188 return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
2189 }
2190
2191 Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
2192 const Twine &Name = "") {
2193 if (V->getType() == DestTy)
2194 return V;
2195 if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
2196 return CreatePtrToInt(V, DestTy, Name);
2197 if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
2198 return CreateIntToPtr(V, DestTy, Name);
2199
2200 return CreateBitCast(V, DestTy, Name);
2201 }
2202
2203 Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
2204 if (V->getType() == DestTy)
2205 return V;
2206 if (auto *VC = dyn_cast<Constant>(V))
2207 return Insert(Folder.CreateFPCast(VC, DestTy), Name);
2208 return Insert(CastInst::CreateFPCast(V, DestTy), Name);
2209 }
2210
2211 CallInst *CreateConstrainedFPCast(
2212 Intrinsic::ID ID, Value *V, Type *DestTy,
2213 Instruction *FMFSource = nullptr, const Twine &Name = "",
2214 MDNode *FPMathTag = nullptr,
2215 Optional<RoundingMode> Rounding = None,
2216 Optional<fp::ExceptionBehavior> Except = None);
2217
2218 // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")' with a
2219 // compile-time error instead of silently converting the string literal to
2220 // bool for the isSigned parameter.
2221 Value *CreateIntCast(Value *, Type *, const char *) = delete;
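// [Editor's illustration, not part of IRBuilder.h] A client-side sketch of why
// the overload above is deleted: without it, the string literal below would
// silently convert to 'bool isSigned' and compile. 'demoIntCast' and its
// parameters are hypothetical names; this would live in user code, not here.
static Value *demoIntCast(IRBuilderBase &B, Value *V, Type *I32) {
  // B.CreateIntCast(V, I32, "tmp");   // would not compile: overload deleted
  return B.CreateIntCast(V, I32, /*isSigned=*/true, "tmp"); // explicit bool: OK
}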
2222
2223 //===--------------------------------------------------------------------===//
2224 // Instruction creation methods: Compare Instructions
2225 //===--------------------------------------------------------------------===//
2226
2227 Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
2228 return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
2229 }
2230
2231 Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
2232 return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
2233 }
2234
2235 Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2236 return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
2237 }
2238
2239 Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2240 return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
2241 }
2242
2243 Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
2244 return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
2245 }
2246
2247 Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
2248 return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
2249 }
2250
2251 Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2252 return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
2253 }
2254
2255 Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2256 return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
2257 }
2258
2259 Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
2260 return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
2261 }
2262
2263 Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
2264 return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
2265 }
2266
2267 Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2268 MDNode *FPMathTag = nullptr) {
2269 return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
2270 }
2271
2272 Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
2273 MDNode *FPMathTag = nullptr) {
2274 return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
2275 }
2276
2277 Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
2278 MDNode *FPMathTag = nullptr) {
2279 return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
2280 }
2281
2282 Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
2283 MDNode *FPMathTag = nullptr) {
2284 return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
2285 }
2286
2287 Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
2288 MDNode *FPMathTag = nullptr) {
2289 return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
2290 }
2291
2292 Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
2293 MDNode *FPMathTag = nullptr) {
2294 return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
2295 }
2296
2297 Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
2298 MDNode *FPMathTag = nullptr) {
2299 return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
2300 }
2301
2302 Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
2303 MDNode *FPMathTag = nullptr) {
2304 return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
2305 }
2306
2307 Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2308 MDNode *FPMathTag = nullptr) {
2309 return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
2310 }
2311
2312 Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
2313 MDNode *FPMathTag = nullptr) {
2314 return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
2315 }
2316
2317 Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
2318 MDNode *FPMathTag = nullptr) {
2319 return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
2320 }
2321
2322 Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
2323 MDNode *FPMathTag = nullptr) {
2324 return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
2325 }
2326
2327 Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
2328 MDNode *FPMathTag = nullptr) {
2329 return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
2330 }
2331
2332 Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
2333 MDNode *FPMathTag = nullptr) {
2334 return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
2335 }
2336
2337 Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2338 const Twine &Name = "") {
2339 if (auto *LC = dyn_cast<Constant>(LHS))
2340 if (auto *RC = dyn_cast<Constant>(RHS))
2341 return Insert(Folder.CreateICmp(P, LC, RC), Name);
2342 return Insert(new ICmpInst(P, LHS, RHS), Name);
2343 }
2344
2345 // Create a quiet floating-point comparison (i.e. one that raises an FP
2346 // exception only in the case where an input is a signaling NaN).
2347 // Note that this differs from CreateFCmpS only if IsFPConstrained is true.
2348 Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2349 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2350 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
2351 }
2352
2353 Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2354 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2355 return CmpInst::isFPPredicate(Pred)
2356 ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag)
2357 : CreateICmp(Pred, LHS, RHS, Name);
2358 }
2359
2360 // Create a signaling floating-point comparison (i.e. one that raises an FP
2361 // exception whenever an input is any NaN, signaling or quiet).
2362 // Note that this differs from CreateFCmp only if IsFPConstrained is true.
2363 Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
2364 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2365 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
2366 }
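// [Editor's illustration, not part of IRBuilder.h] A client-side sketch of the
// quiet/signaling distinction described above; it only matters once the
// builder is in constrained-FP mode. 'demoFCmp' and its parameters are
// hypothetical names.
static void demoFCmp(IRBuilderBase &B, Value *L, Value *R) {
  B.setIsFPConstrained(true); // route compares through constrained intrinsics
  // Quiet: raises an FP exception only for signaling NaN inputs.
  Value *Quiet = B.CreateFCmp(FCmpInst::FCMP_OLT, L, R, "quiet");
  // Signaling: raises an FP exception for any NaN input, quiet or signaling.
  Value *Signaling = B.CreateFCmpS(FCmpInst::FCMP_OLT, L, R, "signaling");
  (void)Quiet; (void)Signaling;
}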
2367
2368private:
2369 // Helper routine to create either a signaling or a quiet FP comparison.
2370 Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
2371 const Twine &Name, MDNode *FPMathTag,
2372 bool IsSignaling);
2373
2374public:
2375 CallInst *CreateConstrainedFPCmp(
2376 Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
2377 const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
2378
2379 //===--------------------------------------------------------------------===//
2380 // Instruction creation methods: Other Instructions
2381 //===--------------------------------------------------------------------===//
2382
2383 PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
2384 const Twine &Name = "") {
2385 PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
2386 if (isa<FPMathOperator>(Phi))
2387 setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
2388 return Insert(Phi, Name);
2389 }
2390
2391 CallInst *CreateCall(FunctionType *FTy, Value *Callee,
2392 ArrayRef<Value *> Args = None, const Twine &Name = "",
2393 MDNode *FPMathTag = nullptr) {
2394 CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
2395 if (IsFPConstrained)
2396 setConstrainedFPCallAttr(CI);
2397 if (isa<FPMathOperator>(CI))
2398 setFPAttrs(CI, FPMathTag, FMF);
2399 return Insert(CI, Name);
2400 }
2401
2402 CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
2403 ArrayRef<OperandBundleDef> OpBundles,
2404 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2405 CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
2406 if (IsFPConstrained)
2407 setConstrainedFPCallAttr(CI);
2408 if (isa<FPMathOperator>(CI))
2409 setFPAttrs(CI, FPMathTag, FMF);
2410 return Insert(CI, Name);
2411 }
2412
2413 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
2414 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2415 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
2416 FPMathTag);
2417 }
2418
2419 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
2420 ArrayRef<OperandBundleDef> OpBundles,
2421 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2422 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
2423 OpBundles, Name, FPMathTag);
2424 }
2425
2426 CallInst *CreateConstrainedFPCall(
2427 Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
2428 Optional<RoundingMode> Rounding = None,
2429 Optional<fp::ExceptionBehavior> Except = None);
2430
2431 Value *CreateSelect(Value *C, Value *True, Value *False,
2432 const Twine &Name = "", Instruction *MDFrom = nullptr);
2433
2434 VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
2435 return Insert(new VAArgInst(List, Ty), Name);
2436 }
2437
2438 Value *CreateExtractElement(Value *Vec, Value *Idx,
2439 const Twine &Name = "") {
2440 if (auto *VC = dyn_cast<Constant>(Vec))
2441 if (auto *IC = dyn_cast<Constant>(Idx))
2442 return Insert(Folder.CreateExtractElement(VC, IC), Name);
2443 return Insert(ExtractElementInst::Create(Vec, Idx), Name);
2444 }
2445
2446 Value *CreateExtractElement(Value *Vec, uint64_t Idx,
2447 const Twine &Name = "") {
2448 return CreateExtractElement(Vec, getInt64(Idx), Name);
2449 }
2450
2451 Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
2452 const Twine &Name = "") {
2453 if (auto *VC = dyn_cast<Constant>(Vec))
2454 if (auto *NC = dyn_cast<Constant>(NewElt))
2455 if (auto *IC = dyn_cast<Constant>(Idx))
2456 return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
2457 return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
2458 }
2459
2460 Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
2461 const Twine &Name = "") {
2462 return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
2463 }
2464
2465 Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
2466 const Twine &Name = "") {
2467 SmallVector<int, 16> IntMask;
2468 ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
2469 return CreateShuffleVector(V1, V2, IntMask, Name);
2470 }
2471
2472   LLVM_ATTRIBUTE_DEPRECATED(Value *CreateShuffleVector(Value *V1, Value *V2,
2473                                                        ArrayRef<uint32_t> Mask,
2474                                                        const Twine &Name = ""),
2475                             "Pass indices as 'int' instead") {
2476 SmallVector<int, 16> IntMask;
2477 IntMask.assign(Mask.begin(), Mask.end());
2478 return CreateShuffleVector(V1, V2, IntMask, Name);
2479 }
2480
2481 /// See class ShuffleVectorInst for a description of the mask representation.
2482 Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
2483 const Twine &Name = "") {
2484 if (auto *V1C = dyn_cast<Constant>(V1))
2485 if (auto *V2C = dyn_cast<Constant>(V2))
2486 return Insert(Folder.CreateShuffleVector(V1C, V2C, Mask), Name);
2487 return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
2488 }
2489
2490 /// Create a unary shuffle. The second vector operand of the IR instruction
2491 /// is poison.
2492 Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask,
2493 const Twine &Name = "") {
2494 return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name);
2495 }
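// [Editor's illustration, not part of IRBuilder.h] A client-side sketch of the
// int-mask overloads above: mask indices 0..N-1 select from V1 and N..2N-1
// from the concatenated V2, and the unary form fills operand two with poison.
// Hypothetical names; assumes V1 and V2 are <4 x i32> values.
static void demoShuffles(IRBuilderBase &B, Value *V1, Value *V2) {
  int Interleave[] = {0, 4, 1, 5}; // low halves of V1 and V2, interleaved
  Value *Mixed = B.CreateShuffleVector(V1, V2, Interleave, "mix");
  int Reverse[] = {3, 2, 1, 0};    // unary form: second operand is poison
  Value *Rev = B.CreateShuffleVector(V1, Reverse, "rev");
  (void)Mixed; (void)Rev;
}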
2496
2497 Value *CreateExtractValue(Value *Agg,
2498 ArrayRef<unsigned> Idxs,
2499 const Twine &Name = "") {
2500 if (auto *AggC = dyn_cast<Constant>(Agg))
2501 return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
2502 return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
2503 }
2504
2505 Value *CreateInsertValue(Value *Agg, Value *Val,
2506 ArrayRef<unsigned> Idxs,
2507 const Twine &Name = "") {
2508 if (auto *AggC = dyn_cast<Constant>(Agg))
2509 if (auto *ValC = dyn_cast<Constant>(Val))
2510 return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
2511 return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
2512 }
2513
2514 LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
2515 const Twine &Name = "") {
2516 return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
2517 }
2518
2519 Value *CreateFreeze(Value *V, const Twine &Name = "") {
2520 return Insert(new FreezeInst(V), Name);
2521 }
2522
2523 //===--------------------------------------------------------------------===//
2524 // Utility creation methods
2525 //===--------------------------------------------------------------------===//
2526
2527 /// Return an i1 value testing if \p Arg is null.
2528 Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
2529 return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
2530 Name);
2531 }
2532
2533 /// Return an i1 value testing if \p Arg is not null.
2534 Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
2535 return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
2536 Name);
2537 }
2538
2539 /// Return the i64 difference between two pointer values, dividing out
2540 /// the size of the pointed-to objects.
2541 ///
2542 /// This is intended to implement C-style pointer subtraction. As such, the
2543 /// pointers must be appropriately aligned for their element types and
2544 /// pointing into the same object.
2545 Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "");
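// [Editor's illustration, not part of IRBuilder.h] A client-side sketch of
// CreatePtrDiff: the i64 result is the ptrtoint difference divided exactly by
// the element size, i.e. C-style pointer subtraction. Hypothetical names;
// assumes P and Q point into the same array.
static Value *demoPtrDiff(IRBuilderBase &B, Value *P, Value *Q) {
  return B.CreatePtrDiff(P, Q, "nelts"); // element count, not a byte count
}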
2546
2547 /// Create a launder.invariant.group intrinsic call. If Ptr's type is
2548 /// different from pointer to i8, it is cast to pointer to i8 in the same
2549 /// address space before the call and cast back to Ptr's type after the call.
2550 Value *CreateLaunderInvariantGroup(Value *Ptr);
2551
2552 /// \brief Create a strip.invariant.group intrinsic call. If Ptr's type is
2553 /// different from pointer to i8, it is cast to pointer to i8 in the same
2554 /// address space before the call and cast back to Ptr's type after the call.
2555 Value *CreateStripInvariantGroup(Value *Ptr);
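// [Editor's illustration, not part of IRBuilder.h] A client-side sketch of the
// two invariant.group helpers above; any i8* casts they need are emitted and
// undone internally, so each result has Ptr's type. Hypothetical names.
static Value *demoInvariantGroup(IRBuilderBase &B, Value *Ptr) {
  // launder.invariant.group acts as a barrier for !invariant.group facts.
  Value *Laundered = B.CreateLaunderInvariantGroup(Ptr);
  return B.CreateStripInvariantGroup(Laundered);
}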
2556
2557 /// Return a vector value that contains the vector V reversed
2558 Value *CreateVectorReverse(Value *V, const Twine &Name = "");
2559
2560 /// Return a vector splice intrinsic if using scalable vectors, otherwise
2561 /// return a shufflevector. If the immediate is positive, a vector is
2562 /// extracted from concat(V1, V2), starting at Imm. If the immediate
2563 /// is negative, we extract -Imm elements from V1 and the remaining
2564 /// elements from V2. Imm is a signed integer in the range
2565 /// -VL <= Imm < VL (where VL is the runtime vector length of the
2566 /// source/result vector)
2567 Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
2568 const Twine &Name = "");
2569
2570 /// Return a vector value that contains \arg V broadcasted to \p
2571 /// NumElts elements.
2572 Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");
2573
2574 /// Return a vector value that contains \arg V broadcasted to \p
2575 /// EC elements.
2576 Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");
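// [Editor's illustration, not part of IRBuilder.h] A client-side sketch of the
// reverse/splice/splat helpers above on fixed <4 x i32> vectors, where they
// all lower to shuffles. Hypothetical names.
static void demoVectorUtils(IRBuilderBase &B, Value *V1, Value *V2, Value *X) {
  Value *Rev = B.CreateVectorReverse(V1, "rev");              // {a,b,c,d} -> {d,c,b,a}
  Value *Hi = B.CreateVectorSplice(V1, V2, /*Imm=*/1, "hi");  // concat(V1,V2)[1..4]
  Value *Lo = B.CreateVectorSplice(V1, V2, /*Imm=*/-1, "lo"); // last of V1, then first 3 of V2
  Value *Spl = B.CreateVectorSplat(/*NumElts=*/4, X, "spl");  // {X,X,X,X}
  (void)Rev; (void)Hi; (void)Lo; (void)Spl;
}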
2577
2578 /// Return a value that has been extracted from a larger integer type.
2579 Value *CreateExtractInteger(const DataLayout &DL, Value *From,
2580 IntegerType *ExtractedTy, uint64_t Offset,
2581 const Twine &Name);
2582
2583 Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
2584 unsigned Dimension, unsigned LastIndex,
2585 MDNode *DbgInfo);
2586
2587 Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
2588 MDNode *DbgInfo);
2589
2590 Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
2591 unsigned Index, unsigned FieldIndex,
2592 MDNode *DbgInfo);
2593
2594private:
2595 /// Helper function that creates an assume intrinsic call that
2596 /// represents an alignment assumption on the provided pointer \p PtrValue
2597 /// with offset \p OffsetValue and alignment value \p AlignValue.
2598 CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
2599 Value *PtrValue, Value *AlignValue,
2600 Value *OffsetValue);
2601
2602public:
2603 /// Create an assume intrinsic call that represents an alignment
2604 /// assumption on the provided pointer.
2605 ///
2606 /// An optional offset can be provided, and if it is provided, the offset
2607 /// must be subtracted from the provided pointer to get the pointer with the
2608 /// specified alignment.
2609 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2610 unsigned Alignment,
2611 Value *OffsetValue = nullptr);
2612
2613 /// Create an assume intrinsic call that represents an alignment
2614 /// assumption on the provided pointer.
2615 ///
2616 /// An optional offset can be provided, and if it is provided, the offset
2617 /// must be subtracted from the provided pointer to get the pointer with the
2618 /// specified alignment.
2619 ///
2620 /// This overload handles the condition where the Alignment is dependent
2621 /// on an existing value rather than a static value.
2622 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2623 Value *Alignment,
2624 Value *OffsetValue = nullptr);
2625};
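// [Editor's illustration, not part of IRBuilder.h] A client-side sketch of the
// static-alignment overload above; the OffsetValue variant would subtract the
// offset first, per the comment. Hypothetical names; DL is the module's
// data layout.
static void demoAlignAssume(IRBuilderBase &B, const DataLayout &DL, Value *Ptr) {
  B.CreateAlignmentAssumption(DL, Ptr, /*Alignment=*/16); // emits an llvm.assume
}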
2626
2627/// This provides a uniform API for creating instructions and inserting
2628/// them into a basic block: either at the end of a BasicBlock, or at a specific
2629/// iterator location in a block.
2630///
2631/// Note that the builder does not expose the full generality of LLVM
2632/// instructions. For access to extra instruction properties, use the mutators
2633/// (e.g. setVolatile) on the instructions after they have been
2634/// created. Convenience state exists to specify fast-math flags and fp-math
2635/// tags.
2636///
2637/// The first template argument specifies a class to use for creating constants.
2638/// This defaults to creating minimally folded constants. The second template
2639/// argument allows clients to specify custom insertion hooks that are called on
2640/// every newly created instruction.
2641template <typename FolderTy = ConstantFolder,
2642 typename InserterTy = IRBuilderDefaultInserter>
2643class IRBuilder : public IRBuilderBase {
2644private:
2645 FolderTy Folder;
2646 InserterTy Inserter;
2647
2648public:
2649 IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
2650 MDNode *FPMathTag = nullptr,
2651 ArrayRef<OperandBundleDef> OpBundles = None)
2652 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
2653 Folder(Folder), Inserter(Inserter) {}
2654
2655 explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
2656 ArrayRef<OperandBundleDef> OpBundles = None)
2657 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}
2658
2659 explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
2660 MDNode *FPMathTag = nullptr,
2661 ArrayRef<OperandBundleDef> OpBundles = None)
2662 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2663 FPMathTag, OpBundles), Folder(Folder) {
2664 SetInsertPoint(TheBB);
2665 }
2666
2667 explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
2668 ArrayRef<OperandBundleDef> OpBundles = None)
2669 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2670 FPMathTag, OpBundles) {
2671 SetInsertPoint(TheBB);
2672 }
2673
2674 explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
2675 ArrayRef<OperandBundleDef> OpBundles = None)
2676 : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
Step 9: Called C++ object pointer is null
2677 FPMathTag, OpBundles) {
2678 SetInsertPoint(IP);
2679 }
2680
2681 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
2682 MDNode *FPMathTag = nullptr,
2683 ArrayRef<OperandBundleDef> OpBundles = None)
2684 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2685 FPMathTag, OpBundles), Folder(Folder) {
2686 SetInsertPoint(TheBB, IP);
2687 }
2688
2689 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
2690 MDNode *FPMathTag = nullptr,
2691 ArrayRef<OperandBundleDef> OpBundles = None)
2692 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2693 FPMathTag, OpBundles) {
2694 SetInsertPoint(TheBB, IP);
2695 }
2696
2697 /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
2698 /// or FastMathFlagGuard instead.
2699 IRBuilder(const IRBuilder &) = delete;
2700
2701 InserterTy &getInserter() { return Inserter; }
2702};
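// [Editor's illustration, not part of IRBuilder.h] A client-side sketch of the
// class above: default folder/inserter construction plus the InsertPointGuard
// recommended instead of copying the builder. 'BB' and 'Entry' are
// hypothetical basic blocks in the same function.
static void demoBuilder(BasicBlock *BB, BasicBlock *Entry) {
  IRBuilder<> B(BB); // insert at the end of BB
  {
    IRBuilderBase::InsertPointGuard Guard(B); // saves the insertion point
    B.SetInsertPoint(Entry, Entry->begin());
    // ... emit set-up code into Entry ...
  } // Guard restores the original insertion point at the end of BB
  B.CreateRetVoid();
}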
2703
2704// Create wrappers for C Binding types (see CBindingWrapping.h).
2705 DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)
2706
2707} // end namespace llvm
2708
2709#endif // LLVM_IR_IRBUILDER_H