Bug Summary

File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Transforms/Scalar/LICM.cpp
Warning: line 1163, column 11
Called C++ object pointer is null
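
This class of warning is reported when a member function is called through a pointer that the analyzer believes can be null on at least one path. As a minimal, hypothetical illustration of the pattern (not the actual code at the flagged line):

    struct Node {
      int value() const { return 42; }
    };

    int readValue(Node *N, bool HasNode) {
      // P is null on the path where HasNode is false.
      Node *P = HasNode ? N : nullptr;
      // Analyzer: "Called C++ object pointer is null" on that path.
      return P->value();
    }

The flagged call in this report is at LICM.cpp line 1163, column 11; the annotated source leading up to it follows.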

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name LICM.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Transforms/Scalar/LICM.cpp
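
The long command line above is the exact clang -cc1 invocation recorded by the analyzer; the -o argument near its end points into a scan-build results directory. As a rough sketch (the output path and build target here are placeholders, not taken from this report), a report like this is typically produced by wrapping the build with scan-build:

    scan-build -o /path/to/results make

scan-build interposes on the compiler, runs the static analyzer over each translation unit, and writes one HTML report per finding into a dated results directory like the one named above.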

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Transforms/Scalar/LICM.cpp

1//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass performs loop invariant code motion, attempting to remove as much
10// code from the body of a loop as possible. It does this by either hoisting
11// code into the preheader block, or by sinking code to the exit blocks if it is
12// safe. This pass also promotes must-aliased memory locations in the loop to
13// live in registers, thus hoisting and sinking "invariant" loads and stores.
14//
15// Hoisting operations out of loops is a canonicalization transform. It
16// enables and simplifies subsequent optimizations in the middle-end.
17// Rematerialization of hoisted instructions to reduce register pressure is the
18// responsibility of the back-end, which has more accurate information about
19// register pressure and also handles other optimizations than LICM that
20// increase live-ranges.
21//
22// This pass uses alias analysis for two purposes:
23//
24// 1. Moving loop invariant loads and calls out of loops. If we can determine
25// that a load or call inside of a loop never aliases anything stored to,
26// we can hoist it or sink it like any other instruction.
27// 2. Scalar Promotion of Memory - If there is a store instruction inside of
28// the loop, we try to move the store to happen AFTER the loop instead of
29// inside of the loop. This can only happen if a few conditions are true:
30// A. The pointer stored through is loop invariant
31// B. There are no stores or loads in the loop which _may_ alias the
32// pointer. There are no calls in the loop which mod/ref the pointer.
33// If these conditions are true, we can promote the loads and stores in the
34// loop of the pointer to use a temporary alloca'd variable. We then use
35// the SSAUpdater to construct the appropriate SSA form for the value.
36//
37//===----------------------------------------------------------------------===//
38
39#include "llvm/Transforms/Scalar/LICM.h"
40#include "llvm/ADT/SetOperations.h"
41#include "llvm/ADT/Statistic.h"
42#include "llvm/Analysis/AliasAnalysis.h"
43#include "llvm/Analysis/AliasSetTracker.h"
44#include "llvm/Analysis/BasicAliasAnalysis.h"
45#include "llvm/Analysis/BlockFrequencyInfo.h"
46#include "llvm/Analysis/CaptureTracking.h"
47#include "llvm/Analysis/ConstantFolding.h"
48#include "llvm/Analysis/GlobalsModRef.h"
49#include "llvm/Analysis/GuardUtils.h"
50#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
51#include "llvm/Analysis/Loads.h"
52#include "llvm/Analysis/LoopInfo.h"
53#include "llvm/Analysis/LoopIterator.h"
54#include "llvm/Analysis/LoopPass.h"
55#include "llvm/Analysis/MemoryBuiltins.h"
56#include "llvm/Analysis/MemorySSA.h"
57#include "llvm/Analysis/MemorySSAUpdater.h"
58#include "llvm/Analysis/MustExecute.h"
59#include "llvm/Analysis/OptimizationRemarkEmitter.h"
60#include "llvm/Analysis/ScalarEvolution.h"
61#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
62#include "llvm/Analysis/TargetLibraryInfo.h"
63#include "llvm/Analysis/ValueTracking.h"
64#include "llvm/IR/CFG.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DataLayout.h"
67#include "llvm/IR/DebugInfoMetadata.h"
68#include "llvm/IR/DerivedTypes.h"
69#include "llvm/IR/Dominators.h"
70#include "llvm/IR/Instructions.h"
71#include "llvm/IR/IntrinsicInst.h"
72#include "llvm/IR/LLVMContext.h"
73#include "llvm/IR/Metadata.h"
74#include "llvm/IR/PatternMatch.h"
75#include "llvm/IR/PredIteratorCache.h"
76#include "llvm/InitializePasses.h"
77#include "llvm/Support/CommandLine.h"
78#include "llvm/Support/Debug.h"
79#include "llvm/Support/raw_ostream.h"
80#include "llvm/Transforms/Scalar.h"
81#include "llvm/Transforms/Scalar/LoopPassManager.h"
82#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
83#include "llvm/Transforms/Utils/BasicBlockUtils.h"
84#include "llvm/Transforms/Utils/Local.h"
85#include "llvm/Transforms/Utils/LoopUtils.h"
86#include "llvm/Transforms/Utils/SSAUpdater.h"
87#include <algorithm>
88#include <utility>
89using namespace llvm;
90
91#define DEBUG_TYPE "licm"
92
93STATISTIC(NumCreatedBlocks, "Number of blocks created");
94STATISTIC(NumClonedBranches, "Number of branches cloned");
95STATISTIC(NumSunk, "Number of instructions sunk out of loop");
96STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
97STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
98STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
99STATISTIC(NumPromoted, "Number of memory locations promoted to registers");
100
101/// Memory promotion is enabled by default.
102static cl::opt<bool>
103 DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
104 cl::desc("Disable memory promotion in LICM pass"));
105
106static cl::opt<bool> ControlFlowHoisting(
107 "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
108 cl::desc("Enable control flow (and PHI) hoisting in LICM"));
109
110static cl::opt<unsigned> HoistSinkColdnessThreshold(
111 "licm-coldness-threshold", cl::Hidden, cl::init(4),
112 cl::desc("Relative coldness Threshold of hoisting/sinking destination "
113 "block for LICM to be considered beneficial"));
114
115static cl::opt<uint32_t> MaxNumUsesTraversed(
116 "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
117 cl::desc("Max num uses visited for identifying load "
118 "invariance in loop using invariant start (default = 8)"));
119
120// Default value of zero implies we use the regular alias set tracker mechanism
121// instead of the cross product using AA to identify aliasing of the memory
122// location we are interested in.
123static cl::opt<int>
124LICMN2Theshold("licm-n2-threshold", cl::Hidden, cl::init(0),
125 cl::desc("How many instruction to cross product using AA"));
126
127// Experimental option to allow imprecision in LICM in pathological cases, in
128// exchange for faster compile. This is to be removed if MemorySSA starts to
129// address the same issue. This flag applies only when LICM uses MemorySSA
130// instead of AliasSetTracker. LICM calls MemorySSAWalker's
131// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
132// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
133// which may not be precise, since optimizeUses is capped. The result is
134// correct, but we may not get as "far up" as possible to get which access is
135// clobbering the one queried.
136cl::opt<unsigned> llvm::SetLicmMssaOptCap(
137 "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
138 cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
139 "for faster compile. Caps the MemorySSA clobbering calls."));
140
141// Experimentally, memory promotion carries less importance than sinking and
142// hoisting. Limit when we do promotion when using MemorySSA, in order to save
143// compile time.
144cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
145 "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
146 cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
147 "effect. When MSSA in LICM is enabled, then this is the maximum "
148 "number of accesses allowed to be present in a loop in order to "
149 "enable memory promotion."));
150
151static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
152static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
153 const LoopSafetyInfo *SafetyInfo,
154 TargetTransformInfo *TTI, bool &FreeInLoop);
155static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
156 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
157 MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
158 OptimizationRemarkEmitter *ORE);
159static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
160 BlockFrequencyInfo *BFI, const Loop *CurLoop,
161 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
162 OptimizationRemarkEmitter *ORE);
163static bool isSafeToExecuteUnconditionally(Instruction &Inst,
164 const DominatorTree *DT,
165 const TargetLibraryInfo *TLI,
166 const Loop *CurLoop,
167 const LoopSafetyInfo *SafetyInfo,
168 OptimizationRemarkEmitter *ORE,
169 const Instruction *CtxI = nullptr);
170static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
171 AliasSetTracker *CurAST, Loop *CurLoop,
172 AAResults *AA);
173static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
174 Loop *CurLoop, Instruction &I,
175 SinkAndHoistLICMFlags &Flags);
176static bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
177 MemoryUse &MU);
178static Instruction *cloneInstructionInExitBlock(
179 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
180 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);
181
182static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
183 AliasSetTracker *AST, MemorySSAUpdater *MSSAU);
184
185static void moveInstructionBefore(Instruction &I, Instruction &Dest,
186 ICFLoopSafetyInfo &SafetyInfo,
187 MemorySSAUpdater *MSSAU, ScalarEvolution *SE);
188
189static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
190 function_ref<void(Instruction *)> Fn);
191static SmallVector<SmallSetVector<Value *, 8>, 0>
192collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L);
193
194namespace {
195struct LoopInvariantCodeMotion {
196 bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
197 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI,
198 TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
199 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);
200
201 LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
202 unsigned LicmMssaNoAccForPromotionCap)
203 : LicmMssaOptCap(LicmMssaOptCap),
204 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {}
205
206private:
207 unsigned LicmMssaOptCap;
208 unsigned LicmMssaNoAccForPromotionCap;
209
210 std::unique_ptr<AliasSetTracker>
211 collectAliasInfoForLoop(Loop *L, LoopInfo *LI, AAResults *AA);
212};
213
214struct LegacyLICMPass : public LoopPass {
215 static char ID; // Pass identification, replacement for typeid
216 LegacyLICMPass(
217 unsigned LicmMssaOptCap = SetLicmMssaOptCap,
218 unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap)
219 : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) {
220 initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
221 }
222
223 bool runOnLoop(Loop *L, LPPassManager &LPM) override {
224 if (skipLoop(L))
225 return false;
226
227 LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
228 << L->getHeader()->getNameOrAsOperand() << "\n");
229
230 auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
231 MemorySSA *MSSA = EnableMSSALoopDependency
232 ? (&getAnalysis<MemorySSAWrapperPass>().getMSSA())
233 : nullptr;
234 bool hasProfileData = L->getHeader()->getParent()->hasProfileData();
235 BlockFrequencyInfo *BFI =
236 hasProfileData ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
237 : nullptr;
238 // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
239 // pass. Function analyses need to be preserved across loop transformations
240 // but ORE cannot be preserved (see comment before the pass definition).
241 OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
242 return LICM.runOnLoop(
243 L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
244 &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
245 &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), BFI,
246 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
247 *L->getHeader()->getParent()),
248 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
249 *L->getHeader()->getParent()),
250 SE ? &SE->getSE() : nullptr, MSSA, &ORE);
251 }
252
253 /// This transformation requires natural loop information & requires that
254 /// loop preheaders be inserted into the CFG...
255 ///
256 void getAnalysisUsage(AnalysisUsage &AU) const override {
257 AU.addPreserved<DominatorTreeWrapperPass>();
258 AU.addPreserved<LoopInfoWrapperPass>();
259 AU.addRequired<TargetLibraryInfoWrapperPass>();
260 if (EnableMSSALoopDependency) {
261 AU.addRequired<MemorySSAWrapperPass>();
262 AU.addPreserved<MemorySSAWrapperPass>();
263 }
264 AU.addRequired<TargetTransformInfoWrapperPass>();
265 getLoopAnalysisUsage(AU);
266 LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
267 AU.addPreserved<LazyBlockFrequencyInfoPass>();
268 AU.addPreserved<LazyBranchProbabilityInfoPass>();
269 }
270
271private:
272 LoopInvariantCodeMotion LICM;
273};
274} // namespace
275
276PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
277 LoopStandardAnalysisResults &AR, LPMUpdater &) {
278 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
279 // pass. Function analyses need to be preserved across loop transformations
280 // but ORE cannot be preserved (see comment before the pass definition).
281 OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
282
283 LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
284 if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, AR.BFI, &AR.TLI, &AR.TTI,
285 &AR.SE, AR.MSSA, &ORE))
286 return PreservedAnalyses::all();
287
288 auto PA = getLoopPassPreservedAnalyses();
289
290 PA.preserve<DominatorTreeAnalysis>();
291 PA.preserve<LoopAnalysis>();
292 if (AR.MSSA)
293 PA.preserve<MemorySSAAnalysis>();
294
295 return PA;
296}
297
298PreservedAnalyses LNICMPass::run(LoopNest &LN, LoopAnalysisManager &AM,
299 LoopStandardAnalysisResults &AR,
300 LPMUpdater &) {
301 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
302 // pass. Function analyses need to be preserved across loop transformations
303 // but ORE cannot be preserved (see comment before the pass definition).
304 OptimizationRemarkEmitter ORE(LN.getParent());
305
306 LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
307
308 Loop &OutermostLoop = LN.getOutermostLoop();
309 bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, AR.BFI,
310 &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);
311
312 if (!Changed)
313 return PreservedAnalyses::all();
314
315 auto PA = getLoopPassPreservedAnalyses();
316
317 PA.preserve<DominatorTreeAnalysis>();
318 PA.preserve<LoopAnalysis>();
319 if (AR.MSSA)
320 PA.preserve<MemorySSAAnalysis>();
321
322 return PA;
323}
324
325char LegacyLICMPass::ID = 0;
326INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
327 false, false)
328INITIALIZE_PASS_DEPENDENCY(LoopPass)
329INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
330INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
331INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
332INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
333INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
334 false)
335
336Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
337Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
338 unsigned LicmMssaNoAccForPromotionCap) {
339 return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
340}
341
342llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop *L,
343 MemorySSA *MSSA)
344 : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
345 IsSink, L, MSSA) {}
346
347llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
348 unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
349 Loop *L, MemorySSA *MSSA)
350 : LicmMssaOptCap(LicmMssaOptCap),
351 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
352 IsSink(IsSink) {
353 assert(((L != nullptr) == (MSSA != nullptr)) &&
354 "Unexpected values for SinkAndHoistLICMFlags");
355 if (!MSSA)
356 return;
357
358 unsigned AccessCapCount = 0;
359 for (auto *BB : L->getBlocks())
360 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
361 for (const auto &MA : *Accesses) {
362 (void)MA;
363 ++AccessCapCount;
364 if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
365 NoOfMemAccTooLarge = true;
366 return;
367 }
368 }
369}
370
371/// Hoist expressions out of the specified loop. Note, alias info for inner
372/// loop is not preserved so it is not a good idea to run LICM multiple
373/// times on one loop.
374bool LoopInvariantCodeMotion::runOnLoop(
375 Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
376 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
377 ScalarEvolution *SE, MemorySSA *MSSA, OptimizationRemarkEmitter *ORE,
378 bool LoopNestMode) {
379 bool Changed = false;
380
381 assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
382
383 // If this loop has metadata indicating that LICM is not to be performed then
384 // just exit.
385 if (hasDisableLICMTransformsHint(L)) {
386 return false;
387 }
388
389 std::unique_ptr<AliasSetTracker> CurAST;
390 std::unique_ptr<MemorySSAUpdater> MSSAU;
391 std::unique_ptr<SinkAndHoistLICMFlags> Flags;
392
393 // Don't sink stores from loops with coroutine suspend instructions.
394 // LICM would sink instructions into the default destination of
395 // the coroutine switch. The default destination of the switch is to
396 // handle the case where the coroutine is suspended, by which point the
397 // coroutine frame may have been destroyed. No instruction can be sunk there.
398 // FIXME: This would unfortunately hurt the performance of coroutines, however
399 // there is currently no general solution for this. Similar issues could also
400 // potentially happen in other passes where instructions are being moved
401 // across that edge.
402 bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
403 return llvm::any_of(*BB, [](Instruction &I) {
404 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
405 return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
406 });
407 });
408
409 if (!MSSA) {
410 LLVM_DEBUG(dbgs() << "LICM: Using Alias Set Tracker.\n");
411 CurAST = collectAliasInfoForLoop(L, LI, AA);
412 Flags = std::make_unique<SinkAndHoistLICMFlags>(
413 LicmMssaOptCap, LicmMssaNoAccForPromotionCap, /*IsSink=*/true);
414 } else {
415 LLVM_DEBUG(dbgs() << "LICM: Using MemorySSA.\n");
416 MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
417 Flags = std::make_unique<SinkAndHoistLICMFlags>(
418 LicmMssaOptCap, LicmMssaNoAccForPromotionCap, /*IsSink=*/true, L, MSSA);
419 }
420
421 // Get the preheader block to move instructions into...
422 BasicBlock *Preheader = L->getLoopPreheader();
423
424 // Compute loop safety information.
425 ICFLoopSafetyInfo SafetyInfo;
426 SafetyInfo.computeLoopSafetyInfo(L);
427
428 // We want to visit all of the instructions in this loop... that are not parts
429 // of our subloops (they have already had their invariants hoisted out of
430 // their loop, into this loop, so there is no need to process the BODIES of
431 // the subloops).
432 //
433 // Traverse the body of the loop in depth first order on the dominator tree so
434 // that we are guaranteed to see definitions before we see uses. This allows
435 // us to sink instructions in one pass, without iteration. After sinking
436 // instructions, we perform another pass to hoist them out of the loop.
437 if (L->hasDedicatedExits())
438 Changed |=
439 sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, TTI, L,
440 CurAST.get(), MSSAU.get(), &SafetyInfo, *Flags.get(), ORE);
441 Flags->setIsSink(false);
442 if (Preheader)
443 Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, L,
444 CurAST.get(), MSSAU.get(), SE, &SafetyInfo,
445 *Flags.get(), ORE, LoopNestMode);
446
447 // Now that all loop invariants have been removed from the loop, promote any
448 // memory references to scalars that we can.
449 // Don't sink stores from loops without dedicated block exits. Exits
450 // containing indirect branches are not transformed by loop simplify,
451 // make sure we catch that. An additional load may be generated in the
452 // preheader for SSA updater, so also avoid sinking when no preheader
453 // is available.
454 if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
455 !Flags->tooManyMemoryAccesses() && !HasCoroSuspendInst) {
456 // Figure out the loop exits and their insertion points
457 SmallVector<BasicBlock *, 8> ExitBlocks;
458 L->getUniqueExitBlocks(ExitBlocks);
459
460 // We can't insert into a catchswitch.
461 bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
462 return isa<CatchSwitchInst>(Exit->getTerminator());
463 });
464
465 if (!HasCatchSwitch) {
466 SmallVector<Instruction *, 8> InsertPts;
467 SmallVector<MemoryAccess *, 8> MSSAInsertPts;
468 InsertPts.reserve(ExitBlocks.size());
469 if (MSSAU)
470 MSSAInsertPts.reserve(ExitBlocks.size());
471 for (BasicBlock *ExitBlock : ExitBlocks) {
472 InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
473 if (MSSAU)
474 MSSAInsertPts.push_back(nullptr);
475 }
476
477 PredIteratorCache PIC;
478
479 bool Promoted = false;
480 if (CurAST.get()) {
481 // Loop over all of the alias sets in the tracker object.
482 for (AliasSet &AS : *CurAST) {
483 // We can promote this alias set if it has a store, if it is a "Must"
484 // alias set, if the pointer is loop invariant, and if we are not
485 // eliminating any volatile loads or stores.
486 if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
487 !L->isLoopInvariant(AS.begin()->getValue()))
488 continue;
489
490 assert(
491 !AS.empty() &&
492 "Must alias set should have at least one pointer element in it!");
493
494 SmallSetVector<Value *, 8> PointerMustAliases;
495 for (const auto &ASI : AS)
496 PointerMustAliases.insert(ASI.getValue());
497
498 Promoted |= promoteLoopAccessesToScalars(
499 PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
500 DT, TLI, L, CurAST.get(), MSSAU.get(), &SafetyInfo, ORE);
501 }
502 } else {
503 // Promoting one set of accesses may make the pointers for another set
504 // loop invariant, so run this in a loop (with the MaybePromotable set
505 // decreasing in size over time).
506 bool LocalPromoted;
507 do {
508 LocalPromoted = false;
509 for (const SmallSetVector<Value *, 8> &PointerMustAliases :
510 collectPromotionCandidates(MSSA, AA, L)) {
511 LocalPromoted |= promoteLoopAccessesToScalars(
512 PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC,
513 LI, DT, TLI, L, /*AST*/nullptr, MSSAU.get(), &SafetyInfo, ORE);
514 }
515 Promoted |= LocalPromoted;
516 } while (LocalPromoted);
517 }
518
519 // Once we have promoted values across the loop body we have to
520 // recursively reform LCSSA as any nested loop may now have values defined
521 // within the loop used in the outer loop.
522 // FIXME: This is really heavy handed. It would be a bit better to use an
523 // SSAUpdater strategy during promotion that was LCSSA aware and reformed
524 // it as it went.
525 if (Promoted)
526 formLCSSARecursively(*L, *DT, LI, SE);
527
528 Changed |= Promoted;
529 }
530 }
531
532 // Check that neither this loop nor its parent have had LCSSA broken. LICM is
533 // specifically moving instructions across the loop boundary and so it is
534 // especially in need of sanity checking here.
535 assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
536 assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
537 "Parent loop not left in LCSSA form after LICM!");
538
539 if (MSSAU.get() && VerifyMemorySSA)
540 MSSAU->getMemorySSA()->verifyMemorySSA();
541
542 if (Changed && SE)
543 SE->forgetLoopDispositions(L);
544 return Changed;
545}
546
547/// Walk the specified region of the CFG (defined by all blocks dominated by
548/// the specified block, and that are in the current loop) in reverse depth
549/// first order w.r.t the DominatorTree. This allows us to visit uses before
550/// definitions, allowing us to sink a loop body in one pass without iteration.
551///
552bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
553 DominatorTree *DT, BlockFrequencyInfo *BFI,
554 TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
555 Loop *CurLoop, AliasSetTracker *CurAST,
556 MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
557 SinkAndHoistLICMFlags &Flags,
558 OptimizationRemarkEmitter *ORE) {
559
560 // Verify inputs.
561 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
562 CurLoop != nullptr && SafetyInfo != nullptr &&
563 "Unexpected input to sinkRegion.");
564 assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
565 "Either AliasSetTracker or MemorySSA should be initialized.");
566
567 // We want to visit children before parents. We will enqueue all the parents
568 // before their children in the worklist and process the worklist in reverse
569 // order.
570 SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);
571
572 bool Changed = false;
573 for (DomTreeNode *DTN : reverse(Worklist)) {
574 BasicBlock *BB = DTN->getBlock();
575 // Only need to process the contents of this block if it is not part of a
576 // subloop (which would already have been processed).
577 if (inSubLoop(BB, CurLoop, LI))
578 continue;
579
580 for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
581 Instruction &I = *--II;
582
583 // The instruction is not used in the loop if it is dead. In this case,
584 // we just delete it instead of sinking it.
585 if (isInstructionTriviallyDead(&I, TLI)) {
586 LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
587 salvageKnowledge(&I);
588 salvageDebugInfo(I);
589 ++II;
590 eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
591 Changed = true;
592 continue;
593 }
594
595 // Check to see if we can sink this instruction to the exit blocks
596 // of the loop. We can do this if the all users of the instruction are
597 // outside of the loop. In this case, it doesn't even matter if the
598 // operands of the instruction are loop invariant.
599 //
600 bool FreeInLoop = false;
601 if (!I.mayHaveSideEffects() &&
602 isNotUsedOrFreeInLoop(I, CurLoop, SafetyInfo, TTI, FreeInLoop) &&
603 canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
604 ORE)) {
605 if (sink(I, LI, DT, BFI, CurLoop, SafetyInfo, MSSAU, ORE)) {
606 if (!FreeInLoop) {
607 ++II;
608 salvageDebugInfo(I);
609 eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
610 }
611 Changed = true;
612 }
613 }
614 }
615 }
616 if (MSSAU && VerifyMemorySSA)
617 MSSAU->getMemorySSA()->verifyMemorySSA();
618 return Changed;
619}
620
621namespace {
622// This is a helper class for hoistRegion to make it able to hoist control flow
623// in order to be able to hoist phis. The way this works is that we initially
624// start hoisting to the loop preheader, and when we see a loop invariant branch
625// we make note of this. When we then come to hoist an instruction that's
626// conditional on such a branch we duplicate the branch and the relevant control
627// flow, then hoist the instruction into the block corresponding to its original
628// block in the duplicated control flow.
629class ControlFlowHoister {
630private:
631 // Information about the loop we are hoisting from
632 LoopInfo *LI;
633 DominatorTree *DT;
634 Loop *CurLoop;
635 MemorySSAUpdater *MSSAU;
636
637 // A map of blocks in the loop to the block their instructions will be hoisted
638 // to.
639 DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;
640
641 // The branches that we can hoist, mapped to the block that marks a
642 // convergence point of their control flow.
643 DenseMap<BranchInst *, BasicBlock *> HoistableBranches;
644
645public:
646 ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
647 MemorySSAUpdater *MSSAU)
648 : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
649
650 void registerPossiblyHoistableBranch(BranchInst *BI) {
651 // We can only hoist conditional branches with loop invariant operands.
652 if (!ControlFlowHoisting || !BI->isConditional() ||
653 !CurLoop->hasLoopInvariantOperands(BI))
654 return;
655
656 // The branch destinations need to be in the loop, and we don't gain
657 // anything by duplicating conditional branches with duplicate successors,
658 // as it's essentially the same as an unconditional branch.
659 BasicBlock *TrueDest = BI->getSuccessor(0);
660 BasicBlock *FalseDest = BI->getSuccessor(1);
661 if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
662 TrueDest == FalseDest)
663 return;
664
665 // We can hoist BI if one branch destination is the successor of the other,
666 // or both have common successor which we check by seeing if the
667 // intersection of their successors is non-empty.
668 // TODO: This could be expanded to allowing branches where both ends
669 // eventually converge to a single block.
670 SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
671 TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
672 FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
673 BasicBlock *CommonSucc = nullptr;
674 if (TrueDestSucc.count(FalseDest)) {
675 CommonSucc = FalseDest;
676 } else if (FalseDestSucc.count(TrueDest)) {
677 CommonSucc = TrueDest;
678 } else {
679 set_intersect(TrueDestSucc, FalseDestSucc);
680 // If there's one common successor use that.
681 if (TrueDestSucc.size() == 1)
682 CommonSucc = *TrueDestSucc.begin();
683 // If there's more than one pick whichever appears first in the block list
684 // (we can't use the value returned by TrueDestSucc.begin() as it's
685 // unpredictable which element gets returned).
686 else if (!TrueDestSucc.empty()) {
687 Function *F = TrueDest->getParent();
688 auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
689 auto It = llvm::find_if(*F, IsSucc);
690 assert(It != F->end() && "Could not find successor in function");
691 CommonSucc = &*It;
692 }
693 }
694 // The common successor has to be dominated by the branch, as otherwise
695 // there will be some other path to the successor that will not be
696 // controlled by this branch so any phi we hoist would be controlled by the
697 // wrong condition. This also takes care of avoiding hoisting of loop back
698 // edges.
699 // TODO: In some cases this could be relaxed if the successor is dominated
700 // by another block that's been hoisted and we can guarantee that the
701 // control flow has been replicated exactly.
702 if (CommonSucc && DT->dominates(BI, CommonSucc))
703 HoistableBranches[BI] = CommonSucc;
704 }
705
706 bool canHoistPHI(PHINode *PN) {
707 // The phi must have loop invariant operands.
708 if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
709 return false;
710 // We can hoist phis if the block they are in is the target of hoistable
711 // branches which cover all of the predecessors of the block.
712 SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
713 BasicBlock *BB = PN->getParent();
714 for (BasicBlock *PredBB : predecessors(BB))
715 PredecessorBlocks.insert(PredBB);
716 // If we have fewer predecessor blocks than predecessors then the phi will
717 // have more than one incoming value for the same block which we can't
718 // handle.
719 // TODO: This could be handled by erasing some of the duplicate incoming
720 // values.
721 if (PredecessorBlocks.size() != pred_size(BB))
722 return false;
723 for (auto &Pair : HoistableBranches) {
724 if (Pair.second == BB) {
725 // Which blocks are predecessors via this branch depends on if the
726 // branch is triangle-like or diamond-like.
727 if (Pair.first->getSuccessor(0) == BB) {
728 PredecessorBlocks.erase(Pair.first->getParent());
729 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
730 } else if (Pair.first->getSuccessor(1) == BB) {
731 PredecessorBlocks.erase(Pair.first->getParent());
732 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
733 } else {
734 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
735 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
736 }
737 }
738 }
739 // PredecessorBlocks will now be empty if for every predecessor of BB we
740 // found a hoistable branch source.
741 return PredecessorBlocks.empty();
742 }
743
744 BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
745 if (!ControlFlowHoisting)
746 return CurLoop->getLoopPreheader();
747 // If BB has already been hoisted, return that
748 if (HoistDestinationMap.count(BB))
749 return HoistDestinationMap[BB];
750
751 // Check if this block is conditional based on a pending branch
752 auto HasBBAsSuccessor =
753 [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
754 return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
755 Pair.first->getSuccessor(1) == BB);
756 };
757 auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);
758
759 // If not involved in a pending branch, hoist to preheader
760 BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
761 if (It == HoistableBranches.end()) {
762 LLVM_DEBUG(dbgs() << "LICM using "
763 << InitialPreheader->getNameOrAsOperand()
764 << " as hoist destination for "
765 << BB->getNameOrAsOperand() << "\n");
766 HoistDestinationMap[BB] = InitialPreheader;
767 return InitialPreheader;
768 }
769 BranchInst *BI = It->first;
770 assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
771 HoistableBranches.end() &&
772 "BB is expected to be the target of at most one branch");
773
774 LLVMContext &C = BB->getContext();
775 BasicBlock *TrueDest = BI->getSuccessor(0);
776 BasicBlock *FalseDest = BI->getSuccessor(1);
777 BasicBlock *CommonSucc = HoistableBranches[BI];
778 BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());
779
780 // Create hoisted versions of blocks that currently don't have them
781 auto CreateHoistedBlock = [&](BasicBlock *Orig) {
782 if (HoistDestinationMap.count(Orig))
783 return HoistDestinationMap[Orig];
784 BasicBlock *New =
785 BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
786 HoistDestinationMap[Orig] = New;
787 DT->addNewBlock(New, HoistTarget);
788 if (CurLoop->getParentLoop())
789 CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
790 ++NumCreatedBlocks;
791 LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
792 << " as hoist destination for " << Orig->getName()
793 << "\n");
794 return New;
795 };
796 BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
797 BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
798 BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);
799
800 // Link up these blocks with branches.
801 if (!HoistCommonSucc->getTerminator()) {
802 // The new common successor we've generated will branch to whatever that
803 // hoist target branched to.
804 BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
805 assert(TargetSucc && "Expected hoist target to have a single successor");
806 HoistCommonSucc->moveBefore(TargetSucc);
807 BranchInst::Create(TargetSucc, HoistCommonSucc);
808 }
809 if (!HoistTrueDest->getTerminator()) {
810 HoistTrueDest->moveBefore(HoistCommonSucc);
811 BranchInst::Create(HoistCommonSucc, HoistTrueDest);
812 }
813 if (!HoistFalseDest->getTerminator()) {
814 HoistFalseDest->moveBefore(HoistCommonSucc);
815 BranchInst::Create(HoistCommonSucc, HoistFalseDest);
816 }
817
818 // If BI is being cloned to what was originally the preheader then
819 // HoistCommonSucc will now be the new preheader.
820 if (HoistTarget == InitialPreheader) {
821 // Phis in the loop header now need to use the new preheader.
822 InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
823 if (MSSAU)
824 MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
825 HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
826 // The new preheader dominates the loop header.
827 DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
828 DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
829 DT->changeImmediateDominator(HeaderNode, PreheaderNode);
830 // The preheader hoist destination is now the new preheader, with the
831 // exception of the hoist destination of this branch.
832 for (auto &Pair : HoistDestinationMap)
833 if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
834 Pair.second = HoistCommonSucc;
835 }
836
837 // Now finally clone BI.
838 ReplaceInstWithInst(
839 HoistTarget->getTerminator(),
840 BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
841 ++NumClonedBranches;
842
843 assert(CurLoop->getLoopPreheader() &&
844 "Hoisting blocks should not have destroyed preheader");
845 return HoistDestinationMap[BB];
846 }
847};
848} // namespace
849
850// Hoisting/sinking an instruction out of a loop isn't always beneficial. It's
851// only worthwhile if the destination block is actually colder than current
852// block.
853static bool worthSinkOrHoistInst(Instruction &I, BasicBlock *DstBlock,
854 OptimizationRemarkEmitter *ORE,
855 BlockFrequencyInfo *BFI) {
856 // Check block frequency only when runtime profile is available
857 // to avoid pathological cases. With static profile, lean towards
858 // hoisting because it helps canonicalize the loop for the vectorizer.
859 if (!DstBlock->getParent()->hasProfileData())
860 return true;
861
862 if (!HoistSinkColdnessThreshold || !BFI)
863 return true;
864
865 BasicBlock *SrcBlock = I.getParent();
866 if (BFI->getBlockFreq(DstBlock).getFrequency() / HoistSinkColdnessThreshold >
867 BFI->getBlockFreq(SrcBlock).getFrequency()) {
868 ORE->emit([&]() {
869 return OptimizationRemarkMissed(DEBUG_TYPE, "SinkHoistInst", &I)
870 << "failed to sink or hoist instruction because containing block "
871 "has lower frequency than destination block";
872 });
873 return false;
874 }
875
876 return true;
877}
878
879/// Walk the specified region of the CFG (defined by all blocks dominated by
880/// the specified block, and that are in the current loop) in depth first
881/// order w.r.t the DominatorTree. This allows us to visit definitions before
882/// uses, allowing us to hoist a loop body in one pass without iteration.
883///
884bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
885 DominatorTree *DT, BlockFrequencyInfo *BFI,
886 TargetLibraryInfo *TLI, Loop *CurLoop,
887 AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
888 ScalarEvolution *SE, ICFLoopSafetyInfo *SafetyInfo,
889 SinkAndHoistLICMFlags &Flags,
890 OptimizationRemarkEmitter *ORE, bool LoopNestMode) {
891 // Verify inputs.
892 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
893 CurLoop != nullptr && SafetyInfo != nullptr &&
894 "Unexpected input to hoistRegion.");
895 assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
896 "Either AliasSetTracker or MemorySSA should be initialized.");
897
898 ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);
899
900 // Keep track of instructions that have been hoisted, as they may need to be
901 // re-hoisted if they end up not dominating all of their uses.
902 SmallVector<Instruction *, 16> HoistedInstructions;
903
904 // For PHI hoisting to work we need to hoist blocks before their successors.
905 // We can do this by iterating through the blocks in the loop in reverse
906 // post-order.
907 LoopBlocksRPO Worklist(CurLoop);
908 Worklist.perform(LI);
909 bool Changed = false;
910 for (BasicBlock *BB : Worklist) {
911 // Only need to process the contents of this block if it is not part of a
912 // subloop (which would already have been processed).
913 if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
914 continue;
915
916 for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
917 Instruction &I = *II++;
918 // Try constant folding this instruction. If all the operands are
919 // constants, it is technically hoistable, but it would be better to
920 // just fold it.
921 if (Constant *C = ConstantFoldInstruction(
922 &I, I.getModule()->getDataLayout(), TLI)) {
923 LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C
924 << '\n');
925 if (CurAST)
926 CurAST->copyValue(&I, C);
927 // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
928 I.replaceAllUsesWith(C);
929 if (isInstructionTriviallyDead(&I, TLI))
930 eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
931 Changed = true;
932 continue;
933 }
934
935 // Try hoisting the instruction out to the preheader. We can only do
936 // this if all of the operands of the instruction are loop invariant and
937 // if it is safe to hoist the instruction. We also check block frequency
938 // to make sure instruction only gets hoisted into colder blocks.
939 // TODO: It may be safe to hoist if we are hoisting to a conditional block
940 // and we have accurately duplicated the control flow from the loop header
941 // to that block.
942 if (CurLoop->hasLoopInvariantOperands(&I) &&
943 canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
944 ORE) &&
945 worthSinkOrHoistInst(I, CurLoop->getLoopPreheader(), ORE, BFI) &&
946 isSafeToExecuteUnconditionally(
947 I, DT, TLI, CurLoop, SafetyInfo, ORE,
948 CurLoop->getLoopPreheader()->getTerminator())) {
949 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
950 MSSAU, SE, ORE);
951 HoistedInstructions.push_back(&I);
952 Changed = true;
953 continue;
954 }
955
956 // Attempt to remove floating point division out of the loop by
957 // converting it to a reciprocal multiplication.
958 if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
959 CurLoop->isLoopInvariant(I.getOperand(1))) {
960 auto Divisor = I.getOperand(1);
961 auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
962 auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
963 ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
964 SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
965 ReciprocalDivisor->insertBefore(&I);
966
967 auto Product =
968 BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
969 Product->setFastMathFlags(I.getFastMathFlags());
970 SafetyInfo->insertInstructionTo(Product, I.getParent());
971 Product->insertAfter(&I);
972 I.replaceAllUsesWith(Product);
973 eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
974
975 hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
976 SafetyInfo, MSSAU, SE, ORE);
977 HoistedInstructions.push_back(ReciprocalDivisor);
978 Changed = true;
979 continue;
980 }
981
982 auto IsInvariantStart = [&](Instruction &I) {
983 using namespace PatternMatch;
984 return I.use_empty() &&
985 match(&I, m_Intrinsic<Intrinsic::invariant_start>());
986 };
987 auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
988 return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
989 SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
990 };
991 if ((IsInvariantStart(I) || isGuard(&I)) &&
992 CurLoop->hasLoopInvariantOperands(&I) &&
993 MustExecuteWithoutWritesBefore(I)) {
994 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
995 MSSAU, SE, ORE);
996 HoistedInstructions.push_back(&I);
997 Changed = true;
998 continue;
999 }
1000
1001 if (PHINode *PN = dyn_cast<PHINode>(&I)) {
1002 if (CFH.canHoistPHI(PN)) {
1003 // Redirect incoming blocks first to ensure that we create hoisted
1004 // versions of those blocks before we hoist the phi.
1005 for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
1006 PN->setIncomingBlock(
1007 i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
1008 hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
1009 MSSAU, SE, ORE);
1010 assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
1011 Changed = true;
1012 continue;
1013 }
1014 }
1015
1016 // Remember possibly hoistable branches so we can actually hoist them
1017 // later if needed.
1018 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
1019 CFH.registerPossiblyHoistableBranch(BI);
1020 }
1021 }
1022
1023 // If we hoisted instructions to a conditional block they may not dominate
1024 // their uses that weren't hoisted (such as phis where some operands are not
1025 // loop invariant). If so make them unconditional by moving them to their
1026 // immediate dominator. We iterate through the instructions in reverse order
1027 // which ensures that when we rehoist an instruction we rehoist its operands,
1028 // and also keep track of where in the block we are rehoisting to to make sure
1029 // that we rehoist instructions before the instructions that use them.
1030 Instruction *HoistPoint = nullptr;
1031 if (ControlFlowHoisting) {
1032 for (Instruction *I : reverse(HoistedInstructions)) {
1033 if (!llvm::all_of(I->uses(),
1034 [&](Use &U) { return DT->dominates(I, U); })) {
1035 BasicBlock *Dominator =
1036 DT->getNode(I->getParent())->getIDom()->getBlock();
1037 if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
1038 if (HoistPoint)
1039 assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
1040 "New hoist point expected to dominate old hoist point");
1041 HoistPoint = Dominator->getTerminator();
1042 }
1043 LLVM_DEBUG(dbgs() << "LICM rehoisting to "
1044 << HoistPoint->getParent()->getNameOrAsOperand()
1045 << ": " << *I << "\n");
1046 moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
1047 HoistPoint = I;
1048 Changed = true;
1049 }
1050 }
1051 }
1052 if (MSSAU && VerifyMemorySSA)
1053 MSSAU->getMemorySSA()->verifyMemorySSA();
1054
1055 // Now that we've finished hoisting make sure that LI and DT are still
1056 // valid.
1057#ifdef EXPENSIVE_CHECKS
1058 if (Changed) {
1059 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
1060 "Dominator tree verification failed");
1061 LI->verify(*DT);
1062 }
1063#endif
1064
1065 return Changed;
1066}
1067
1068// Return true if LI is invariant within scope of the loop. LI is invariant if
1069// CurLoop is dominated by an invariant.start representing the same memory
1070// location and size as the memory location LI loads from, and also the
1071// invariant.start has no uses.
1072static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
1073 Loop *CurLoop) {
1074 Value *Addr = LI->getOperand(0);
1075 const DataLayout &DL = LI->getModule()->getDataLayout();
1076 const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());
1077
1078 // It is not currently possible for clang to generate an invariant.start
1079 // intrinsic with scalable vector types because we don't support thread local
1080 // sizeless types and we don't permit sizeless types in structs or classes.
1081 // Furthermore, even if support is added for this in future the intrinsic
1082 // itself is defined to have a size of -1 for variable sized objects. This
1083 // makes it impossible to verify if the intrinsic envelops our region of
1084 // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
1085 // types would have a -1 parameter, but the former is clearly double the size
1086 // of the latter.
1087 if (LocSizeInBits.isScalable())
1088 return false;
1089
1090 // If the type is i8 addrspace(x)*, we know this is the type of the
1091 // llvm.invariant.start operand.
1092 auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
1093 LI->getPointerAddressSpace());
1094 unsigned BitcastsVisited = 0;
1095 // Look through bitcasts until we reach the i8* type (this is invariant.start
1096 // operand type).
1097 while (Addr->getType() != PtrInt8Ty) {
1098 auto *BC = dyn_cast<BitCastInst>(Addr);
1099 // Avoid traversing high number of bitcast uses.
1100 if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
1101 return false;
1102 Addr = BC->getOperand(0);
1103 }
1104
1105 unsigned UsesVisited = 0;
1106 // Traverse all uses of the load operand value, to see if invariant.start is
1107 // one of the uses, and whether it dominates the load instruction.
1108 for (auto *U : Addr->users()) {
1109 // Avoid traversing for Load operand with high number of users.
1110 if (++UsesVisited > MaxNumUsesTraversed)
1111 return false;
1112 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
1113 // If there are escaping uses of the invariant.start instruction, the load may be
1114 // non-invariant.
1115 if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
1116 !II->use_empty())
1117 continue;
1118 ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
1119 // The intrinsic supports having a -1 argument for variable sized objects
1120 // so we should check for that here.
1121 if (InvariantSize->isNegative())
1122 continue;
1123 uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
1124 // Confirm the invariant.start location size contains the load operand size
1125 // in bits. Also, the invariant.start should dominate the load, and we
1126 // should not hoist the load out of a loop that contains this dominating
1127 // invariant.start.
1128 if (LocSizeInBits.getFixedSize() <= InvariantSizeInBits &&
1129 DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
1130 return true;
1131 }
1132
1133 return false;
1134}
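
[Editor's note] The size check above converts the invariant.start byte count to bits and requires it to cover the load. A minimal stand-alone sketch of that comparison (hypothetical helper name, not an LLVM API):

#include <cstdint>

// Hypothetical helper modelling the check in isLoadInvariantInLoop():
// an invariant.start covering InvariantSizeInBytes bytes envelops a load
// of LoadSizeInBits bits only if the load is no wider than the region.
// A negative size (variable-sized object) is rejected, as in the real code.
static bool invariantCoversLoad(int64_t InvariantSizeInBytes,
                                uint64_t LoadSizeInBits) {
  if (InvariantSizeInBytes < 0)
    return false;
  return LoadSizeInBits <= uint64_t(InvariantSizeInBytes) * 8;
}

With scalable vector loads the bit width is unknown at compile time, which is why the scalable-vector case bails out before this point.
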
1135
1136namespace {
1137/// Return true if-and-only-if we know how to (mechanically) both hoist and
1138/// sink a given instruction out of a loop. Does not address legality
1139/// concerns such as aliasing or speculation safety.
1140bool isHoistableAndSinkableInst(Instruction &I) {
1141 // Only these instructions are hoistable/sinkable.
1142 return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
2
Assuming 'I' is a 'LoadInst'
3
Returning the value 1, which participates in a condition later
1143 isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
1144 isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
1145 isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
1146 isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
1147 isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
1148 isa<InsertValueInst>(I) || isa<FreezeInst>(I));
1149}
1150/// Return true if all of the alias sets within this AST are known not to
1151/// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
1152bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
1153 const Loop *L) {
1154 if (CurAST) {
67
Assuming 'CurAST' is null
68
Taking false branch
1155 for (AliasSet &AS : *CurAST) {
1156 if (!AS.isForwardingAliasSet() && AS.isMod()) {
1157 return false;
1158 }
1159 }
1160 return true;
1161 } else { /*MSSAU*/
1162 for (auto *BB : L->getBlocks())
69
Assuming '__begin2' is not equal to '__end2'
1163 if (MSSAU->getMemorySSA()->getBlockDefs(BB))
70
Called C++ object pointer is null
1164 return false;
1165 return true;
1166 }
1167}
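
[Editor's note] The warning flagged at line 1163 hinges on the caller's invariant that exactly one of CurAST and MSSAU is non-null; with asserts compiled away (shown above as ((void)0)), nothing on the analyzed path enforces it, so the analyzer can assume both are null and still reach the MSSAU branch. A minimal sketch of the same shape, using hypothetical stand-in types rather than the real LLVM classes:

#include <vector>

// Stand-ins for the real classes; only the shape of the calls matters here.
struct MemorySSA { std::vector<int> BlockDefs; };
struct MemorySSAUpdater {
  MemorySSA *MSSA = nullptr;
  MemorySSA *getMemorySSA() const { return MSSA; }
};
struct AliasSetTracker { bool AnyMod = false; };

// Mirrors isReadOnly(): the "else" branch trusts that MSSAU is non-null.
bool isReadOnlySketch(AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU) {
  if (CurAST)
    return !CurAST->AnyMod;
  // If a caller passes CurAST == nullptr and MSSAU == nullptr, this line
  // dereferences a null pointer -- the pattern reported at LICM.cpp:1163.
  return MSSAU->getMemorySSA()->BlockDefs.empty();
}

On the reported path, canSinkOrHoistInst assumes MSSAU is null (step 6) and later forwards that null MSSAU to isReadOnly (steps 65-66); whether that path is actually reachable depends on the assert at line 1193, which the analyzer does not model because it expands to ((void)0) in this build.
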
1168
1169/// Return true if I is the only Instruction with a MemoryAccess in L.
1170bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
1171 const MemorySSAUpdater *MSSAU) {
1172 for (auto *BB : L->getBlocks())
1173 if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
1174 int NotAPhi = 0;
1175 for (const auto &Acc : *Accs) {
1176 if (isa<MemoryPhi>(&Acc))
1177 continue;
1178 const auto *MUD = cast<MemoryUseOrDef>(&Acc);
1179 if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
1180 return false;
1181 }
1182 }
1183 return true;
1184}
1185}
1186
1187bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
1188 Loop *CurLoop, AliasSetTracker *CurAST,
1189 MemorySSAUpdater *MSSAU,
1190 bool TargetExecutesOncePerLoop,
1191 SinkAndHoistLICMFlags *Flags,
1192 OptimizationRemarkEmitter *ORE) {
1193 assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&((void)0)
1194 "Either AliasSetTracker or MemorySSA should be initialized.")((void)0);
1195
1196 // If we don't understand the instruction, bail early.
1197 if (!isHoistableAndSinkableInst(I))
1
Calling 'isHoistableAndSinkableInst'
4
Returning from 'isHoistableAndSinkableInst'
5
Taking false branch
1198 return false;
1199
1200 MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
6
Assuming 'MSSAU' is null
7
'?' condition is false
1201 if (MSSA)
7.1
'MSSA' is null
8
Taking false branch
1202 assert(Flags != nullptr && "Flags cannot be null.")((void)0);
1203
1204 // Loads have extra constraints we have to verify before we can hoist them.
1205 if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
9
Assuming the object is not a 'LoadInst'
9.1
'LI' is null
10
Taking false branch
1206 if (!LI->isUnordered())
1207 return false; // Don't sink/hoist volatile or ordered atomic loads!
1208
1209 // Loads from constant memory are always safe to move, even if they end up
1210 // in the same alias set as something that ends up being modified.
1211 if (AA->pointsToConstantMemory(LI->getOperand(0)))
1212 return true;
1213 if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1214 return true;
1215
1216 if (LI->isAtomic() && !TargetExecutesOncePerLoop)
1217 return false; // Don't risk duplicating unordered loads
1218
1219 // This checks for an invariant.start dominating the load.
1220 if (isLoadInvariantInLoop(LI, DT, CurLoop))
1221 return true;
1222
1223 bool Invalidated;
1224 if (CurAST)
1225 Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
1226 CurLoop, AA);
1227 else
1228 Invalidated = pointerInvalidatedByLoopWithMSSA(
1229 MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, I, *Flags);
1230 // Check loop-invariant address because this may also be a sinkable load
1231 // whose address is not necessarily loop-invariant.
1232 if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1233 ORE->emit([&]() {
1234 return OptimizationRemarkMissed(
1235 DEBUG_TYPE"licm", "LoadWithLoopInvariantAddressInvalidated", LI)
1236 << "failed to move load with loop-invariant address "
1237 "because the loop may invalidate its value";
1238 });
1239
1240 return !Invalidated;
1241 } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
11
Assuming the object is a 'CallInst'
11.1
'CI' is non-null
12
Taking true branch
1242 // Don't sink or hoist dbg info; it's legal, but not useful.
1243 if (isa<DbgInfoIntrinsic>(I))
13
Assuming 'I' is not a 'DbgInfoIntrinsic'
14
Taking false branch
1244 return false;
1245
1246 // Don't sink calls which can throw.
1247 if (CI->mayThrow())
15
Assuming the condition is false
16
Taking false branch
1248 return false;
1249
1250 // Convergent attribute has been used on operations that involve
1251 // inter-thread communication which results are implicitly affected by the
1252 // enclosing control flows. It is not safe to hoist or sink such operations
1253 // across control flow.
1254 if (CI->isConvergent())
17
Calling 'CallBase::isConvergent'
32
Returning from 'CallBase::isConvergent'
33
Assuming the condition is false
34
Taking false branch
1255 return false;
1256
1257 using namespace PatternMatch;
1258 if (match(CI, m_Intrinsic<Intrinsic::assume>()))
35
Calling 'match<llvm::CallInst, llvm::PatternMatch::IntrinsicID_match>'
42
Returning from 'match<llvm::CallInst, llvm::PatternMatch::IntrinsicID_match>'
43
Taking false branch
1259 // Assumes don't actually alias anything or throw
1260 return true;
1261
1262 if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
44
Calling 'match<llvm::CallInst, llvm::PatternMatch::IntrinsicID_match>'
51
Returning from 'match<llvm::CallInst, llvm::PatternMatch::IntrinsicID_match>'
52
Taking false branch
1263 // Widenable conditions don't actually alias anything or throw
1264 return true;
1265
1266 // Handle simple cases by querying alias analysis.
1267 FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
1268 if (Behavior == FMRB_DoesNotAccessMemory)
53
Assuming 'Behavior' is not equal to FMRB_DoesNotAccessMemory
54
Taking false branch
1269 return true;
1270 if (AAResults::onlyReadsMemory(Behavior)) {
55
Calling 'AAResults::onlyReadsMemory'
58
Returning from 'AAResults::onlyReadsMemory'
59
Taking true branch
1271 // A readonly argmemonly function only reads from memory pointed to by
1272 // its arguments with arbitrary offsets. If we can prove there are no
1273 // writes to this memory in the loop, we can hoist or sink.
1274 if (AAResults::onlyAccessesArgPointees(Behavior)) {
60
Calling 'AAResults::onlyAccessesArgPointees'
63
Returning from 'AAResults::onlyAccessesArgPointees'
64
Taking false branch
1275 // TODO: expand to writeable arguments
1276 for (Value *Op : CI->arg_operands())
1277 if (Op->getType()->isPointerTy()) {
1278 bool Invalidated;
1279 if (CurAST)
1280 Invalidated = pointerInvalidatedByLoop(
1281 MemoryLocation::getBeforeOrAfter(Op), CurAST, CurLoop, AA);
1282 else
1283 Invalidated = pointerInvalidatedByLoopWithMSSA(
1284 MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
1285 *Flags);
1286 if (Invalidated)
1287 return false;
1288 }
1289 return true;
1290 }
1291
1292 // If this call only reads from memory and there are no writes to memory
1293 // in the loop, we can hoist or sink the call as appropriate.
1294 if (isReadOnly(CurAST, MSSAU, CurLoop))
65
Passing null pointer value via 2nd parameter 'MSSAU'
66
Calling 'isReadOnly'
1295 return true;
1296 }
1297
1298 // FIXME: This should use mod/ref information to see if we can hoist or
1299 // sink the call.
1300
1301 return false;
1302 } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1303 // Fences alias (most) everything to provide ordering. For the moment,
1304 // just give up if there are any other memory operations in the loop.
1305 if (CurAST) {
1306 auto Begin = CurAST->begin();
1307 assert(Begin != CurAST->end() && "must contain FI")((void)0);
1308 if (std::next(Begin) != CurAST->end())
1309 // constant memory for instance, TODO: handle better
1310 return false;
1311 auto *UniqueI = Begin->getUniqueInstruction();
1312 if (!UniqueI)
1313 // other memory op, give up
1314 return false;
1315 (void)FI; // suppress unused variable warning
1316 assert(UniqueI == FI && "AS must contain FI")((void)0);
1317 return true;
1318 } else // MSSAU
1319 return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1320 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
1321 if (!SI->isUnordered())
1322 return false; // Don't sink/hoist volatile or ordered atomic store!
1323
1324 // We can only hoist a store that we can prove writes a value which is not
1325 // read or overwritten within the loop. For those cases, we fallback to
1326 // load store promotion instead. TODO: We can extend this to cases where
1327 // there is exactly one write to the location and that write dominates an
1328 // arbitrary number of reads in the loop.
1329 if (CurAST) {
1330 auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));
1331
1332 if (AS.isRef() || !AS.isMustAlias())
1333 // Quick exit test, handled by the full path below as well.
1334 return false;
1335 auto *UniqueI = AS.getUniqueInstruction();
1336 if (!UniqueI)
1337 // other memory op, give up
1338 return false;
1339 assert(UniqueI == SI && "AS must contain SI")((void)0);
1340 return true;
1341 } else { // MSSAU
1342 if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
1343 return true;
1344 // If there are more accesses than the Promotion cap or no "quota" to
1345 // check clobber, then give up as we're not walking a list that long.
1346 if (Flags->tooManyMemoryAccesses() || Flags->tooManyClobberingCalls())
1347 return false;
1348 // If there are interfering Uses (i.e. their defining access is in the
1349 // loop), or ordered loads (stored as Defs!), don't move this store.
1350 // Could do better here, but this is conservatively correct.
1351 // TODO: Cache set of Uses on the first walk in runOnLoop, update when
1352 // moving accesses. Can also extend to dominating uses.
1353 auto *SIMD = MSSA->getMemoryAccess(SI);
1354 for (auto *BB : CurLoop->getBlocks())
1355 if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
1356 for (const auto &MA : *Accesses)
1357 if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
1358 auto *MD = MU->getDefiningAccess();
1359 if (!MSSA->isLiveOnEntryDef(MD) &&
1360 CurLoop->contains(MD->getBlock()))
1361 return false;
1362 // Disable hoisting past potentially interfering loads. Optimized
1363 // Uses may point to an access outside the loop, as getClobbering
1364 // checks the previous iteration when walking the backedge.
1365 // FIXME: More precise: no Uses that alias SI.
1366 if (!Flags->getIsSink() && !MSSA->dominates(SIMD, MU))
1367 return false;
1368 } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
1369 if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
1370 (void)LI; // Silence warning.
1371 assert(!LI->isUnordered() && "Expected unordered load")((void)0);
1372 return false;
1373 }
1374 // Any call, while it may not be clobbering SI, it may be a use.
1375 if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
1376 // Check if the call may read from the memory location written
1377 // to by SI. Check CI's attributes and arguments; the number of
1378 // such checks performed is limited above by NoOfMemAccTooLarge.
1379 ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
1380 if (isModOrRefSet(MRI))
1381 return false;
1382 }
1383 }
1384 }
1385 auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
1386 Flags->incrementClobberingCalls();
1387 // If there are no clobbering Defs in the loop, store is safe to hoist.
1388 return MSSA->isLiveOnEntryDef(Source) ||
1389 !CurLoop->contains(Source->getBlock());
1390 }
1391 }
1392
1393 assert(!I.mayReadOrWriteMemory() && "unhandled aliasing")((void)0);
1394
1395 // We've established mechanical ability and aliasing, it's up to the caller
1396 // to check fault safety
1397 return true;
1398}
1399
1400 /// Returns true if a PHINode is trivially replaceable with an
1401/// Instruction.
1402/// This is true when all incoming values are that instruction.
1403/// This pattern occurs most often with LCSSA PHI nodes.
1404///
1405static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1406 for (const Value *IncValue : PN.incoming_values())
1407 if (IncValue != &I)
1408 return false;
1409
1410 return true;
1411}
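
[Editor's note] A compact model of the predicate above, using hypothetical stand-in types: an LCSSA phi is trivially replaceable by I exactly when every incoming value is I itself, so replacing the phi with a sunk clone of I is a pure renaming.

#include <algorithm>
#include <vector>

struct Inst {};                                      // stand-in for llvm::Instruction
struct Phi { std::vector<const Inst *> Incoming; };  // stand-in for llvm::PHINode

// Same loop as isTriviallyReplaceablePHI(), expressed over the stand-ins.
bool triviallyReplaceable(const Phi &PN, const Inst &I) {
  return std::all_of(PN.Incoming.begin(), PN.Incoming.end(),
                     [&I](const Inst *V) { return V == &I; });
}
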
1412
1413/// Return true if the instruction is free in the loop.
1414static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
1415 const TargetTransformInfo *TTI) {
1416
1417 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1418 if (TTI->getUserCost(GEP, TargetTransformInfo::TCK_SizeAndLatency) !=
1419 TargetTransformInfo::TCC_Free)
1420 return false;
1421 // For a GEP, we cannot simply use getUserCost because currently it
1422 // optimistically assumes that a GEP will fold into addressing mode
1423 // regardless of its users.
1424 const BasicBlock *BB = GEP->getParent();
1425 for (const User *U : GEP->users()) {
1426 const Instruction *UI = cast<Instruction>(U);
1427 if (CurLoop->contains(UI) &&
1428 (BB != UI->getParent() ||
1429 (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
1430 return false;
1431 }
1432 return true;
1433 } else
1434 return TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
1435 TargetTransformInfo::TCC_Free;
1436}
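
[Editor's note] The GEP special case above only treats an address computation as free when every in-loop user is a load or store in the same block, i.e. when it can fold into the memory operation's addressing mode. A small model of that filter, with hypothetical types:

#include <vector>

// Stand-in describing one user of a GEP for the purpose of this check.
struct GEPUser {
  bool InLoop;        // user is inside the current loop
  bool SameBlock;     // user is in the same basic block as the GEP
  bool IsLoadOrStore; // user is a load or store (can absorb the address)
};

// Mirrors the loop in isFreeInLoop(): any in-loop user that is not a
// same-block load/store keeps the GEP from being considered free.
bool gepLooksFree(const std::vector<GEPUser> &Users) {
  for (const GEPUser &U : Users)
    if (U.InLoop && (!U.SameBlock || !U.IsLoadOrStore))
      return false;
  return true;
}
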
1437
1438/// Return true if the only users of this instruction are outside of
1439/// the loop. If this is true, we can sink the instruction to the exit
1440/// blocks of the loop.
1441///
1442/// We also return true if the instruction could be folded away in lowering.
1443/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
1444static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
1445 const LoopSafetyInfo *SafetyInfo,
1446 TargetTransformInfo *TTI, bool &FreeInLoop) {
1447 const auto &BlockColors = SafetyInfo->getBlockColors();
1448 bool IsFree = isFreeInLoop(I, CurLoop, TTI);
1449 for (const User *U : I.users()) {
1450 const Instruction *UI = cast<Instruction>(U);
1451 if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1452 const BasicBlock *BB = PN->getParent();
1453 // We cannot sink uses in catchswitches.
1454 if (isa<CatchSwitchInst>(BB->getTerminator()))
1455 return false;
1456
1457 // We need to sink a callsite to a unique funclet. Avoid sinking if the
1458 // phi use is too muddled.
1459 if (isa<CallInst>(I))
1460 if (!BlockColors.empty() &&
1461 BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1462 return false;
1463 }
1464
1465 if (CurLoop->contains(UI)) {
1466 if (IsFree) {
1467 FreeInLoop = true;
1468 continue;
1469 }
1470 return false;
1471 }
1472 }
1473 return true;
1474}
1475
1476static Instruction *cloneInstructionInExitBlock(
1477 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1478 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
1479 Instruction *New;
1480 if (auto *CI = dyn_cast<CallInst>(&I)) {
1481 const auto &BlockColors = SafetyInfo->getBlockColors();
1482
1483 // Sinking call-sites need to be handled differently from other
1484 // instructions. The cloned call-site needs a funclet bundle operand
1485 // appropriate for its location in the CFG.
1486 SmallVector<OperandBundleDef, 1> OpBundles;
1487 for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1488 BundleIdx != BundleEnd; ++BundleIdx) {
1489 OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
1490 if (Bundle.getTagID() == LLVMContext::OB_funclet)
1491 continue;
1492
1493 OpBundles.emplace_back(Bundle);
1494 }
1495
1496 if (!BlockColors.empty()) {
1497 const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
1498 assert(CV.size() == 1 && "non-unique color for exit block!")((void)0);
1499 BasicBlock *BBColor = CV.front();
1500 Instruction *EHPad = BBColor->getFirstNonPHI();
1501 if (EHPad->isEHPad())
1502 OpBundles.emplace_back("funclet", EHPad);
1503 }
1504
1505 New = CallInst::Create(CI, OpBundles);
1506 } else {
1507 New = I.clone();
1508 }
1509
1510 ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
1511 if (!I.getName().empty())
1512 New->setName(I.getName() + ".le");
1513
1514 if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
1515 // Create a new MemoryAccess and let MemorySSA set its defining access.
1516 MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
1517 New, nullptr, New->getParent(), MemorySSA::Beginning);
1518 if (NewMemAcc) {
1519 if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1520 MSSAU->insertDef(MemDef, /*RenameUses=*/true);
1521 else {
1522 auto *MemUse = cast<MemoryUse>(NewMemAcc);
1523 MSSAU->insertUse(MemUse, /*RenameUses=*/true);
1524 }
1525 }
1526 }
1527
1528 // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
1529 // this is particularly cheap because we can rip off the PHI node that we're
1530 // replacing for the number and blocks of the predecessors.
1531 // OPT: If this shows up in a profile, we can instead finish sinking all
1532 // invariant instructions, and then walk their operands to re-establish
1533 // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1534 // sinking bottom-up.
1535 for (Use &Op : New->operands())
1536 if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
1537 auto *OInst = cast<Instruction>(Op.get());
1538 PHINode *OpPN =
1539 PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1540 OInst->getName() + ".lcssa", &ExitBlock.front());
1541 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1542 OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1543 Op = OpPN;
1544 }
1545 return New;
1546}
1547
1548static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
1549 AliasSetTracker *AST, MemorySSAUpdater *MSSAU) {
1550 if (AST)
1551 AST->deleteValue(&I);
1552 if (MSSAU)
1553 MSSAU->removeMemoryAccess(&I);
1554 SafetyInfo.removeInstruction(&I);
1555 I.eraseFromParent();
1556}
1557
1558static void moveInstructionBefore(Instruction &I, Instruction &Dest,
1559 ICFLoopSafetyInfo &SafetyInfo,
1560 MemorySSAUpdater *MSSAU,
1561 ScalarEvolution *SE) {
1562 SafetyInfo.removeInstruction(&I);
1563 SafetyInfo.insertInstructionTo(&I, Dest.getParent());
1564 I.moveBefore(&Dest);
1565 if (MSSAU)
1566 if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
1567 MSSAU->getMemorySSA()->getMemoryAccess(&I)))
1568 MSSAU->moveToPlace(OldMemAcc, Dest.getParent(),
1569 MemorySSA::BeforeTerminator);
1570 if (SE)
1571 SE->forgetValue(&I);
1572}
1573
1574static Instruction *sinkThroughTriviallyReplaceablePHI(
1575 PHINode *TPN, Instruction *I, LoopInfo *LI,
1576 SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
1577 const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1578 MemorySSAUpdater *MSSAU) {
1579 assert(isTriviallyReplaceablePHI(*TPN, *I) &&((void)0)
1580 "Expect only trivially replaceable PHI")((void)0);
1581 BasicBlock *ExitBlock = TPN->getParent();
1582 Instruction *New;
1583 auto It = SunkCopies.find(ExitBlock);
1584 if (It != SunkCopies.end())
1585 New = It->second;
1586 else
1587 New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
1588 *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
1589 return New;
1590}
1591
1592static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1593 BasicBlock *BB = PN->getParent();
1594 if (!BB->canSplitPredecessors())
1595 return false;
1596 // It's not impossible to split EHPad blocks, but if BlockColors already exist
1597 // it requires updating BlockColors for all offspring blocks accordingly. By
1598 // skipping such a corner case, we can make updating BlockColors after splitting
1599 // a predecessor fairly simple.
1600 if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
1601 return false;
1602 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
1603 BasicBlock *BBPred = *PI;
1604 if (isa<IndirectBrInst>(BBPred->getTerminator()) ||
1605 isa<CallBrInst>(BBPred->getTerminator()))
1606 return false;
1607 }
1608 return true;
1609}
1610
1611static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
1612 LoopInfo *LI, const Loop *CurLoop,
1613 LoopSafetyInfo *SafetyInfo,
1614 MemorySSAUpdater *MSSAU) {
1615#ifndef NDEBUG1
1616 SmallVector<BasicBlock *, 32> ExitBlocks;
1617 CurLoop->getUniqueExitBlocks(ExitBlocks);
1618 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1619 ExitBlocks.end());
1620#endif
1621 BasicBlock *ExitBB = PN->getParent();
1622 assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.")((void)0);
1623
1624 // Split predecessors of the loop exit so that instructions in the loop are
1625 // exposed to exit blocks through trivially replaceable PHIs while keeping the
1626 // loop in the canonical form where each predecessor of each exit block should
1627 // be contained within the loop. For example, this will convert the loop below
1628 // from
1629 //
1630 // LB1:
1631 // %v1 =
1632 // br %LE, %LB2
1633 // LB2:
1634 // %v2 =
1635 // br %LE, %LB1
1636 // LE:
1637 // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1638 //
1639 // to
1640 //
1641 // LB1:
1642 // %v1 =
1643 // br %LE.split, %LB2
1644 // LB2:
1645 // %v2 =
1646 // br %LE.split2, %LB1
1647 // LE.split:
1648 // %p1 = phi [%v1, %LB1] <-- trivially replaceable
1649 // br %LE
1650 // LE.split2:
1651 // %p2 = phi [%v2, %LB2] <-- trivially replaceable
1652 // br %LE
1653 // LE:
1654 // %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1655 //
1656 const auto &BlockColors = SafetyInfo->getBlockColors();
1657 SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1658 while (!PredBBs.empty()) {
1659 BasicBlock *PredBB = *PredBBs.begin();
1660 assert(CurLoop->contains(PredBB) &&((void)0)
1661 "Expect all predecessors are in the loop")((void)0);
1662 if (PN->getBasicBlockIndex(PredBB) >= 0) {
1663 BasicBlock *NewPred = SplitBlockPredecessors(
1664 ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
1665 // Since we do not allow splitting EH-block with BlockColors in
1666 // canSplitPredecessors(), we can simply assign predecessor's color to
1667 // the new block.
1668 if (!BlockColors.empty())
1669 // Grab a reference to the ColorVector to be inserted before getting the
1670 // reference to the vector we are copying because inserting the new
1671 // element in BlockColors might cause the map to be reallocated.
1672 SafetyInfo->copyColors(NewPred, PredBB);
1673 }
1674 PredBBs.remove(PredBB);
1675 }
1676}
1677
1678/// When an instruction is found to only be used outside of the loop, this
1679/// function moves it to the exit blocks and patches up SSA form as needed.
1680/// This method is guaranteed to remove the original instruction from its
1681/// position, and may either delete it or move it to outside of the loop.
1682///
1683static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1684 BlockFrequencyInfo *BFI, const Loop *CurLoop,
1685 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
1686 OptimizationRemarkEmitter *ORE) {
1687 bool Changed = false;
1688 LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n")do { } while (false);
1689
1690 // Iterate over users to be ready for actual sinking. Replace users via
1691 // unreachable blocks with undef and make all user PHIs trivially replaceable.
1692 SmallPtrSet<Instruction *, 8> VisitedUsers;
1693 for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1694 auto *User = cast<Instruction>(*UI);
1695 Use &U = UI.getUse();
1696 ++UI;
1697
1698 if (VisitedUsers.count(User) || CurLoop->contains(User))
1699 continue;
1700
1701 if (!DT->isReachableFromEntry(User->getParent())) {
1702 U = UndefValue::get(I.getType());
1703 Changed = true;
1704 continue;
1705 }
1706
1707 // The user must be a PHI node.
1708 PHINode *PN = cast<PHINode>(User);
1709
1710 // Surprisingly, instructions can be used outside of loops without any
1711 // exits. This can only happen in PHI nodes if the incoming block is
1712 // unreachable.
1713 BasicBlock *BB = PN->getIncomingBlock(U);
1714 if (!DT->isReachableFromEntry(BB)) {
1715 U = UndefValue::get(I.getType());
1716 Changed = true;
1717 continue;
1718 }
1719
1720 VisitedUsers.insert(PN);
1721 if (isTriviallyReplaceablePHI(*PN, I))
1722 continue;
1723
1724 if (!canSplitPredecessors(PN, SafetyInfo))
1725 return Changed;
1726
1727 // Split predecessors of the PHI so that we can make users trivially
1728 // replaceable.
1729 splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);
1730
1731 // Should rebuild the iterators, as they may be invalidated by
1732 // splitPredecessorsOfLoopExit().
1733 UI = I.user_begin();
1734 UE = I.user_end();
1735 }
1736
1737 if (VisitedUsers.empty())
1738 return Changed;
1739
1740 ORE->emit([&]() {
1741 return OptimizationRemark(DEBUG_TYPE"licm", "InstSunk", &I)
1742 << "sinking " << ore::NV("Inst", &I);
1743 });
1744 if (isa<LoadInst>(I))
1745 ++NumMovedLoads;
1746 else if (isa<CallInst>(I))
1747 ++NumMovedCalls;
1748 ++NumSunk;
1749
1750#ifndef NDEBUG1
1751 SmallVector<BasicBlock *, 32> ExitBlocks;
1752 CurLoop->getUniqueExitBlocks(ExitBlocks);
1753 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1754 ExitBlocks.end());
1755#endif
1756
1757 // Clones of this instruction. Don't create more than one per exit block!
1758 SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
1759
1760 // If this instruction is only used outside of the loop, then all users are
1761 // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1762 // the instruction.
1763 // First check if I is worth sinking for all uses. Sink only when it is worth
1764 // across all uses.
1765 SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1766 SmallVector<PHINode *, 8> ExitPNs;
1767 for (auto *UI : Users) {
1768 auto *User = cast<Instruction>(UI);
1769
1770 if (CurLoop->contains(User))
1771 continue;
1772
1773 PHINode *PN = cast<PHINode>(User);
1774 assert(ExitBlockSet.count(PN->getParent()) &&((void)0)
1775 "The LCSSA PHI is not in an exit block!")((void)0);
1776 if (!worthSinkOrHoistInst(I, PN->getParent(), ORE, BFI)) {
1777 return Changed;
1778 }
1779
1780 ExitPNs.push_back(PN);
1781 }
1782
1783 for (auto *PN : ExitPNs) {
1784
1785 // The PHI must be trivially replaceable.
1786 Instruction *New = sinkThroughTriviallyReplaceablePHI(
1787 PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1788 PN->replaceAllUsesWith(New);
1789 eraseInstruction(*PN, *SafetyInfo, nullptr, nullptr);
1790 Changed = true;
1791 }
1792 return Changed;
1793}
1794
1795 /// When an instruction is found to use only loop-invariant operands and
1796 /// is safe to hoist, this function is called to do the dirty work.
1797///
1798static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1799 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1800 MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
1801 OptimizationRemarkEmitter *ORE) {
1802 LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "do { } while (false)
1803 << I << "\n")do { } while (false);
1804 ORE->emit([&]() {
1805 return OptimizationRemark(DEBUG_TYPE"licm", "Hoisted", &I) << "hoisting "
1806 << ore::NV("Inst", &I);
1807 });
1808
1809 // Metadata can be dependent on conditions we are hoisting above.
1810 // Conservatively strip all metadata on the instruction unless we were
1811 // guaranteed to execute I if we entered the loop, in which case the metadata
1812 // is valid in the loop preheader.
1813 // Similarly, if I is a call and it is not guaranteed to execute in the loop,
1814 // then moving to the preheader means we should strip attributes on the call
1815 // that can cause UB since we may be hoisting above conditions that allowed
1816 // inferring those attributes. They may not be valid at the preheader.
1817 if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
1818 // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1819 // time in isGuaranteedToExecute if we don't actually have anything to
1820 // drop. It is a compile time optimization, not required for correctness.
1821 !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
1822 I.dropUndefImplyingAttrsAndUnknownMetadata();
1823
1824 if (isa<PHINode>(I))
1825 // Move the new node to the end of the phi list in the destination block.
1826 moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
1827 else
1828 // Move the new node to the destination block, before its terminator.
1829 moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);
1830
1831 I.updateLocationAfterHoist();
1832
1833 if (isa<LoadInst>(I))
1834 ++NumMovedLoads;
1835 else if (isa<CallInst>(I))
1836 ++NumMovedCalls;
1837 ++NumHoisted;
1838}
1839
1840 /// Only sink or hoist an instruction if it is not a trapping instruction,
1841 /// if it is known not to trap when moved to the preheader, or if it is a
1842 /// trapping instruction that is guaranteed to execute.
1843static bool isSafeToExecuteUnconditionally(Instruction &Inst,
1844 const DominatorTree *DT,
1845 const TargetLibraryInfo *TLI,
1846 const Loop *CurLoop,
1847 const LoopSafetyInfo *SafetyInfo,
1848 OptimizationRemarkEmitter *ORE,
1849 const Instruction *CtxI) {
1850 if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT, TLI))
1851 return true;
1852
1853 bool GuaranteedToExecute =
1854 SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1855
1856 if (!GuaranteedToExecute) {
1857 auto *LI = dyn_cast<LoadInst>(&Inst);
1858 if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1859 ORE->emit([&]() {
1860 return OptimizationRemarkMissed(
1861 DEBUG_TYPE"licm", "LoadWithLoopInvariantAddressCondExecuted", LI)
1862 << "failed to hoist load with loop-invariant address "
1863 "because load is conditionally executed";
1864 });
1865 }
1866
1867 return GuaranteedToExecute;
1868}
1869
1870namespace {
1871class LoopPromoter : public LoadAndStorePromoter {
1872 Value *SomePtr; // Designated pointer to store to.
1873 const SmallSetVector<Value *, 8> &PointerMustAliases;
1874 SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1875 SmallVectorImpl<Instruction *> &LoopInsertPts;
1876 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1877 PredIteratorCache &PredCache;
1878 AliasSetTracker *AST;
1879 MemorySSAUpdater *MSSAU;
1880 LoopInfo &LI;
1881 DebugLoc DL;
1882 int Alignment;
1883 bool UnorderedAtomic;
1884 AAMDNodes AATags;
1885 ICFLoopSafetyInfo &SafetyInfo;
1886
1887 // We're about to add a use of V in a loop exit block. Insert an LCSSA phi
1888 // (if legal) if doing so would add an out-of-loop use to an instruction
1889 // defined in-loop.
1890 Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1891 if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
1892 return V;
1893
1894 Instruction *I = cast<Instruction>(V);
1895 // We need to create an LCSSA PHI node for the incoming value and
1896 // store that.
1897 PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1898 I->getName() + ".lcssa", &BB->front());
1899 for (BasicBlock *Pred : PredCache.get(BB))
1900 PN->addIncoming(I, Pred);
1901 return PN;
1902 }
1903
1904public:
1905 LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1906 const SmallSetVector<Value *, 8> &PMA,
1907 SmallVectorImpl<BasicBlock *> &LEB,
1908 SmallVectorImpl<Instruction *> &LIP,
1909 SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1910 AliasSetTracker *ast, MemorySSAUpdater *MSSAU, LoopInfo &li,
1911 DebugLoc dl, int alignment, bool UnorderedAtomic,
1912 const AAMDNodes &AATags, ICFLoopSafetyInfo &SafetyInfo)
1913 : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
1914 LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
1915 PredCache(PIC), AST(ast), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
1916 Alignment(alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1917 SafetyInfo(SafetyInfo) {}
1918
1919 bool isInstInList(Instruction *I,
1920 const SmallVectorImpl<Instruction *> &) const override {
1921 Value *Ptr;
1922 if (LoadInst *LI = dyn_cast<LoadInst>(I))
1923 Ptr = LI->getOperand(0);
1924 else
1925 Ptr = cast<StoreInst>(I)->getPointerOperand();
1926 return PointerMustAliases.count(Ptr);
1927 }
1928
1929 void doExtraRewritesBeforeFinalDeletion() override {
1930 // Insert stores in the loop exit blocks. Each exit block gets a
1931 // store of the live-out values that feed them. Since we've already told
1932 // the SSA updater about the defs in the loop and the preheader
1933 // definition, it is all set and we can start using it.
1934 for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1935 BasicBlock *ExitBlock = LoopExitBlocks[i];
1936 Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
1937 LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1938 Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1939 Instruction *InsertPos = LoopInsertPts[i];
1940 StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1941 if (UnorderedAtomic)
1942 NewSI->setOrdering(AtomicOrdering::Unordered);
1943 NewSI->setAlignment(Align(Alignment));
1944 NewSI->setDebugLoc(DL);
1945 if (AATags)
1946 NewSI->setAAMetadata(AATags);
1947
1948 if (MSSAU) {
1949 MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1950 MemoryAccess *NewMemAcc;
1951 if (!MSSAInsertPoint) {
1952 NewMemAcc = MSSAU->createMemoryAccessInBB(
1953 NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1954 } else {
1955 NewMemAcc =
1956 MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1957 }
1958 MSSAInsertPts[i] = NewMemAcc;
1959 MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
1960 // FIXME: true for safety, false may still be correct.
1961 }
1962 }
1963 }
1964
1965 void replaceLoadWithValue(LoadInst *LI, Value *V) const override {
1966 // Update alias analysis.
1967 if (AST)
1968 AST->copyValue(LI, V);
1969 }
1970 void instructionDeleted(Instruction *I) const override {
1971 SafetyInfo.removeInstruction(I);
1972 if (AST)
1973 AST->deleteValue(I);
1974 if (MSSAU)
1975 MSSAU->removeMemoryAccess(I);
1976 }
1977};
1978
1979bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
1980 DominatorTree *DT) {
1981 // We can perform the captured-before check against any instruction in the
1982 // loop header, as the loop header is reachable from any instruction inside
1983 // the loop.
1984 // TODO: ReturnCaptures=true shouldn't be necessary here.
1985 return !PointerMayBeCapturedBefore(V, /* ReturnCaptures */ true,
1986 /* StoreCaptures */ true,
1987 L->getHeader()->getTerminator(), DT);
1988}
1989
1990 /// Return true iff we can prove that a caller of this function cannot inspect
1991/// the contents of the provided object in a well defined program.
1992bool isKnownNonEscaping(Value *Object, const Loop *L,
1993 const TargetLibraryInfo *TLI, DominatorTree *DT) {
1994 if (isa<AllocaInst>(Object))
1995 // Since the alloca goes out of scope, we know the caller can't retain a
1996 // reference to it and be well defined. Thus, we don't need to check for
1997 // capture.
1998 return true;
1999
2000 // For all other objects we need to know that the caller can't possibly
2001 // have gotten a reference to the object. There are two components of
2002 // that:
2003 // 1) Object can't be escaped by this function. This is what
2004 // PointerMayBeCaptured checks.
2005 // 2) Object can't have been captured at definition site. For this, we
2006 // need to know the return value is noalias. At the moment, we use a
2007 // weaker condition and handle only AllocLikeFunctions (which are
2008 // known to be noalias). TODO
2009 return isAllocLikeFn(Object, TLI) &&
2010 isNotCapturedBeforeOrInLoop(Object, L, DT);
2011}
2012
2013} // namespace
2014
2015/// Try to promote memory values to scalars by sinking stores out of the
2016/// loop and moving loads to before the loop. We do this by looping over
2017/// the stores in the loop, looking for stores to Must pointers which are
2018/// loop invariant.
2019///
2020bool llvm::promoteLoopAccessesToScalars(
2021 const SmallSetVector<Value *, 8> &PointerMustAliases,
2022 SmallVectorImpl<BasicBlock *> &ExitBlocks,
2023 SmallVectorImpl<Instruction *> &InsertPts,
2024 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
2025 LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
2026 Loop *CurLoop, AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
2027 ICFLoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE) {
2028 // Verify inputs.
2029 assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&((void)0)
2030 SafetyInfo != nullptr &&((void)0)
2031 "Unexpected Input to promoteLoopAccessesToScalars")((void)0);
2032
2033 Value *SomePtr = *PointerMustAliases.begin();
2034 BasicBlock *Preheader = CurLoop->getLoopPreheader();
2035
2036 // It is not safe to promote a load/store from the loop if the load/store is
2037 // conditional. For example, turning:
2038 //
2039 // for () { if (c) *P += 1; }
2040 //
2041 // into:
2042 //
2043 // tmp = *P; for () { if (c) tmp +=1; } *P = tmp;
2044 //
2045 // is not safe, because *P may only be valid to access if 'c' is true.
2046 //
2047 // The safety property divides into two parts:
2048 // p1) The memory may not be dereferenceable on entry to the loop. In this
2049 // case, we can't insert the required load in the preheader.
2050 // p2) The memory model does not allow us to insert a store along any dynamic
2051 // path which did not originally have one.
2052 //
2053 // If at least one store is guaranteed to execute, both properties are
2054 // satisfied, and promotion is legal.
2055 //
2056 // This, however, is not a necessary condition. Even if no store/load is
2057 // guaranteed to execute, we can still establish these properties.
2058 // We can establish (p1) by proving that hoisting the load into the preheader
2059 // is safe (i.e. proving dereferenceability on all paths through the loop). We
2060 // can use any access within the alias set to prove dereferenceability,
2061 // since they're all must alias.
2062 //
2063 // There are two ways to establish (p2):
2064 // a) Prove the location is thread-local. In this case the memory model
2065 // requirement does not apply, and stores are safe to insert.
2066 // b) Prove a store dominates every exit block. In this case, if an exit
2067 // block is reached, the original dynamic path would have taken us through
2068 // the store, so inserting a store into the exit block is safe. Note that this
2069 // is different from the store being guaranteed to execute. For instance,
2070 // if an exception is thrown on the first iteration of the loop, the original
2071 // store is never executed, but the exit blocks are not executed either.
2072
2073 bool DereferenceableInPH = false;
2074 bool SafeToInsertStore = false;
2075
2076 SmallVector<Instruction *, 64> LoopUses;
2077
2078 // We start with an alignment of one and try to find instructions that allow
2079 // us to prove better alignment.
2080 Align Alignment;
2081 // Keep track of which types of access we see
2082 bool SawUnorderedAtomic = false;
2083 bool SawNotAtomic = false;
2084 AAMDNodes AATags;
2085
2086 const DataLayout &MDL = Preheader->getModule()->getDataLayout();
2087
2088 bool IsKnownThreadLocalObject = false;
2089 if (SafetyInfo->anyBlockMayThrow()) {
2090 // If a loop can throw, we have to insert a store along each unwind edge.
2091 // That said, we can't actually make the unwind edge explicit. Therefore,
2092 // we have to prove that the store is dead along the unwind edge. We do
2093 // this by proving that the caller can't have a reference to the object
2094 // after return and thus can't possibly load from the object.
2095 Value *Object = getUnderlyingObject(SomePtr);
2096 if (!isKnownNonEscaping(Object, CurLoop, TLI, DT))
2097 return false;
2098 // Subtlety: Allocas aren't visible to callers, but *are* potentially
2099 // visible to other threads if captured and used during their lifetimes.
2100 IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
2101 }
2102
2103 // Check that all of the pointers in the alias set have the same type. We
2104 // cannot (yet) promote a memory location that is loaded and stored in
2105 // different sizes. While we are at it, collect alignment and AA info.
2106 for (Value *ASIV : PointerMustAliases) {
2107 // Check that all of the pointers in the alias set have the same type. We
2108 // cannot (yet) promote a memory location that is loaded and stored in
2109 // different sizes.
2110 if (SomePtr->getType() != ASIV->getType())
2111 return false;
2112
2113 for (User *U : ASIV->users()) {
2114 // Ignore instructions that are outside the loop.
2115 Instruction *UI = dyn_cast<Instruction>(U);
2116 if (!UI || !CurLoop->contains(UI))
2117 continue;
2118
2119 // If there is a non-load/store instruction in the loop, we can't promote
2120 // it.
2121 if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
2122 if (!Load->isUnordered())
2123 return false;
2124
2125 SawUnorderedAtomic |= Load->isAtomic();
2126 SawNotAtomic |= !Load->isAtomic();
2127
2128 Align InstAlignment = Load->getAlign();
2129
2130 // Note that proving a load safe to speculate requires proving
2131 // sufficient alignment at the target location. Proving it guaranteed
2132 // to execute does as well. Thus we can increase our guaranteed
2133 // alignment as well.
2134 if (!DereferenceableInPH || (InstAlignment > Alignment))
2135 if (isSafeToExecuteUnconditionally(*Load, DT, TLI, CurLoop,
2136 SafetyInfo, ORE,
2137 Preheader->getTerminator())) {
2138 DereferenceableInPH = true;
2139 Alignment = std::max(Alignment, InstAlignment);
2140 }
2141 } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2142 // Stores *of* the pointer are not interesting, only stores *to* the
2143 // pointer.
2144 if (UI->getOperand(1) != ASIV)
2145 continue;
2146 if (!Store->isUnordered())
2147 return false;
2148
2149 SawUnorderedAtomic |= Store->isAtomic();
2150 SawNotAtomic |= !Store->isAtomic();
2151
2152 // If the store is guaranteed to execute, both properties are satisfied.
2153 // We may want to check if a store is guaranteed to execute even if we
2154 // already know that promotion is safe, since it may have higher
2155 // alignment than any other guaranteed stores, in which case we can
2156 // raise the alignment on the promoted store.
2157 Align InstAlignment = Store->getAlign();
2158
2159 if (!DereferenceableInPH || !SafeToInsertStore ||
2160 (InstAlignment > Alignment)) {
2161 if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
2162 DereferenceableInPH = true;
2163 SafeToInsertStore = true;
2164 Alignment = std::max(Alignment, InstAlignment);
2165 }
2166 }
2167
2168 // If a store dominates all exit blocks, it is safe to sink.
2169 // As explained above, if an exit block was executed, a dominating
2170 // store must have been executed at least once, so we are not
2171 // introducing stores on paths that did not have them.
2172 // Note that this only looks at explicit exit blocks. If we ever
2173 // start sinking stores into unwind edges (see above), this will break.
2174 if (!SafeToInsertStore)
2175 SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2176 return DT->dominates(Store->getParent(), Exit);
2177 });
2178
2179 // If the store is not guaranteed to execute, we may still get
2180 // deref info through it.
2181 if (!DereferenceableInPH) {
2182 DereferenceableInPH = isDereferenceableAndAlignedPointer(
2183 Store->getPointerOperand(), Store->getValueOperand()->getType(),
2184 Store->getAlign(), MDL, Preheader->getTerminator(), DT, TLI);
2185 }
2186 } else
2187 return false; // Not a load or store.
2188
2189 // Merge the AA tags.
2190 if (LoopUses.empty()) {
2191 // On the first load/store, just take its AA tags.
2192 UI->getAAMetadata(AATags);
2193 } else if (AATags) {
2194 UI->getAAMetadata(AATags, /* Merge = */ true);
2195 }
2196
2197 LoopUses.push_back(UI);
2198 }
2199 }
2200
2201 // If we found both an unordered atomic instruction and a non-atomic memory
2202 // access, bail. We can't blindly promote non-atomic to atomic since we
2203 // might not be able to lower the result. We can't downgrade since that
2204 // would violate memory model. Also, align 0 is an error for atomics.
2205 if (SawUnorderedAtomic && SawNotAtomic)
2206 return false;
2207
2208 // If we're inserting an atomic load in the preheader, we must be able to
2209 // lower it. We're only guaranteed to be able to lower naturally aligned
2210 // atomics.
2211 auto *SomePtrElemType = SomePtr->getType()->getPointerElementType();
2212 if (SawUnorderedAtomic &&
2213 Alignment < MDL.getTypeStoreSize(SomePtrElemType))
2214 return false;
2215
2216 // If we couldn't prove we can hoist the load, bail.
2217 if (!DereferenceableInPH)
2218 return false;
2219
2220 // We know we can hoist the load, but don't have a guaranteed store.
2221 // Check whether the location is thread-local. If it is, then we can insert
2222 // stores along paths which originally didn't have them without violating the
2223 // memory model.
2224 if (!SafeToInsertStore) {
2225 if (IsKnownThreadLocalObject)
2226 SafeToInsertStore = true;
2227 else {
2228 Value *Object = getUnderlyingObject(SomePtr);
2229 SafeToInsertStore =
2230 (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
2231 isNotCapturedBeforeOrInLoop(Object, CurLoop, DT);
2232 }
2233 }
2234
2235 // If we've still failed to prove we can sink the store, give up.
2236 if (!SafeToInsertStore)
2237 return false;
2238
2239 // Otherwise, this is safe to promote, lets do it!
2240 LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtrdo { } while (false)
2241 << '\n')do { } while (false);
2242 ORE->emit([&]() {
2243 return OptimizationRemark(DEBUG_TYPE"licm", "PromoteLoopAccessesToScalar",
2244 LoopUses[0])
2245 << "Moving accesses to memory location out of the loop";
2246 });
2247 ++NumPromoted;
2248
2249 // Look at all the loop uses, and try to merge their locations.
2250 std::vector<const DILocation *> LoopUsesLocs;
2251 for (auto U : LoopUses)
2252 LoopUsesLocs.push_back(U->getDebugLoc().get());
2253 auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));
2254
2255 // We use the SSAUpdater interface to insert phi nodes as required.
2256 SmallVector<PHINode *, 16> NewPHIs;
2257 SSAUpdater SSA(&NewPHIs);
2258 LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
2259 InsertPts, MSSAInsertPts, PIC, CurAST, MSSAU, *LI, DL,
2260 Alignment.value(), SawUnorderedAtomic, AATags,
2261 *SafetyInfo);
2262
2263 // Set up the preheader to have a definition of the value. It is the live-out
2264 // value from the preheader that uses in the loop will use.
2265 LoadInst *PreheaderLoad = new LoadInst(
2266 SomePtr->getType()->getPointerElementType(), SomePtr,
2267 SomePtr->getName() + ".promoted", Preheader->getTerminator());
2268 if (SawUnorderedAtomic)
2269 PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2270 PreheaderLoad->setAlignment(Alignment);
2271 PreheaderLoad->setDebugLoc(DebugLoc());
2272 if (AATags)
2273 PreheaderLoad->setAAMetadata(AATags);
2274 SSA.AddAvailableValue(Preheader, PreheaderLoad);
2275
2276 if (MSSAU) {
2277 MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
2278 PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2279 MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2280 MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);
2281 }
2282
2283 if (MSSAU && VerifyMemorySSA)
2284 MSSAU->getMemorySSA()->verifyMemorySSA();
2285 // Rewrite all the loads in the loop and remember all the definitions from
2286 // stores in the loop.
2287 Promoter.run(LoopUses);
2288
2289 if (MSSAU && VerifyMemorySSA)
2290 MSSAU->getMemorySSA()->verifyMemorySSA();
2291 // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2292 if (PreheaderLoad->use_empty())
2293 eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST, MSSAU);
2294
2295 return true;
2296}
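
[Editor's note] In source terms, the transformation this function performs looks like the following sketch (plain C++ with hypothetical function names, not IR). It is only legal here because the store is unconditional, i.e. guaranteed to execute on every path through the loop, which is exactly the (p1)/(p2) discussion above:

// Before promotion: every iteration loads and stores through P.
void beforePromotion(int *P, int N) {
  for (int i = 0; i < N; ++i)
    *P += i;
}

// After promotion: a single load in the preheader, a scalar inside the
// loop, and a single store on the way out.
void afterPromotion(int *P, int N) {
  int Tmp = *P;            // hoisted load (preheader)
  for (int i = 0; i < N; ++i)
    Tmp += i;              // loop body now touches only the scalar
  *P = Tmp;                // sunk store (exit block)
}
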
2297
2298static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2299 function_ref<void(Instruction *)> Fn) {
2300 for (const BasicBlock *BB : L->blocks())
2301 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2302 for (const auto &Access : *Accesses)
2303 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2304 Fn(MUD->getMemoryInst());
2305}
2306
2307static SmallVector<SmallSetVector<Value *, 8>, 0>
2308collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {
2309 AliasSetTracker AST(*AA);
2310
2311 auto IsPotentiallyPromotable = [L](const Instruction *I) {
2312 if (const auto *SI = dyn_cast<StoreInst>(I))
2313 return L->isLoopInvariant(SI->getPointerOperand());
2314 if (const auto *LI = dyn_cast<LoadInst>(I))
2315 return L->isLoopInvariant(LI->getPointerOperand());
2316 return false;
2317 };
2318
2319 // Populate AST with potentially promotable accesses and remove them from
2320 // MaybePromotable, so they will not be checked again on the next iteration.
2321 SmallPtrSet<Value *, 16> AttemptingPromotion;
2322 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2323 if (IsPotentiallyPromotable(I)) {
2324 AttemptingPromotion.insert(I);
2325 AST.add(I);
2326 }
2327 });
2328
2329 // We're only interested in must-alias sets that contain a mod.
2330 SmallVector<const AliasSet *, 8> Sets;
2331 for (AliasSet &AS : AST)
2332 if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
2333 Sets.push_back(&AS);
2334
2335 if (Sets.empty())
2336 return {}; // Nothing to promote...
2337
2338 // Discard any sets for which there is an aliasing non-promotable access.
2339 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2340 if (AttemptingPromotion.contains(I))
2341 return;
2342
2343 llvm::erase_if(Sets, [&](const AliasSet *AS) {
2344 return AS->aliasesUnknownInst(I, *AA);
2345 });
2346 });
2347
2348 SmallVector<SmallSetVector<Value *, 8>, 0> Result;
2349 for (const AliasSet *Set : Sets) {
2350 SmallSetVector<Value *, 8> PointerMustAliases;
2351 for (const auto &ASI : *Set)
2352 PointerMustAliases.insert(ASI.getValue());
2353 Result.push_back(std::move(PointerMustAliases));
2354 }
2355
2356 return Result;
2357}
2358
2359/// Returns an owning pointer to an alias set which incorporates aliasing info
2360/// from L and all subloops of L.
2361std::unique_ptr<AliasSetTracker>
2362LoopInvariantCodeMotion::collectAliasInfoForLoop(Loop *L, LoopInfo *LI,
2363 AAResults *AA) {
2364 auto CurAST = std::make_unique<AliasSetTracker>(*AA);
2365
2366 // Add everything from all the sub loops.
2367 for (Loop *InnerL : L->getSubLoops())
2368 for (BasicBlock *BB : InnerL->blocks())
2369 CurAST->add(*BB);
2370
2371 // And merge in this loop (without anything from inner loops).
2372 for (BasicBlock *BB : L->blocks())
2373 if (LI->getLoopFor(BB) == L)
2374 CurAST->add(*BB);
2375
2376 return CurAST;
2377}
2378
2379static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
2380 AliasSetTracker *CurAST, Loop *CurLoop,
2381 AAResults *AA) {
2382 // First check to see if any of the basic blocks in CurLoop invalidate *V.
2383 bool isInvalidatedAccordingToAST = CurAST->getAliasSetFor(MemLoc).isMod();
2384
2385 if (!isInvalidatedAccordingToAST || !LICMN2Theshold)
2386 return isInvalidatedAccordingToAST;
2387
2388 // Check with a diagnostic analysis if we can refine the information above.
2389 // This is to identify the limitations of using the AST.
2390 // The alias set mechanism used by LICM has a major weakness in that it
2391 // combines all things which may alias into a single set *before* asking
2392 // modref questions. As a result, a single readonly call within a loop will
2393 // collapse all loads and stores into a single alias set and report
2394 // invalidation if the loop contains any store. For example, readonly calls
2395 // with deopt states have this form and create a general alias set with all
2396 // loads and stores. In order to get any LICM in loops containing possible
2397 // deopt states we need a more precise invalidation of checking the mod ref
2398 // info of each instruction within the loop and LI. This has a complexity of
2399 // O(N^2), so currently, it is used only as a diagnostic tool since the
2400 // default value of LICMN2Threshold is zero.
2401
2402 // Don't look at nested loops.
2403 if (CurLoop->begin() != CurLoop->end())
2404 return true;
2405
2406 int N = 0;
2407 for (BasicBlock *BB : CurLoop->getBlocks())
2408 for (Instruction &I : *BB) {
2409 if (N >= LICMN2Theshold) {
2410 LLVM_DEBUG(dbgs() << "Aliasing N2 threshold exhausted for "do { } while (false)
2411 << *(MemLoc.Ptr) << "\n")do { } while (false);
2412 return true;
2413 }
2414 N++;
2415 auto Res = AA->getModRefInfo(&I, MemLoc);
2416 if (isModSet(Res)) {
2417 LLVM_DEBUG(dbgs() << "Aliasing failed on " << I << " for "do { } while (false)
2418 << *(MemLoc.Ptr) << "\n")do { } while (false);
2419 return true;
2420 }
2421 }
2422 LLVM_DEBUG(dbgs() << "Aliasing okay for " << *(MemLoc.Ptr) << "\n");
2423 return false;
2424}
2425
2426bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
2427 Loop *CurLoop, Instruction &I,
2428 SinkAndHoistLICMFlags &Flags) {
2429 // For hoisting, use the walker to determine safety
2430 if (!Flags.getIsSink()) {
2431 MemoryAccess *Source;
2432 // See declaration of SetLicmMssaOptCap for usage details.
2433 if (Flags.tooManyClobberingCalls())
2434 Source = MU->getDefiningAccess();
2435 else {
2436 Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
2437 Flags.incrementClobberingCalls();
2438 }
2439 return !MSSA->isLiveOnEntryDef(Source) &&
2440 CurLoop->contains(Source->getBlock());
2441 }
2442
2443 // For sinking, we'd need to check all Defs below this use. The getClobbering
2444 // call will look on the backedge of the loop, but will check aliasing with
2445 // the instructions on the previous iteration.
2446 // For example:
2447 // for (i ... )
2448 // load a[i] ( Use (LoE)
2449 // store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2450 // i++;
2451 // The load sees no clobbering inside the loop, as the backedge alias check
2452 // does phi translation, and will check aliasing against store a[i-1].
2453 // However sinking the load outside the loop, below the store is incorrect.
2454
2455 // For now, only sink if there are no Defs in the loop, and the existing ones
2456 // precede the use and are in the same block.
2457 // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2458 // needs PostDominatorTreeAnalysis.
2459 // FIXME: More precise: no Defs that alias this Use.
2460 if (Flags.tooManyMemoryAccesses())
2461 return true;
2462 for (auto *BB : CurLoop->getBlocks())
2463 if (pointerInvalidatedByBlockWithMSSA(*BB, *MSSA, *MU))
2464 return true;
2465 // When sinking, the source block may not be part of the loop so check it.
2466 if (!CurLoop->contains(&I))
2467 return pointerInvalidatedByBlockWithMSSA(*I.getParent(), *MSSA, *MU);
2468
2469 return false;
2470}
2471
2472bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
2473 MemoryUse &MU) {
2474 if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2475 for (const auto &MA : *Accesses)
2476 if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2477 if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2478 return true;
2479 return false;
2480}
2481
2482/// Little predicate that returns true if the specified basic block is in
2483/// a subloop of the current one, not the current one itself.
2484///
2485static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2486 assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
2487 return LI->getLoopFor(BB) != CurLoop;
2488}

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR/InstrTypes.h

1//===- llvm/InstrTypes.h - Important Instruction subclasses -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines various meta classes of instructions that exist in the VM
10// representation. Specific concrete subclasses of these may be found in the
11// i*.h files...
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRTYPES_H
16#define LLVM_IR_INSTRTYPES_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/CallingConv.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/LLVMContext.h"
33#include "llvm/IR/OperandTraits.h"
34#include "llvm/IR/Type.h"
35#include "llvm/IR/User.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/Casting.h"
38#include "llvm/Support/ErrorHandling.h"
39#include <algorithm>
40#include <cassert>
41#include <cstddef>
42#include <cstdint>
43#include <iterator>
44#include <string>
45#include <vector>
46
47namespace llvm {
48
49namespace Intrinsic {
50typedef unsigned ID;
51}
52
53//===----------------------------------------------------------------------===//
54// UnaryInstruction Class
55//===----------------------------------------------------------------------===//
56
57class UnaryInstruction : public Instruction {
58protected:
59 UnaryInstruction(Type *Ty, unsigned iType, Value *V,
60 Instruction *IB = nullptr)
61 : Instruction(Ty, iType, &Op<0>(), 1, IB) {
62 Op<0>() = V;
63 }
64 UnaryInstruction(Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
65 : Instruction(Ty, iType, &Op<0>(), 1, IAE) {
66 Op<0>() = V;
67 }
68
69public:
70 // allocate space for exactly one operand
71 void *operator new(size_t S) { return User::operator new(S, 1); }
72 void operator delete(void *Ptr) { User::operator delete(Ptr); }
73
74 /// Transparently provide more efficient getOperand methods.
75 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
76
77 // Methods for support type inquiry through isa, cast, and dyn_cast:
78 static bool classof(const Instruction *I) {
79 return I->isUnaryOp() ||
80 I->getOpcode() == Instruction::Alloca ||
81 I->getOpcode() == Instruction::Load ||
82 I->getOpcode() == Instruction::VAArg ||
83 I->getOpcode() == Instruction::ExtractValue ||
84 (I->getOpcode() >= CastOpsBegin && I->getOpcode() < CastOpsEnd);
85 }
86 static bool classof(const Value *V) {
87 return isa<Instruction>(V) && classof(cast<Instruction>(V));
88 }
89};
90
91template <>
92struct OperandTraits<UnaryInstruction> :
93 public FixedNumOperandTraits<UnaryInstruction, 1> {
94};
95
96 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)
97
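Editor's note: the classof overloads above are what let isa<> and dyn_cast<> recognize this class. A hedged sketch of the usual pattern (the function name is invented for illustration):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// If V is one of the single-operand instructions covered by UnaryInstruction
// (loads, allocas, casts, va_arg, extractvalue), return that operand.
static Value *soleOperandOrNull(Value *V) {
  if (auto *UI = dyn_cast<UnaryInstruction>(V))
    return UI->getOperand(0);
  return nullptr;
}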
98//===----------------------------------------------------------------------===//
99// UnaryOperator Class
100//===----------------------------------------------------------------------===//
101
102class UnaryOperator : public UnaryInstruction {
103 void AssertOK();
104
105protected:
106 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
107 const Twine &Name, Instruction *InsertBefore);
108 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
109 const Twine &Name, BasicBlock *InsertAtEnd);
110
111 // Note: Instruction needs to be a friend here to call cloneImpl.
112 friend class Instruction;
113
114 UnaryOperator *cloneImpl() const;
115
116public:
117
118 /// Construct a unary instruction, given the opcode and an operand.
119 /// Optionally (if InstBefore is specified) insert the instruction
120 /// into a BasicBlock right before the specified instruction. The specified
121 /// Instruction is allowed to be a dereferenced end iterator.
122 ///
123 static UnaryOperator *Create(UnaryOps Op, Value *S,
124 const Twine &Name = Twine(),
125 Instruction *InsertBefore = nullptr);
126
127 /// Construct a unary instruction, given the opcode and an operand.
128 /// Also automatically insert this instruction to the end of the
129 /// BasicBlock specified.
130 ///
131 static UnaryOperator *Create(UnaryOps Op, Value *S,
132 const Twine &Name,
133 BasicBlock *InsertAtEnd);
134
135 /// These methods just forward to Create, and are useful when you
136 /// statically know what type of instruction you're going to create. These
137 /// helpers just save some typing.
138#define HANDLE_UNARY_INST(N, OPC, CLASS) \
139 static UnaryOperator *Create##OPC(Value *V, const Twine &Name = "") {\
140 return Create(Instruction::OPC, V, Name);\
141 }
142#include "llvm/IR/Instruction.def"
143#define HANDLE_UNARY_INST(N, OPC, CLASS) \
144 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
145 BasicBlock *BB) {\
146 return Create(Instruction::OPC, V, Name, BB);\
147 }
148#include "llvm/IR/Instruction.def"
149#define HANDLE_UNARY_INST(N, OPC, CLASS) \
150 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
151 Instruction *I) {\
152 return Create(Instruction::OPC, V, Name, I);\
153 }
154#include "llvm/IR/Instruction.def"
155
156 static UnaryOperator *
157 CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO,
158 const Twine &Name = "",
159 Instruction *InsertBefore = nullptr) {
160 UnaryOperator *UO = Create(Opc, V, Name, InsertBefore);
161 UO->copyIRFlags(CopyO);
162 return UO;
163 }
164
165 static UnaryOperator *CreateFNegFMF(Value *Op, Instruction *FMFSource,
166 const Twine &Name = "",
167 Instruction *InsertBefore = nullptr) {
168 return CreateWithCopiedFlags(Instruction::FNeg, Op, FMFSource, Name,
169 InsertBefore);
170 }
171
172 UnaryOps getOpcode() const {
173 return static_cast<UnaryOps>(Instruction::getOpcode());
174 }
175
176 // Methods for support type inquiry through isa, cast, and dyn_cast:
177 static bool classof(const Instruction *I) {
178 return I->isUnaryOp();
179 }
180 static bool classof(const Value *V) {
181 return isa<Instruction>(V) && classof(cast<Instruction>(V));
182 }
183};
184
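Editor's note: a hedged sketch of the flag-copying helper declared above. CreateFNegFMF builds an fneg and copies the fast-math flags of an existing instruction; everything except the API call is illustrative:

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Build "fneg X" right before InsertPt, reusing the fast-math flags of FMFSrc.
static Value *negateWithFlagsOf(Value *X, Instruction *FMFSrc, Instruction *InsertPt) {
  return UnaryOperator::CreateFNegFMF(X, FMFSrc, "neg", InsertPt);
}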
185//===----------------------------------------------------------------------===//
186// BinaryOperator Class
187//===----------------------------------------------------------------------===//
188
189class BinaryOperator : public Instruction {
190 void AssertOK();
191
192protected:
193 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
194 const Twine &Name, Instruction *InsertBefore);
195 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
196 const Twine &Name, BasicBlock *InsertAtEnd);
197
198 // Note: Instruction needs to be a friend here to call cloneImpl.
199 friend class Instruction;
200
201 BinaryOperator *cloneImpl() const;
202
203public:
204 // allocate space for exactly two operands
205 void *operator new(size_t S) { return User::operator new(S, 2); }
206 void operator delete(void *Ptr) { User::operator delete(Ptr); }
207
208 /// Transparently provide more efficient getOperand methods.
209 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
210
211 /// Construct a binary instruction, given the opcode and the two
212 /// operands. Optionally (if InstBefore is specified) insert the instruction
213 /// into a BasicBlock right before the specified instruction. The specified
214 /// Instruction is allowed to be a dereferenced end iterator.
215 ///
216 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
217 const Twine &Name = Twine(),
218 Instruction *InsertBefore = nullptr);
219
220 /// Construct a binary instruction, given the opcode and the two
221 /// operands. Also automatically insert this instruction to the end of the
222 /// BasicBlock specified.
223 ///
224 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
225 const Twine &Name, BasicBlock *InsertAtEnd);
226
227 /// These methods just forward to Create, and are useful when you
228 /// statically know what type of instruction you're going to create. These
229 /// helpers just save some typing.
230#define HANDLE_BINARY_INST(N, OPC, CLASS) \
231 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
232 const Twine &Name = "") {\
233 return Create(Instruction::OPC, V1, V2, Name);\
234 }
235#include "llvm/IR/Instruction.def"
236#define HANDLE_BINARY_INST(N, OPC, CLASS) \
237 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
238 const Twine &Name, BasicBlock *BB) {\
239 return Create(Instruction::OPC, V1, V2, Name, BB);\
240 }
241#include "llvm/IR/Instruction.def"
242#define HANDLE_BINARY_INST(N, OPC, CLASS) \
243 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
244 const Twine &Name, Instruction *I) {\
245 return Create(Instruction::OPC, V1, V2, Name, I);\
246 }
247#include "llvm/IR/Instruction.def"
248
249 static BinaryOperator *
250 CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Instruction *CopyO,
251 const Twine &Name = "",
252 Instruction *InsertBefore = nullptr) {
253 BinaryOperator *BO = Create(Opc, V1, V2, Name, InsertBefore);
254 BO->copyIRFlags(CopyO);
255 return BO;
256 }
257
258 static BinaryOperator *CreateFAddFMF(Value *V1, Value *V2,
259 Instruction *FMFSource,
260 const Twine &Name = "") {
261 return CreateWithCopiedFlags(Instruction::FAdd, V1, V2, FMFSource, Name);
262 }
263 static BinaryOperator *CreateFSubFMF(Value *V1, Value *V2,
264 Instruction *FMFSource,
265 const Twine &Name = "") {
266 return CreateWithCopiedFlags(Instruction::FSub, V1, V2, FMFSource, Name);
267 }
268 static BinaryOperator *CreateFMulFMF(Value *V1, Value *V2,
269 Instruction *FMFSource,
270 const Twine &Name = "") {
271 return CreateWithCopiedFlags(Instruction::FMul, V1, V2, FMFSource, Name);
272 }
273 static BinaryOperator *CreateFDivFMF(Value *V1, Value *V2,
274 Instruction *FMFSource,
275 const Twine &Name = "") {
276 return CreateWithCopiedFlags(Instruction::FDiv, V1, V2, FMFSource, Name);
277 }
278 static BinaryOperator *CreateFRemFMF(Value *V1, Value *V2,
279 Instruction *FMFSource,
280 const Twine &Name = "") {
281 return CreateWithCopiedFlags(Instruction::FRem, V1, V2, FMFSource, Name);
282 }
283
284 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
285 const Twine &Name = "") {
286 BinaryOperator *BO = Create(Opc, V1, V2, Name);
287 BO->setHasNoSignedWrap(true);
288 return BO;
289 }
290 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
291 const Twine &Name, BasicBlock *BB) {
292 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
293 BO->setHasNoSignedWrap(true);
294 return BO;
295 }
296 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
297 const Twine &Name, Instruction *I) {
298 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
299 BO->setHasNoSignedWrap(true);
300 return BO;
301 }
302
303 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
304 const Twine &Name = "") {
305 BinaryOperator *BO = Create(Opc, V1, V2, Name);
306 BO->setHasNoUnsignedWrap(true);
307 return BO;
308 }
309 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
310 const Twine &Name, BasicBlock *BB) {
311 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
312 BO->setHasNoUnsignedWrap(true);
313 return BO;
314 }
315 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
316 const Twine &Name, Instruction *I) {
317 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
318 BO->setHasNoUnsignedWrap(true);
319 return BO;
320 }
321
322 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
323 const Twine &Name = "") {
324 BinaryOperator *BO = Create(Opc, V1, V2, Name);
325 BO->setIsExact(true);
326 return BO;
327 }
328 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
329 const Twine &Name, BasicBlock *BB) {
330 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
331 BO->setIsExact(true);
332 return BO;
333 }
334 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
335 const Twine &Name, Instruction *I) {
336 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
337 BO->setIsExact(true);
338 return BO;
339 }
340
341#define DEFINE_HELPERS(OPC, NUWNSWEXACT) \
342 static BinaryOperator *Create##NUWNSWEXACT##OPC(Value *V1, Value *V2, \
343 const Twine &Name = "") { \
344 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name); \
345 } \
346 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
347 Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) { \
348 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB); \
349 } \
350 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
351 Value *V1, Value *V2, const Twine &Name, Instruction *I) { \
352 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I); \
353 }
354
355 DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
356 DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
357 DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
358 DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
359 DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
360 DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
361 DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
362 DEFINE_HELPERS(Shl, NUW) // CreateNUWShl
363
364 DEFINE_HELPERS(SDiv, Exact) // CreateExactSDiv
365 DEFINE_HELPERS(UDiv, Exact) // CreateExactUDiv
366 DEFINE_HELPERS(AShr, Exact) // CreateExactAShr
367 DEFINE_HELPERS(LShr, Exact) // CreateExactLShr
368
369#undef DEFINE_HELPERS
370
371 /// Helper functions to construct and inspect unary operations (NEG and NOT)
372 /// via binary operators SUB and XOR:
373 ///
374 /// Create the NEG and NOT instructions out of SUB and XOR instructions.
375 ///
376 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
377 Instruction *InsertBefore = nullptr);
378 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
379 BasicBlock *InsertAtEnd);
380 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
381 Instruction *InsertBefore = nullptr);
382 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
383 BasicBlock *InsertAtEnd);
384 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name = "",
385 Instruction *InsertBefore = nullptr);
386 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
387 BasicBlock *InsertAtEnd);
388 static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
389 Instruction *InsertBefore = nullptr);
390 static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
391 BasicBlock *InsertAtEnd);
392
393 BinaryOps getOpcode() const {
394 return static_cast<BinaryOps>(Instruction::getOpcode());
395 }
396
397 /// Exchange the two operands to this instruction.
398 /// This instruction is safe to use on any binary instruction and
399 /// does not modify the semantics of the instruction. If the instruction
400 /// cannot be reversed (ie, it's a Div), then return true.
401 ///
402 bool swapOperands();
403
404 // Methods for support type inquiry through isa, cast, and dyn_cast:
405 static bool classof(const Instruction *I) {
406 return I->isBinaryOp();
407 }
408 static bool classof(const Value *V) {
409 return isa<Instruction>(V) && classof(cast<Instruction>(V));
410 }
411};
412
413template <>
414struct OperandTraits<BinaryOperator> :
415 public FixedNumOperandTraits<BinaryOperator, 2> {
416};
417
418 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
419
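Editor's note: the DEFINE_HELPERS block above stamps out wrappers such as CreateNSWAdd and CreateExactSDiv. A hedged usage sketch (the helper name and value names are invented):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// "A + B" with the no-signed-wrap flag set, inserted before InsertPt.
static Value *addNSW(Value *A, Value *B, Instruction *InsertPt) {
  return BinaryOperator::CreateNSWAdd(A, B, "sum", InsertPt);
}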
420//===----------------------------------------------------------------------===//
421// CastInst Class
422//===----------------------------------------------------------------------===//
423
424/// This is the base class for all instructions that perform data
425/// casts. It is simply provided so that instruction category testing
426/// can be performed with code like:
427///
428/// if (isa<CastInst>(Instr)) { ... }
429/// Base class of casting instructions.
430class CastInst : public UnaryInstruction {
431protected:
432 /// Constructor with insert-before-instruction semantics for subclasses
433 CastInst(Type *Ty, unsigned iType, Value *S,
434 const Twine &NameStr = "", Instruction *InsertBefore = nullptr)
435 : UnaryInstruction(Ty, iType, S, InsertBefore) {
436 setName(NameStr);
437 }
438 /// Constructor with insert-at-end-of-block semantics for subclasses
439 CastInst(Type *Ty, unsigned iType, Value *S,
440 const Twine &NameStr, BasicBlock *InsertAtEnd)
441 : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
442 setName(NameStr);
443 }
444
445public:
446 /// Provides a way to construct any of the CastInst subclasses using an
447 /// opcode instead of the subclass's constructor. The opcode must be in the
448 /// CastOps category (Instruction::isCast(opcode) returns true). This
449 /// constructor has insert-before-instruction semantics to automatically
450 /// insert the new CastInst before InsertBefore (if it is non-null).
451 /// Construct any of the CastInst subclasses
452 static CastInst *Create(
453 Instruction::CastOps, ///< The opcode of the cast instruction
454 Value *S, ///< The value to be casted (operand 0)
455 Type *Ty, ///< The type to which cast should be made
456 const Twine &Name = "", ///< Name for the instruction
457 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
458 );
459 /// Provides a way to construct any of the CastInst subclasses using an
460 /// opcode instead of the subclass's constructor. The opcode must be in the
461 /// CastOps category. This constructor has insert-at-end-of-block semantics
462 /// to automatically insert the new CastInst at the end of InsertAtEnd (if
463 /// it's non-null).
464 /// Construct any of the CastInst subclasses
465 static CastInst *Create(
466 Instruction::CastOps, ///< The opcode for the cast instruction
467 Value *S, ///< The value to be casted (operand 0)
468 Type *Ty, ///< The type to which operand is casted
469 const Twine &Name, ///< The name for the instruction
470 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
471 );
472
473 /// Create a ZExt or BitCast cast instruction
474 static CastInst *CreateZExtOrBitCast(
475 Value *S, ///< The value to be casted (operand 0)
476 Type *Ty, ///< The type to which cast should be made
477 const Twine &Name = "", ///< Name for the instruction
478 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
479 );
480
481 /// Create a ZExt or BitCast cast instruction
482 static CastInst *CreateZExtOrBitCast(
483 Value *S, ///< The value to be casted (operand 0)
484 Type *Ty, ///< The type to which operand is casted
485 const Twine &Name, ///< The name for the instruction
486 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
487 );
488
489 /// Create a SExt or BitCast cast instruction
490 static CastInst *CreateSExtOrBitCast(
491 Value *S, ///< The value to be casted (operand 0)
492 Type *Ty, ///< The type to which cast should be made
493 const Twine &Name = "", ///< Name for the instruction
494 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
495 );
496
497 /// Create a SExt or BitCast cast instruction
498 static CastInst *CreateSExtOrBitCast(
499 Value *S, ///< The value to be casted (operand 0)
500 Type *Ty, ///< The type to which operand is casted
501 const Twine &Name, ///< The name for the instruction
502 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
503 );
504
505 /// Create a BitCast, AddrSpaceCast, or a PtrToInt cast instruction.
506 static CastInst *CreatePointerCast(
507 Value *S, ///< The pointer value to be casted (operand 0)
508 Type *Ty, ///< The type to which operand is casted
509 const Twine &Name, ///< The name for the instruction
510 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
511 );
512
513 /// Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
514 static CastInst *CreatePointerCast(
515 Value *S, ///< The pointer value to be casted (operand 0)
516 Type *Ty, ///< The type to which cast should be made
517 const Twine &Name = "", ///< Name for the instruction
518 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
519 );
520
521 /// Create a BitCast or an AddrSpaceCast cast instruction.
522 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
523 Value *S, ///< The pointer value to be casted (operand 0)
524 Type *Ty, ///< The type to which operand is casted
525 const Twine &Name, ///< The name for the instruction
526 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
527 );
528
529 /// Create a BitCast or an AddrSpaceCast cast instruction.
530 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
531 Value *S, ///< The pointer value to be casted (operand 0)
532 Type *Ty, ///< The type to which cast should be made
533 const Twine &Name = "", ///< Name for the instruction
534 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
535 );
536
537 /// Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
538 ///
539 /// If the value is a pointer type and the destination an integer type,
540 /// creates a PtrToInt cast. If the value is an integer type and the
541 /// destination a pointer type, creates an IntToPtr cast. Otherwise, creates
542 /// a bitcast.
543 static CastInst *CreateBitOrPointerCast(
544 Value *S, ///< The pointer value to be casted (operand 0)
545 Type *Ty, ///< The type to which cast should be made
546 const Twine &Name = "", ///< Name for the instruction
547 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
548 );
549
550 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
551 static CastInst *CreateIntegerCast(
552 Value *S, ///< The pointer value to be casted (operand 0)
553 Type *Ty, ///< The type to which cast should be made
554 bool isSigned, ///< Whether to regard S as signed or not
555 const Twine &Name = "", ///< Name for the instruction
556 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
557 );
558
559 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
560 static CastInst *CreateIntegerCast(
561 Value *S, ///< The integer value to be casted (operand 0)
562 Type *Ty, ///< The integer type to which operand is casted
563 bool isSigned, ///< Whether to regard S as signed or not
564 const Twine &Name, ///< The name for the instruction
565 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
566 );
567
568 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
569 static CastInst *CreateFPCast(
570 Value *S, ///< The floating point value to be casted
571 Type *Ty, ///< The floating point type to cast to
572 const Twine &Name = "", ///< Name for the instruction
573 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
574 );
575
576 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
577 static CastInst *CreateFPCast(
578 Value *S, ///< The floating point value to be casted
579 Type *Ty, ///< The floating point type to cast to
580 const Twine &Name, ///< The name for the instruction
581 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
582 );
583
584 /// Create a Trunc or BitCast cast instruction
585 static CastInst *CreateTruncOrBitCast(
586 Value *S, ///< The value to be casted (operand 0)
587 Type *Ty, ///< The type to which cast should be made
588 const Twine &Name = "", ///< Name for the instruction
589 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
590 );
591
592 /// Create a Trunc or BitCast cast instruction
593 static CastInst *CreateTruncOrBitCast(
594 Value *S, ///< The value to be casted (operand 0)
595 Type *Ty, ///< The type to which operand is casted
596 const Twine &Name, ///< The name for the instruction
597 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
598 );
599
600 /// Check whether a bitcast between these types is valid
601 static bool isBitCastable(
602 Type *SrcTy, ///< The Type from which the value should be cast.
603 Type *DestTy ///< The Type to which the value should be cast.
604 );
605
606 /// Check whether a bitcast, inttoptr, or ptrtoint cast between these
607 /// types is valid and a no-op.
608 ///
609 /// This ensures that any pointer<->integer cast has enough bits in the
610 /// integer and any other cast is a bitcast.
611 static bool isBitOrNoopPointerCastable(
612 Type *SrcTy, ///< The Type from which the value should be cast.
613 Type *DestTy, ///< The Type to which the value should be cast.
614 const DataLayout &DL);
615
616 /// Returns the opcode necessary to cast Val into Ty using usual casting
617 /// rules.
618 /// Infer the opcode for cast operand and type
619 static Instruction::CastOps getCastOpcode(
620 const Value *Val, ///< The value to cast
621 bool SrcIsSigned, ///< Whether to treat the source as signed
622 Type *Ty, ///< The Type to which the value should be casted
623 bool DstIsSigned ///< Whether to treat the dest. as signed
624 );
625
626 /// There are several places where we need to know if a cast instruction
627 /// only deals with integer source and destination types. To simplify that
628 /// logic, this method is provided.
629 /// @returns true iff the cast has only integral typed operand and dest type.
630 /// Determine if this is an integer-only cast.
631 bool isIntegerCast() const;
632
633 /// A lossless cast is one that does not alter the basic value. It implies
634 /// a no-op cast but is more stringent, preventing things like int->float,
635 /// long->double, or int->ptr.
636 /// @returns true iff the cast is lossless.
637 /// Determine if this is a lossless cast.
638 bool isLosslessCast() const;
639
640 /// A no-op cast is one that can be effected without changing any bits.
641 /// It implies that the source and destination types are the same size. The
642 /// DataLayout argument is to determine the pointer size when examining casts
643 /// involving Integer and Pointer types. They are no-op casts if the integer
644 /// is the same size as the pointer. However, pointer size varies with
645 /// platform. Note that a precondition of this method is that the cast is
646 /// legal - i.e. the instruction formed with these operands would verify.
647 static bool isNoopCast(
648 Instruction::CastOps Opcode, ///< Opcode of cast
649 Type *SrcTy, ///< SrcTy of cast
650 Type *DstTy, ///< DstTy of cast
651 const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
652 );
653
654 /// Determine if this cast is a no-op cast.
655 ///
656 /// \param DL is the DataLayout to determine pointer size.
657 bool isNoopCast(const DataLayout &DL) const;
658
659 /// Determine how a pair of casts can be eliminated, if they can be at all.
660 /// This is a helper function for both CastInst and ConstantExpr.
661 /// @returns 0 if the CastInst pair can't be eliminated, otherwise
662 /// returns Instruction::CastOps value for a cast that can replace
663 /// the pair, casting SrcTy to DstTy.
664 /// Determine if a cast pair is eliminable
665 static unsigned isEliminableCastPair(
666 Instruction::CastOps firstOpcode, ///< Opcode of first cast
667 Instruction::CastOps secondOpcode, ///< Opcode of second cast
668 Type *SrcTy, ///< SrcTy of 1st cast
669 Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
670 Type *DstTy, ///< DstTy of 2nd cast
671 Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
672 Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
673 Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null
674 );
675
676 /// Return the opcode of this CastInst
677 Instruction::CastOps getOpcode() const {
678 return Instruction::CastOps(Instruction::getOpcode());
679 }
680
681 /// Return the source type, as a convenience
682 Type* getSrcTy() const { return getOperand(0)->getType(); }
683 /// Return the destination type, as a convenience
684 Type* getDestTy() const { return getType(); }
685
686 /// This method can be used to determine if a cast from SrcTy to DstTy using
687 /// Opcode op is valid or not.
688 /// @returns true iff the proposed cast is valid.
689 /// Determine if a cast is valid without creating one.
690 static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy);
691 static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
692 return castIsValid(op, S->getType(), DstTy);
693 }
694
695 /// Methods for support type inquiry through isa, cast, and dyn_cast:
696 static bool classof(const Instruction *I) {
697 return I->isCast();
698 }
699 static bool classof(const Value *V) {
700 return isa<Instruction>(V) && classof(cast<Instruction>(V));
701 }
702};
703
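Editor's note: a hedged sketch around CreateIntegerCast above, which picks trunc/zext/sext/bitcast as needed for int-to-int conversions; the function and variable names are illustrative:

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// Convert V to DestIntTy, treating V as signed, inserting before InsertPt.
static Value *toIntType(Value *V, Type *DestIntTy, Instruction *InsertPt) {
  return CastInst::CreateIntegerCast(V, DestIntTy, /*isSigned=*/true, "conv", InsertPt);
}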
704//===----------------------------------------------------------------------===//
705// CmpInst Class
706//===----------------------------------------------------------------------===//
707
708/// This class is the base class for the comparison instructions.
709/// Abstract base class of comparison instructions.
710class CmpInst : public Instruction {
711public:
712 /// This enumeration lists the possible predicates for CmpInst subclasses.
713 /// Values in the range 0-31 are reserved for FCmpInst, while values in the
714 /// range 32-64 are reserved for ICmpInst. This is necessary to ensure the
715 /// predicate values are not overlapping between the classes.
716 ///
717 /// Some passes (e.g. InstCombine) depend on the bit-wise characteristics of
718 /// FCMP_* values. Changing the bit patterns requires a potential change to
719 /// those passes.
720 enum Predicate : unsigned {
721 // Opcode U L G E Intuitive operation
722 FCMP_FALSE = 0, ///< 0 0 0 0 Always false (always folded)
723 FCMP_OEQ = 1, ///< 0 0 0 1 True if ordered and equal
724 FCMP_OGT = 2, ///< 0 0 1 0 True if ordered and greater than
725 FCMP_OGE = 3, ///< 0 0 1 1 True if ordered and greater than or equal
726 FCMP_OLT = 4, ///< 0 1 0 0 True if ordered and less than
727 FCMP_OLE = 5, ///< 0 1 0 1 True if ordered and less than or equal
728 FCMP_ONE = 6, ///< 0 1 1 0 True if ordered and operands are unequal
729 FCMP_ORD = 7, ///< 0 1 1 1 True if ordered (no nans)
730 FCMP_UNO = 8, ///< 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
731 FCMP_UEQ = 9, ///< 1 0 0 1 True if unordered or equal
732 FCMP_UGT = 10, ///< 1 0 1 0 True if unordered or greater than
733 FCMP_UGE = 11, ///< 1 0 1 1 True if unordered, greater than, or equal
734 FCMP_ULT = 12, ///< 1 1 0 0 True if unordered or less than
735 FCMP_ULE = 13, ///< 1 1 0 1 True if unordered, less than, or equal
736 FCMP_UNE = 14, ///< 1 1 1 0 True if unordered or not equal
737 FCMP_TRUE = 15, ///< 1 1 1 1 Always true (always folded)
738 FIRST_FCMP_PREDICATE = FCMP_FALSE,
739 LAST_FCMP_PREDICATE = FCMP_TRUE,
740 BAD_FCMP_PREDICATE = FCMP_TRUE + 1,
741 ICMP_EQ = 32, ///< equal
742 ICMP_NE = 33, ///< not equal
743 ICMP_UGT = 34, ///< unsigned greater than
744 ICMP_UGE = 35, ///< unsigned greater or equal
745 ICMP_ULT = 36, ///< unsigned less than
746 ICMP_ULE = 37, ///< unsigned less or equal
747 ICMP_SGT = 38, ///< signed greater than
748 ICMP_SGE = 39, ///< signed greater or equal
749 ICMP_SLT = 40, ///< signed less than
750 ICMP_SLE = 41, ///< signed less or equal
751 FIRST_ICMP_PREDICATE = ICMP_EQ,
752 LAST_ICMP_PREDICATE = ICMP_SLE,
753 BAD_ICMP_PREDICATE = ICMP_SLE + 1
754 };
755 using PredicateField =
756 Bitfield::Element<Predicate, 0, 6, LAST_ICMP_PREDICATE>;
757
758protected:
759 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
760 Value *LHS, Value *RHS, const Twine &Name = "",
761 Instruction *InsertBefore = nullptr,
762 Instruction *FlagsSource = nullptr);
763
764 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
765 Value *LHS, Value *RHS, const Twine &Name,
766 BasicBlock *InsertAtEnd);
767
768public:
769 // allocate space for exactly two operands
770 void *operator new(size_t S) { return User::operator new(S, 2); }
771 void operator delete(void *Ptr) { User::operator delete(Ptr); }
772
773 /// Construct a compare instruction, given the opcode, the predicate and
774 /// the two operands. Optionally (if InstBefore is specified) insert the
775 /// instruction into a BasicBlock right before the specified instruction.
776 /// The specified Instruction is allowed to be a dereferenced end iterator.
777 /// Create a CmpInst
778 static CmpInst *Create(OtherOps Op,
779 Predicate predicate, Value *S1,
780 Value *S2, const Twine &Name = "",
781 Instruction *InsertBefore = nullptr);
782
783 /// Construct a compare instruction, given the opcode, the predicate and the
784 /// two operands. Also automatically insert this instruction to the end of
785 /// the BasicBlock specified.
786 /// Create a CmpInst
787 static CmpInst *Create(OtherOps Op, Predicate predicate, Value *S1,
788 Value *S2, const Twine &Name, BasicBlock *InsertAtEnd);
789
790 /// Get the opcode casted to the right type
791 OtherOps getOpcode() const {
792 return static_cast<OtherOps>(Instruction::getOpcode());
793 }
794
795 /// Return the predicate for this instruction.
796 Predicate getPredicate() const { return getSubclassData<PredicateField>(); }
797
798 /// Set the predicate for this instruction to the specified value.
799 void setPredicate(Predicate P) { setSubclassData<PredicateField>(P); }
800
801 static bool isFPPredicate(Predicate P) {
802 static_assert(FIRST_FCMP_PREDICATE == 0,
803 "FIRST_FCMP_PREDICATE is required to be 0");
804 return P <= LAST_FCMP_PREDICATE;
805 }
806
807 static bool isIntPredicate(Predicate P) {
808 return P >= FIRST_ICMP_PREDICATE && P <= LAST_ICMP_PREDICATE;
809 }
810
811 static StringRef getPredicateName(Predicate P);
812
813 bool isFPPredicate() const { return isFPPredicate(getPredicate()); }
814 bool isIntPredicate() const { return isIntPredicate(getPredicate()); }
815
816 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
817 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
818 /// @returns the inverse predicate for the instruction's current predicate.
819 /// Return the inverse of the instruction's predicate.
820 Predicate getInversePredicate() const {
821 return getInversePredicate(getPredicate());
822 }
823
824 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
825 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
826 /// @returns the inverse predicate for predicate provided in \p pred.
827 /// Return the inverse of a given predicate
828 static Predicate getInversePredicate(Predicate pred);
829
830 /// For example, EQ->EQ, SLE->SGE, ULT->UGT,
831 /// OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
832 /// @returns the predicate that would be the result of exchanging the two
833 /// operands of the CmpInst instruction without changing the result
834 /// produced.
835 /// Return the predicate as if the operands were swapped
836 Predicate getSwappedPredicate() const {
837 return getSwappedPredicate(getPredicate());
838 }
839
840 /// This is a static version that you can use without an instruction
841 /// available.
842 /// Return the predicate as if the operands were swapped.
843 static Predicate getSwappedPredicate(Predicate pred);
844
845 /// This is a static version that you can use without an instruction
846 /// available.
847 /// @returns true if the comparison predicate is strict, false otherwise.
848 static bool isStrictPredicate(Predicate predicate);
849
850 /// @returns true if the comparison predicate is strict, false otherwise.
851 /// Determine if this instruction is using a strict comparison predicate.
852 bool isStrictPredicate() const { return isStrictPredicate(getPredicate()); }
853
854 /// This is a static version that you can use without an instruction
855 /// available.
856 /// @returns true if the comparison predicate is non-strict, false otherwise.
857 static bool isNonStrictPredicate(Predicate predicate);
858
859 /// @returns true if the comparison predicate is non-strict, false otherwise.
860 /// Determine if this instruction is using a non-strict comparison predicate.
861 bool isNonStrictPredicate() const {
862 return isNonStrictPredicate(getPredicate());
863 }
864
865 /// For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
866 /// Returns the strict version of non-strict comparisons.
867 Predicate getStrictPredicate() const {
868 return getStrictPredicate(getPredicate());
869 }
870
871 /// This is a static version that you can use without an instruction
872 /// available.
873 /// @returns the strict version of comparison provided in \p pred.
874 /// If \p pred is not a strict comparison predicate, returns \p pred.
875 /// Returns the strict version of non-strict comparisons.
876 static Predicate getStrictPredicate(Predicate pred);
877
878 /// For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
879 /// Returns the non-strict version of strict comparisons.
880 Predicate getNonStrictPredicate() const {
881 return getNonStrictPredicate(getPredicate());
882 }
883
884 /// This is a static version that you can use without an instruction
885 /// available.
886 /// @returns the non-strict version of comparison provided in \p pred.
887 /// If \p pred is not a strict comparison predicate, returns \p pred.
888 /// Returns the non-strict version of strict comparisons.
889 static Predicate getNonStrictPredicate(Predicate pred);
890
891 /// This is a static version that you can use without an instruction
892 /// available.
893 /// Return the flipped strictness of predicate
894 static Predicate getFlippedStrictnessPredicate(Predicate pred);
895
896 /// For predicate of kind "is X or equal to 0" returns the predicate "is X".
897 /// For predicate of kind "is X" returns the predicate "is X or equal to 0".
898 /// Other kinds of predicates are not supported.
899 /// @returns the predicate without the "or equal to zero" part if it had
900 /// one, and with it otherwise.
901 /// Return the flipped strictness of predicate
902 Predicate getFlippedStrictnessPredicate() const {
903 return getFlippedStrictnessPredicate(getPredicate());
904 }
905
906 /// Provide more efficient getOperand methods.
907 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
908
909 /// This is just a convenience that dispatches to the subclasses.
910 /// Swap the operands and adjust predicate accordingly to retain
911 /// the same comparison.
912 void swapOperands();
913
914 /// This is just a convenience that dispatches to the subclasses.
915 /// Determine if this CmpInst is commutative.
916 bool isCommutative() const;
917
918 /// Determine if this is an equals/not equals predicate.
919 /// This is a static version that you can use without an instruction
920 /// available.
921 static bool isEquality(Predicate pred);
922
923 /// Determine if this is an equals/not equals predicate.
924 bool isEquality() const { return isEquality(getPredicate()); }
925
926 /// Return true if the predicate is relational (not EQ or NE).
927 static bool isRelational(Predicate P) { return !isEquality(P); }
928
929 /// Return true if the predicate is relational (not EQ or NE).
930 bool isRelational() const { return !isEquality(); }
931
932 /// @returns true if the comparison is signed, false otherwise.
933 /// Determine if this instruction is using a signed comparison.
934 bool isSigned() const {
935 return isSigned(getPredicate());
936 }
937
938 /// @returns true if the comparison is unsigned, false otherwise.
939 /// Determine if this instruction is using an unsigned comparison.
940 bool isUnsigned() const {
941 return isUnsigned(getPredicate());
942 }
943
944 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
945 /// @returns the signed version of the unsigned predicate pred.
946 /// return the signed version of a predicate
947 static Predicate getSignedPredicate(Predicate pred);
948
949 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
950 /// @returns the signed version of the predicate for this instruction (which
951 /// has to be an unsigned predicate).
952 /// return the signed version of a predicate
953 Predicate getSignedPredicate() {
954 return getSignedPredicate(getPredicate());
955 }
956
957 /// For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert
958 /// @returns the unsigned version of the signed predicate pred.
959 static Predicate getUnsignedPredicate(Predicate pred);
960
961 /// For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert
962 /// @returns the unsigned version of the predicate for this instruction (which
963 /// has to be a signed predicate).
964 /// return the unsigned version of a predicate
965 Predicate getUnsignedPredicate() {
966 return getUnsignedPredicate(getPredicate());
967 }
968
969 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert
970 /// @returns the unsigned version of the signed predicate pred or
971 /// the signed version of the unsigned predicate pred.
972 static Predicate getFlippedSignednessPredicate(Predicate pred);
973
974 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert
975 /// @returns the unsigned version of the signed predicate pred or
976 /// the signed version of the unsigned predicate pred.
977 Predicate getFlippedSignednessPredicate() {
978 return getFlippedSignednessPredicate(getPredicate());
979 }
980
981 /// This is just a convenience.
982 /// Determine if this is true when both operands are the same.
983 bool isTrueWhenEqual() const {
984 return isTrueWhenEqual(getPredicate());
985 }
986
987 /// This is just a convenience.
988 /// Determine if this is false when both operands are the same.
989 bool isFalseWhenEqual() const {
990 return isFalseWhenEqual(getPredicate());
991 }
992
993 /// @returns true if the predicate is unsigned, false otherwise.
994 /// Determine if the predicate is an unsigned operation.
995 static bool isUnsigned(Predicate predicate);
996
997 /// @returns true if the predicate is signed, false otherwise.
998 /// Determine if the predicate is a signed operation.
999 static bool isSigned(Predicate predicate);
1000
1001 /// Determine if the predicate is an ordered operation.
1002 static bool isOrdered(Predicate predicate);
1003
1004 /// Determine if the predicate is an unordered operation.
1005 static bool isUnordered(Predicate predicate);
1006
1007 /// Determine if the predicate is true when comparing a value with itself.
1008 static bool isTrueWhenEqual(Predicate predicate);
1009
1010 /// Determine if the predicate is false when comparing a value with itself.
1011 static bool isFalseWhenEqual(Predicate predicate);
1012
1013 /// Determine if Pred1 implies Pred2 is true when two compares have matching
1014 /// operands.
1015 static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2);
1016
1017 /// Determine if Pred1 implies Pred2 is false when two compares have matching
1018 /// operands.
1019 static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2);
1020
1021 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1022 static bool classof(const Instruction *I) {
1023 return I->getOpcode() == Instruction::ICmp ||
1024 I->getOpcode() == Instruction::FCmp;
1025 }
1026 static bool classof(const Value *V) {
1027 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1028 }
1029
1030 /// Create a result type for fcmp/icmp
1031 static Type* makeCmpResultType(Type* opnd_type) {
1032 if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
1033 return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
1034 vt->getElementCount());
1035 }
1036 return Type::getInt1Ty(opnd_type->getContext());
1037 }
1038
1039private:
1040 // Shadow Value::setValueSubclassData with a private forwarding method so that
1041 // subclasses cannot accidentally use it.
1042 void setValueSubclassData(unsigned short D) {
1043 Value::setValueSubclassData(D);
1044 }
1045};
1046
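Editor's note: two hedged illustrations of the predicate machinery above: the U/L/G/E bit layout of the FCMP_* values, and composing the inverse/swapped helpers (the helper name is invented):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// An ordered predicate ORed with the "unordered" bit yields its unordered
// counterpart, e.g. OLE (0101) | UNO (1000) == ULE (1101).
static_assert((CmpInst::FCMP_OLE | CmpInst::FCMP_UNO) == CmpInst::FCMP_ULE,
              "FCMP bit layout");

// "a < b" negated and then with operands swapped: SLT -> SGE -> SLE.
static CmpInst::Predicate invertThenSwap(CmpInst::Predicate P) {
  return CmpInst::getSwappedPredicate(CmpInst::getInversePredicate(P));
}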
1047// FIXME: these are redundant if CmpInst < BinaryOperator
1048template <>
1049struct OperandTraits<CmpInst> : public FixedNumOperandTraits<CmpInst, 2> {
1050};
1051
1052 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)
1053
1054/// A lightweight accessor for an operand bundle meant to be passed
1055/// around by value.
1056struct OperandBundleUse {
1057 ArrayRef<Use> Inputs;
1058
1059 OperandBundleUse() = default;
1060 explicit OperandBundleUse(StringMapEntry<uint32_t> *Tag, ArrayRef<Use> Inputs)
1061 : Inputs(Inputs), Tag(Tag) {}
1062
1063 /// Return true if the operand at index \p Idx in this operand bundle
1064 /// has the attribute A.
1065 bool operandHasAttr(unsigned Idx, Attribute::AttrKind A) const {
1066 if (isDeoptOperandBundle())
1067 if (A == Attribute::ReadOnly || A == Attribute::NoCapture)
1068 return Inputs[Idx]->getType()->isPointerTy();
1069
1070 // Conservative answer: no operands have any attributes.
1071 return false;
1072 }
1073
1074 /// Return the tag of this operand bundle as a string.
1075 StringRef getTagName() const {
1076 return Tag->getKey();
1077 }
1078
1079 /// Return the tag of this operand bundle as an integer.
1080 ///
1081 /// Operand bundle tags are interned by LLVMContextImpl::getOrInsertBundleTag,
1082 /// and this function returns the unique integer that getOrInsertBundleTag
1083 /// associated with the tag of this operand bundle.
1084 uint32_t getTagID() const {
1085 return Tag->getValue();
1086 }
1087
1088 /// Return true if this is a "deopt" operand bundle.
1089 bool isDeoptOperandBundle() const {
1090 return getTagID() == LLVMContext::OB_deopt;
1091 }
1092
1093 /// Return true if this is a "funclet" operand bundle.
1094 bool isFuncletOperandBundle() const {
1095 return getTagID() == LLVMContext::OB_funclet;
1096 }
1097
1098 /// Return true if this is a "cfguardtarget" operand bundle.
1099 bool isCFGuardTargetOperandBundle() const {
1100 return getTagID() == LLVMContext::OB_cfguardtarget;
1101 }
1102
1103private:
1104 /// Pointer to an entry in LLVMContextImpl::getOrInsertBundleTag.
1105 StringMapEntry<uint32_t> *Tag;
1106};
1107
1108/// A container for an operand bundle being viewed as a set of values
1109/// rather than a set of uses.
1110///
1111/// Unlike OperandBundleUse, OperandBundleDefT owns the memory it carries, and
1112/// so it is possible to create and pass around "self-contained" instances of
1113/// OperandBundleDef and ConstOperandBundleDef.
1114template <typename InputTy> class OperandBundleDefT {
1115 std::string Tag;
1116 std::vector<InputTy> Inputs;
1117
1118public:
1119 explicit OperandBundleDefT(std::string Tag, std::vector<InputTy> Inputs)
1120 : Tag(std::move(Tag)), Inputs(std::move(Inputs)) {}
1121 explicit OperandBundleDefT(std::string Tag, ArrayRef<InputTy> Inputs)
1122 : Tag(std::move(Tag)), Inputs(Inputs) {}
1123
1124 explicit OperandBundleDefT(const OperandBundleUse &OBU) {
1125 Tag = std::string(OBU.getTagName());
1126 llvm::append_range(Inputs, OBU.Inputs);
1127 }
1128
1129 ArrayRef<InputTy> inputs() const { return Inputs; }
1130
1131 using input_iterator = typename std::vector<InputTy>::const_iterator;
1132
1133 size_t input_size() const { return Inputs.size(); }
1134 input_iterator input_begin() const { return Inputs.begin(); }
1135 input_iterator input_end() const { return Inputs.end(); }
1136
1137 StringRef getTag() const { return Tag; }
1138};
1139
1140using OperandBundleDef = OperandBundleDefT<Value *>;
1141using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
1142
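Editor's note: a hedged sketch of building a self-contained bundle definition as described above. The "deopt" tag matches the OB_deopt bundle discussed earlier; the function name is invented:

#include "llvm/IR/InstrTypes.h"
#include <vector>
using namespace llvm;

// Package live values into a "deopt" operand bundle definition that owns
// its inputs (see OperandBundleDefT above).
static OperandBundleDef makeDeoptBundle(const std::vector<Value *> &Live) {
  return OperandBundleDef("deopt", Live);
}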
1143//===----------------------------------------------------------------------===//
1144// CallBase Class
1145//===----------------------------------------------------------------------===//
1146
1147/// Base class for all callable instructions (InvokeInst and CallInst)
1148/// Holds everything related to calling a function.
1149///
1150/// All call-like instructions are required to use a common operand layout:
1151/// - Zero or more arguments to the call,
1152/// - Zero or more operand bundles with zero or more operand inputs each
1153/// bundle,
1154/// - Zero or more subclass controlled operands
1155/// - The called function.
1156///
1157/// This allows this base class to easily access the called function and the
1158/// start of the arguments without knowing how many other operands a particular
1159/// subclass requires. Note that accessing the end of the argument list isn't
1160/// as cheap as most other operations on the base class.
1161class CallBase : public Instruction {
1162protected:
1163 // The first two bits are reserved by CallInst for fast retrieval,
1164 using CallInstReservedField = Bitfield::Element<unsigned, 0, 2>;
1165 using CallingConvField =
1166 Bitfield::Element<CallingConv::ID, CallInstReservedField::NextBit, 10,
1167 CallingConv::MaxID>;
1168 static_assert(
1169 Bitfield::areContiguous<CallInstReservedField, CallingConvField>(),
1170 "Bitfields must be contiguous");
1171
1172 /// The last operand is the called operand.
1173 static constexpr int CalledOperandOpEndIdx = -1;
1174
1175 AttributeList Attrs; ///< parameter attributes for callable
1176 FunctionType *FTy;
1177
1178 template <class... ArgsTy>
1179 CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
1180 : Instruction(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
1181
1182 using Instruction::Instruction;
1183
1184 bool hasDescriptor() const { return Value::HasDescriptor; }
1185
1186 unsigned getNumSubclassExtraOperands() const {
1187 switch (getOpcode()) {
1188 case Instruction::Call:
1189 return 0;
1190 case Instruction::Invoke:
1191 return 2;
1192 case Instruction::CallBr:
1193 return getNumSubclassExtraOperandsDynamic();
1194 }
1195 llvm_unreachable("Invalid opcode!");
1196 }
1197
1198 /// Get the number of extra operands for instructions that don't have a fixed
1199 /// number of extra operands.
1200 unsigned getNumSubclassExtraOperandsDynamic() const;
1201
1202public:
1203 using Instruction::getContext;
1204
1205 /// Create a clone of \p CB with a different set of operand bundles and
1206 /// insert it before \p InsertPt.
1207 ///
1208 /// The returned call instruction is identical to \p CB in every way except that
1209 /// the operand bundles for the new instruction are set to the operand bundles
1210 /// in \p Bundles.
1211 static CallBase *Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
1212 Instruction *InsertPt = nullptr);
1213
1214 /// Create a clone of \p CB with the operand bundle with the tag matching
1215 /// \p Bundle's tag replaced with Bundle, and insert it before \p InsertPt.
1216 ///
1217 /// The returned call instruction is identical to \p CB in every way except that
1218 /// the specified operand bundle has been replaced.
1219 static CallBase *Create(CallBase *CB,
1220 OperandBundleDef Bundle,
1221 Instruction *InsertPt = nullptr);
1222
1223 /// Create a clone of \p CB with operand bundle \p OB added.
1224 static CallBase *addOperandBundle(CallBase *CB, uint32_t ID,
1225 OperandBundleDef OB,
1226 Instruction *InsertPt = nullptr);
1227
1228 /// Create a clone of \p CB with operand bundle \p ID removed.
1229 static CallBase *removeOperandBundle(CallBase *CB, uint32_t ID,
1230 Instruction *InsertPt = nullptr);
1231
1232 static bool classof(const Instruction *I) {
1233 return I->getOpcode() == Instruction::Call ||
1234 I->getOpcode() == Instruction::Invoke ||
1235 I->getOpcode() == Instruction::CallBr;
1236 }
1237 static bool classof(const Value *V) {
1238 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1239 }
1240
1241 FunctionType *getFunctionType() const { return FTy; }
1242
1243 void mutateFunctionType(FunctionType *FTy) {
1244 Value::mutateType(FTy->getReturnType());
1245 this->FTy = FTy;
1246 }
1247
1248 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1249
1250 /// data_operands_begin/data_operands_end - Return iterators iterating over
1251 /// the call / invoke argument list and bundle operands. For invokes, this is
1252 /// the set of instruction operands except the invoke target and the two
1253 /// successor blocks; and for calls this is the set of instruction operands
1254 /// except the call target.
1255 User::op_iterator data_operands_begin() { return op_begin(); }
1256 User::const_op_iterator data_operands_begin() const {
1257 return const_cast<CallBase *>(this)->data_operands_begin();
1258 }
1259 User::op_iterator data_operands_end() {
1260 // Walk from the end of the operands over the called operand and any
1261 // subclass operands.
1262 return op_end() - getNumSubclassExtraOperands() - 1;
1263 }
1264 User::const_op_iterator data_operands_end() const {
1265 return const_cast<CallBase *>(this)->data_operands_end();
1266 }
1267 iterator_range<User::op_iterator> data_ops() {
1268 return make_range(data_operands_begin(), data_operands_end());
1269 }
1270 iterator_range<User::const_op_iterator> data_ops() const {
1271 return make_range(data_operands_begin(), data_operands_end());
1272 }
1273 bool data_operands_empty() const {
1274 return data_operands_end() == data_operands_begin();
1275 }
1276 unsigned data_operands_size() const {
1277 return std::distance(data_operands_begin(), data_operands_end());
1278 }
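For example, the data-operand range above can be consumed like this (a sketch assuming `using namespace llvm`; the helper name is hypothetical):

// Data operands are the call arguments plus any operand bundle inputs, but
// not the callee itself.
static bool usesValueAsDataOperand(const CallBase &CB, const Value *V) {
  for (const Value *Op : CB.data_ops())
    if (Op == V)
      return true;
  return false;
}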
1279
1280 bool isDataOperand(const Use *U) const {
1281 assert(this == U->getUser() &&
1282 "Only valid to query with a use of this instruction!");
1283 return data_operands_begin() <= U && U < data_operands_end();
1284 }
1285 bool isDataOperand(Value::const_user_iterator UI) const {
1286 return isDataOperand(&UI.getUse());
1287 }
1288
1289 /// Given a value use iterator, return the data operand corresponding to it.
1290 /// Iterator must actually correspond to a data operand.
1291 unsigned getDataOperandNo(Value::const_user_iterator UI) const {
1292 return getDataOperandNo(&UI.getUse());
1293 }
1294
1295 /// Given a use for a data operand, get the data operand number that
1296 /// corresponds to it.
1297 unsigned getDataOperandNo(const Use *U) const {
1298 assert(isDataOperand(U) && "Data operand # out of range!");
1299 return U - data_operands_begin();
1300 }
1301
1302 /// Return the iterator pointing to the beginning of the argument list.
1303 User::op_iterator arg_begin() { return op_begin(); }
1304 User::const_op_iterator arg_begin() const {
1305 return const_cast<CallBase *>(this)->arg_begin();
1306 }
1307
1308 /// Return the iterator pointing to the end of the argument list.
1309 User::op_iterator arg_end() {
1310 // From the end of the data operands, walk backwards past the bundle
1311 // operands.
1312 return data_operands_end() - getNumTotalBundleOperands();
1313 }
1314 User::const_op_iterator arg_end() const {
1315 return const_cast<CallBase *>(this)->arg_end();
1316 }
1317
1318 /// Iteration adapter for range-for loops.
1319 iterator_range<User::op_iterator> args() {
1320 return make_range(arg_begin(), arg_end());
1321 }
1322 iterator_range<User::const_op_iterator> args() const {
1323 return make_range(arg_begin(), arg_end());
1324 }
1325 bool arg_empty() const { return arg_end() == arg_begin(); }
1326 unsigned arg_size() const { return arg_end() - arg_begin(); }
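A short sketch of iterating the argument range defined above (assuming `using namespace llvm`; the helper name is hypothetical):

// Count pointer-typed arguments of a call site using the args() adapter.
static unsigned countPointerArgs(const CallBase &CB) {
  unsigned N = 0;
  for (const Value *Arg : CB.args())
    if (Arg->getType()->isPointerTy())
      ++N;
  return N;
}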
1327
1328 // Legacy API names that duplicate the above and will be removed once users
1329 // are migrated.
1330 iterator_range<User::op_iterator> arg_operands() {
1331 return make_range(arg_begin(), arg_end());
1332 }
1333 iterator_range<User::const_op_iterator> arg_operands() const {
1334 return make_range(arg_begin(), arg_end());
1335 }
1336 unsigned getNumArgOperands() const { return arg_size(); }
1337
1338 Value *getArgOperand(unsigned i) const {
1339 assert(i < getNumArgOperands() && "Out of bounds!");
1340 return getOperand(i);
1341 }
1342
1343 void setArgOperand(unsigned i, Value *v) {
1344 assert(i < getNumArgOperands() && "Out of bounds!");
1345 setOperand(i, v);
1346 }
1347
1348 /// Wrappers for getting the \c Use of a call argument.
1349 const Use &getArgOperandUse(unsigned i) const {
1350 assert(i < getNumArgOperands() && "Out of bounds!");
1351 return User::getOperandUse(i);
1352 }
1353 Use &getArgOperandUse(unsigned i) {
1354 assert(i < getNumArgOperands() && "Out of bounds!");
1355 return User::getOperandUse(i);
1356 }
1357
1358 bool isArgOperand(const Use *U) const {
1359 assert(this == U->getUser() &&
1360 "Only valid to query with a use of this instruction!");
1361 return arg_begin() <= U && U < arg_end();
1362 }
1363 bool isArgOperand(Value::const_user_iterator UI) const {
1364 return isArgOperand(&UI.getUse());
1365 }
1366
1367 /// Given a use for an arg operand, get the arg operand number that
1368 /// corresponds to it.
1369 unsigned getArgOperandNo(const Use *U) const {
1370 assert(isArgOperand(U) && "Arg operand # out of range!");
1371 return U - arg_begin();
1372 }
1373
1374 /// Given a value use iterator, return the arg operand number corresponding to
1375 /// it. Iterator must actually correspond to an arg operand.
1376 unsigned getArgOperandNo(Value::const_user_iterator UI) const {
1377 return getArgOperandNo(&UI.getUse());
1378 }
1379
1380 /// Returns true if this CallSite passes the given Value* as an argument to
1381 /// the called function.
1382 bool hasArgument(const Value *V) const {
1383 return llvm::is_contained(args(), V);
1384 }
1385
1386 Value *getCalledOperand() const { return Op<CalledOperandOpEndIdx>(); }
1387
1388 const Use &getCalledOperandUse() const { return Op<CalledOperandOpEndIdx>(); }
1389 Use &getCalledOperandUse() { return Op<CalledOperandOpEndIdx>(); }
1390
1391 /// Returns the function called, or null if this is an
1392 /// indirect function invocation.
1393 Function *getCalledFunction() const {
1394 return dyn_cast_or_null<Function>(getCalledOperand());
1395 }
1396
1397 /// Return true if the callsite is an indirect call.
1398 bool isIndirectCall() const;
1399
1400 /// Determine whether the passed iterator points to the callee operand's Use.
1401 bool isCallee(Value::const_user_iterator UI) const {
1402 return isCallee(&UI.getUse());
1403 }
1404
1405 /// Determine whether this Use is the callee operand's Use.
1406 bool isCallee(const Use *U) const { return &getCalledOperandUse() == U; }
1407
1408 /// Helper to get the caller (the parent function).
1409 Function *getCaller();
1410 const Function *getCaller() const {
1411 return const_cast<CallBase *>(this)->getCaller();
1412 }
1413
1414 /// Tests if this call site must be tail call optimized. Only a CallInst can
1415 /// be tail call optimized.
1416 bool isMustTailCall() const;
1417
1418 /// Tests if this call site is marked as a tail call.
1419 bool isTailCall() const;
1420
1421 /// Returns the intrinsic ID of the intrinsic called or
1422 /// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
1423 /// this is an indirect call.
1424 Intrinsic::ID getIntrinsicID() const;
1425
1426 void setCalledOperand(Value *V) { Op<CalledOperandOpEndIdx>() = V; }
1427
1428 /// Sets the function called, including updating the function type.
1429 void setCalledFunction(Function *Fn) {
1430 setCalledFunction(Fn->getFunctionType(), Fn);
1431 }
1432
1433 /// Sets the function called, including updating the function type.
1434 void setCalledFunction(FunctionCallee Fn) {
1435 setCalledFunction(Fn.getFunctionType(), Fn.getCallee());
1436 }
1437
1438 /// Sets the function called, including updating to the specified function
1439 /// type.
1440 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1441 this->FTy = FTy;
1442 assert(cast<PointerType>(Fn->getType())->isOpaqueOrPointeeTypeMatches(FTy));
1443 // This function doesn't mutate the return type, only the function
1444 // type. Seems broken, but I'm just gonna stick an assert in for now.
1445 assert(getType() == FTy->getReturnType());
1446 setCalledOperand(Fn);
1447 }
1448
1449 CallingConv::ID getCallingConv() const {
1450 return getSubclassData<CallingConvField>();
1451 }
1452
1453 void setCallingConv(CallingConv::ID CC) {
1454 setSubclassData<CallingConvField>(CC);
1455 }
1456
1457 /// Check if this call is an inline asm statement.
1458 bool isInlineAsm() const { return isa<InlineAsm>(getCalledOperand()); }
1459
1460 /// \name Attribute API
1461 ///
1462 /// These methods access and modify attributes on this call (including
1463 /// looking through to the attributes on the called function when necessary).
1464 ///@{
1465
1466 /// Return the parameter attributes for this call.
1467 ///
1468 AttributeList getAttributes() const { return Attrs; }
1469
1470 /// Set the parameter attributes for this call.
1471 ///
1472 void setAttributes(AttributeList A) { Attrs = A; }
1473
1474 /// Determine whether this call has the given attribute. If it does not
1475 /// then determine if the called function has the attribute, but only if
1476 /// the attribute is allowed for the call.
1477 bool hasFnAttr(Attribute::AttrKind Kind) const {
1478 assert(Kind != Attribute::NoBuiltin &&
1479 "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin");
1480 return hasFnAttrImpl(Kind);
19. Calling 'CallBase::hasFnAttrImpl'
28. Returning from 'CallBase::hasFnAttrImpl'
29. Returning value, which participates in a condition later
1481 }
1482
1483 /// Determine whether this call has the given attribute. If it does not
1484 /// then determine if the called function has the attribute, but only if
1485 /// the attribute is allowed for the call.
1486 bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
1487
1488 /// adds the attribute to the list of attributes.
1489 void addAttribute(unsigned i, Attribute::AttrKind Kind) {
1490 AttributeList PAL = getAttributes();
1491 PAL = PAL.addAttribute(getContext(), i, Kind);
1492 setAttributes(PAL);
1493 }
1494
1495 /// adds the attribute to the list of attributes.
1496 void addAttribute(unsigned i, Attribute Attr) {
1497 AttributeList PAL = getAttributes();
1498 PAL = PAL.addAttribute(getContext(), i, Attr);
1499 setAttributes(PAL);
1500 }
1501
1502 /// Adds the attribute to the indicated argument
1503 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1504 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1505 AttributeList PAL = getAttributes();
1506 PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
1507 setAttributes(PAL);
1508 }
1509
1510 /// Adds the attribute to the indicated argument
1511 void addParamAttr(unsigned ArgNo, Attribute Attr) {
1512 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1513 AttributeList PAL = getAttributes();
1514 PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
1515 setAttributes(PAL);
1516 }
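For instance, the attribute mutators above compose as follows (a sketch assuming `using namespace llvm`; the helper name is hypothetical):

// Mark the first call argument as nonnull, if the call has any arguments.
static void markFirstArgNonNull(CallBase &CB) {
  if (CB.getNumArgOperands() != 0)
    CB.addParamAttr(0, Attribute::NonNull);
}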
1517
1518 /// removes the attribute from the list of attributes.
1519 void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
1520 AttributeList PAL = getAttributes();
1521 PAL = PAL.removeAttribute(getContext(), i, Kind);
1522 setAttributes(PAL);
1523 }
1524
1525 /// removes the attribute from the list of attributes.
1526 void removeAttribute(unsigned i, StringRef Kind) {
1527 AttributeList PAL = getAttributes();
1528 PAL = PAL.removeAttribute(getContext(), i, Kind);
1529 setAttributes(PAL);
1530 }
1531
1532 void removeAttributes(unsigned i, const AttrBuilder &Attrs) {
1533 AttributeList PAL = getAttributes();
1534 PAL = PAL.removeAttributes(getContext(), i, Attrs);
1535 setAttributes(PAL);
1536 }
1537
1538 /// Removes the attribute from the given argument
1539 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1540 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1541 AttributeList PAL = getAttributes();
1542 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1543 setAttributes(PAL);
1544 }
1545
1546 /// Removes the attribute from the given argument
1547 void removeParamAttr(unsigned ArgNo, StringRef Kind) {
1548 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1549 AttributeList PAL = getAttributes();
1550 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1551 setAttributes(PAL);
1552 }
1553
1554 /// Removes the attributes from the given argument
1555 void removeParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs) {
1556 AttributeList PAL = getAttributes();
1557 PAL = PAL.removeParamAttributes(getContext(), ArgNo, Attrs);
1558 setAttributes(PAL);
1559 }
1560
1561 /// adds the dereferenceable attribute to the list of attributes.
1562 void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
1563 AttributeList PAL = getAttributes();
1564 PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
1565 setAttributes(PAL);
1566 }
1567
1568 /// adds the dereferenceable_or_null attribute to the list of
1569 /// attributes.
1570 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
1571 AttributeList PAL = getAttributes();
1572 PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
1573 setAttributes(PAL);
1574 }
1575
1576 /// Determine whether the return value has the given attribute.
1577 bool hasRetAttr(Attribute::AttrKind Kind) const {
1578 return hasRetAttrImpl(Kind);
1579 }
1580 /// Determine whether the return value has the given attribute.
1581 bool hasRetAttr(StringRef Kind) const { return hasRetAttrImpl(Kind); }
1582
1583 /// Determine whether the argument or parameter has the given attribute.
1584 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const;
1585
1586 /// Get the attribute of a given kind at a position.
1587 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
1588 return getAttributes().getAttribute(i, Kind);
1589 }
1590
1591 /// Get the attribute of a given kind at a position.
1592 Attribute getAttribute(unsigned i, StringRef Kind) const {
1593 return getAttributes().getAttribute(i, Kind);
1594 }
1595
1596 /// Get the attribute of a given kind from a given arg
1597 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1598 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1599 return getAttributes().getParamAttr(ArgNo, Kind);
1600 }
1601
1602 /// Get the attribute of a given kind from a given arg
1603 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1604 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1605 return getAttributes().getParamAttr(ArgNo, Kind);
1606 }
1607
1608 /// Return true if the data operand at index \p i has the attribute \p
1609 /// A.
1610 ///
1611 /// Data operands include call arguments and values used in operand bundles,
1612 /// but do not include the callee operand. This routine dispatches to the
1613 /// underlying AttributeList or the OperandBundleUser as appropriate.
1614 ///
1615 /// The index \p i is interpreted as
1616 ///
1617 /// \p i == Attribute::ReturnIndex -> the return value
1618 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1619 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1620 /// (\p i - 1) in the operand list.
1621 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
1622 // Note that we have to add one because `i` isn't zero-indexed.
1623 assert(i < (getNumArgOperands() + getNumTotalBundleOperands() + 1) &&
1624 "Data operand index out of bounds!");
1625
1626 // The attribute A can either be directly specified, if the operand in
1627 // question is a call argument; or be indirectly implied by the kind of its
1628 // containing operand bundle, if the operand is a bundle operand.
1629
1630 if (i == AttributeList::ReturnIndex)
1631 return hasRetAttr(Kind);
1632
1633 // FIXME: Avoid these i - 1 calculations and update the API to use
1634 // zero-based indices.
1635 if (i < (getNumArgOperands() + 1))
1636 return paramHasAttr(i - 1, Kind);
1637
1638 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
1639 "Must be either a call argument or an operand bundle!");
1640 return bundleOperandHasAttr(i - 1, Kind);
1641 }
1642
1643 /// Determine whether this data operand is not captured.
1644 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1645 // better indicate that this may return a conservative answer.
1646 bool doesNotCapture(unsigned OpNo) const {
1647 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::NoCapture);
1648 }
1649
1650 /// Determine whether this argument is passed by value.
1651 bool isByValArgument(unsigned ArgNo) const {
1652 return paramHasAttr(ArgNo, Attribute::ByVal);
1653 }
1654
1655 /// Determine whether this argument is passed in an alloca.
1656 bool isInAllocaArgument(unsigned ArgNo) const {
1657 return paramHasAttr(ArgNo, Attribute::InAlloca);
1658 }
1659
1660 /// Determine whether this argument is passed by value, in an alloca, or is
1661 /// preallocated.
1662 bool isPassPointeeByValueArgument(unsigned ArgNo) const {
1663 return paramHasAttr(ArgNo, Attribute::ByVal) ||
1664 paramHasAttr(ArgNo, Attribute::InAlloca) ||
1665 paramHasAttr(ArgNo, Attribute::Preallocated);
1666 }
1667
1668 /// Determine whether passing undef to this argument is undefined behavior.
1669 /// If passing undef to this argument is UB, passing poison is UB as well
1670 /// because poison is more undefined than undef.
1671 bool isPassingUndefUB(unsigned ArgNo) const {
1672 return paramHasAttr(ArgNo, Attribute::NoUndef) ||
1673 // dereferenceable implies noundef.
1674 paramHasAttr(ArgNo, Attribute::Dereferenceable) ||
1675 // dereferenceable implies noundef, and null is a well-defined value.
1676 paramHasAttr(ArgNo, Attribute::DereferenceableOrNull);
1677 }
1678
1679 /// Determine if there is an inalloca argument. Only the last argument can
1680 /// have the inalloca attribute.
1681 bool hasInAllocaArgument() const {
1682 return !arg_empty() && paramHasAttr(arg_size() - 1, Attribute::InAlloca);
1683 }
1684
1685 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1686 // better indicate that this may return a conservative answer.
1687 bool doesNotAccessMemory(unsigned OpNo) const {
1688 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1689 }
1690
1691 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1692 // better indicate that this may return a conservative answer.
1693 bool onlyReadsMemory(unsigned OpNo) const {
1694 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
1695 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1696 }
1697
1698 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1699 // better indicate that this may return a conservative answer.
1700 bool doesNotReadMemory(unsigned OpNo) const {
1701 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::WriteOnly) ||
1702 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1703 }
1704
1705 /// Extract the alignment of the return value.
1706 MaybeAlign getRetAlign() const { return Attrs.getRetAlignment(); }
1707
1708 /// Extract the alignment for a call or parameter (0=unknown).
1709 MaybeAlign getParamAlign(unsigned ArgNo) const {
1710 return Attrs.getParamAlignment(ArgNo);
1711 }
1712
1713 MaybeAlign getParamStackAlign(unsigned ArgNo) const {
1714 return Attrs.getParamStackAlignment(ArgNo);
1715 }
1716
1717 /// Extract the byval type for a call or parameter.
1718 Type *getParamByValType(unsigned ArgNo) const {
1719 if (auto *Ty = Attrs.getParamByValType(ArgNo))
1720 return Ty;
1721 if (const Function *F = getCalledFunction())
1722 return F->getAttributes().getParamByValType(ArgNo);
1723 return nullptr;
1724 }
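A sketch of how the byval type query above is typically consumed (assuming `using namespace llvm` and llvm/IR/DataLayout.h; the helper name is hypothetical):

// Size of a byval argument: getParamByValType already prefers the call-site
// type and falls back to the callee's attributes.
static uint64_t byValArgSize(const CallBase &CB, unsigned ArgNo,
                             const DataLayout &DL) {
  if (Type *Ty = CB.getParamByValType(ArgNo))
    return DL.getTypeAllocSize(Ty);
  return 0; // Unknown.
}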
1725
1726 /// Extract the preallocated type for a call or parameter.
1727 Type *getParamPreallocatedType(unsigned ArgNo) const {
1728 if (auto *Ty = Attrs.getParamPreallocatedType(ArgNo))
1729 return Ty;
1730 if (const Function *F = getCalledFunction())
1731 return F->getAttributes().getParamPreallocatedType(ArgNo);
1732 return nullptr;
1733 }
1734
1735 /// Extract the inalloca type for a call or parameter.
1736 Type *getParamInAllocaType(unsigned ArgNo) const {
1737 if (auto *Ty = Attrs.getParamInAllocaType(ArgNo))
1738 return Ty;
1739 if (const Function *F = getCalledFunction())
1740 return F->getAttributes().getParamInAllocaType(ArgNo);
1741 return nullptr;
1742 }
1743
1744 /// Extract the number of dereferenceable bytes for a call or
1745 /// parameter (0=unknown).
1746 uint64_t getDereferenceableBytes(unsigned i) const {
1747 return Attrs.getDereferenceableBytes(i);
1748 }
1749
1750 /// Extract the number of dereferenceable_or_null bytes for a call or
1751 /// parameter (0=unknown).
1752 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
1753 return Attrs.getDereferenceableOrNullBytes(i);
1754 }
1755
1756 /// Return true if the return value is known to be not null.
1757 /// This may be because it has the nonnull attribute, or because at least
1758 /// one byte is dereferenceable and the pointer is in addrspace(0).
1759 bool isReturnNonNull() const;
1760
1761 /// Determine if the return value is marked with NoAlias attribute.
1762 bool returnDoesNotAlias() const {
1763 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1764 }
1765
1766 /// If one of the arguments has the 'returned' attribute, returns its
1767 /// operand value. Otherwise, return nullptr.
1768 Value *getReturnedArgOperand() const;
1769
1770 /// Return true if the call should not be treated as a call to a
1771 /// builtin.
1772 bool isNoBuiltin() const {
1773 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1774 !hasFnAttrImpl(Attribute::Builtin);
1775 }
1776
1777 /// Determine if the call requires strict floating point semantics.
1778 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1779
1780 /// Return true if the call should not be inlined.
1781 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1782 void setIsNoInline() {
1783 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
1784 }
1785 /// Determine if the call does not access memory.
1786 bool doesNotAccessMemory() const { return hasFnAttr(Attribute::ReadNone); }
1787 void setDoesNotAccessMemory() {
1788 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
1789 }
1790
1791 /// Determine if the call does not access or only reads memory.
1792 bool onlyReadsMemory() const {
1793 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1794 }
1795
1796 void setOnlyReadsMemory() {
1797 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
1798 }
1799
1800 /// Determine if the call does not access or only writes memory.
1801 bool doesNotReadMemory() const {
1802 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1803 }
1804 void setDoesNotReadMemory() {
1805 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
1806 }
1807
1808 /// Determine if the call can access memory only using pointers based
1809 /// on its arguments.
1810 bool onlyAccessesArgMemory() const {
1811 return hasFnAttr(Attribute::ArgMemOnly);
1812 }
1813 void setOnlyAccessesArgMemory() {
1814 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
1815 }
1816
1817 /// Determine if the function may only access memory that is
1818 /// inaccessible from the IR.
1819 bool onlyAccessesInaccessibleMemory() const {
1820 return hasFnAttr(Attribute::InaccessibleMemOnly);
1821 }
1822 void setOnlyAccessesInaccessibleMemory() {
1823 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
1824 }
1825
1826 /// Determine if the function may only access memory that is
1827 /// either inaccessible from the IR or pointed to by its arguments.
1828 bool onlyAccessesInaccessibleMemOrArgMem() const {
1829 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1830 }
1831 void setOnlyAccessesInaccessibleMemOrArgMem() {
1832 addAttribute(AttributeList::FunctionIndex,
1833 Attribute::InaccessibleMemOrArgMemOnly);
1834 }
1835 /// Determine if the call cannot return.
1836 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1837 void setDoesNotReturn() {
1838 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
1839 }
1840
1841 /// Determine if the call should not perform indirect branch tracking.
1842 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
1843
1844 /// Determine if the call cannot unwind.
1845 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1846 void setDoesNotThrow() {
1847 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
1848 }
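These predicates are the kind of queries the LICM pass analyzed in this report makes before hoisting a call; a rough sketch (assuming `using namespace llvm`; not the actual LICM predicate):

// A call is a plausible hoisting candidate only if it cannot write memory and
// cannot unwind; real LICM performs additional legality and safety checks.
static bool mayBeHoistableCall(const CallBase &CB) {
  return CB.onlyReadsMemory() && CB.doesNotThrow();
}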
1849
1850 /// Determine if the invoke cannot be duplicated.
1851 bool cannotDuplicate() const { return hasFnAttr(Attribute::NoDuplicate); }
1852 void setCannotDuplicate() {
1853 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
1854 }
1855
1856 /// Determine if the call cannot be tail merged.
1857 bool cannotMerge() const { return hasFnAttr(Attribute::NoMerge); }
1858 void setCannotMerge() {
1859 addAttribute(AttributeList::FunctionIndex, Attribute::NoMerge);
1860 }
1861
1862 /// Determine if the invoke is convergent
1863 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
18. Calling 'CallBase::hasFnAttr'
30. Returning from 'CallBase::hasFnAttr'
31. Returning value, which participates in a condition later
1864 void setConvergent() {
1865 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1866 }
1867 void setNotConvergent() {
1868 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1869 }
1870
1871 /// Determine if the call returns a structure through first
1872 /// pointer argument.
1873 bool hasStructRetAttr() const {
1874 if (getNumArgOperands() == 0)
1875 return false;
1876
1877 // Be friendly and also check the callee.
1878 return paramHasAttr(0, Attribute::StructRet);
1879 }
1880
1881 /// Determine if any call argument is an aggregate passed by value.
1882 bool hasByValArgument() const {
1883 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1884 }
1885
1886 ///@{
1887 // End of attribute API.
1888
1889 /// \name Operand Bundle API
1890 ///
1891 /// This group of methods provides the API to access and manipulate operand
1892 /// bundles on this call.
1893 /// @{
1894
1895 /// Return the number of operand bundles associated with this User.
1896 unsigned getNumOperandBundles() const {
1897 return std::distance(bundle_op_info_begin(), bundle_op_info_end());
1898 }
1899
1900 /// Return true if this User has any operand bundles.
1901 bool hasOperandBundles() const { return getNumOperandBundles() != 0; }
1902
1903 /// Return the index of the first bundle operand in the Use array.
1904 unsigned getBundleOperandsStartIndex() const {
1905 assert(hasOperandBundles() && "Don't call otherwise!");
1906 return bundle_op_info_begin()->Begin;
1907 }
1908
1909 /// Return the index of the last bundle operand in the Use array.
1910 unsigned getBundleOperandsEndIndex() const {
1911 assert(hasOperandBundles() && "Don't call otherwise!");
1912 return bundle_op_info_end()[-1].End;
1913 }
1914
1915 /// Return true if the operand at index \p Idx is a bundle operand.
1916 bool isBundleOperand(unsigned Idx) const {
1917 return hasOperandBundles() && Idx >= getBundleOperandsStartIndex() &&
1918 Idx < getBundleOperandsEndIndex();
1919 }
1920
1921 /// Returns true if the use is a bundle operand.
1922 bool isBundleOperand(const Use *U) const {
1923 assert(this == U->getUser() &&
1924 "Only valid to query with a use of this instruction!");
1925 return hasOperandBundles() && isBundleOperand(U - op_begin());
1926 }
1927 bool isBundleOperand(Value::const_user_iterator UI) const {
1928 return isBundleOperand(&UI.getUse());
1929 }
1930
1931 /// Return the total number of operands (not operand bundles) used by
1932 /// every operand bundle in this OperandBundleUser.
1933 unsigned getNumTotalBundleOperands() const {
1934 if (!hasOperandBundles())
1935 return 0;
1936
1937 unsigned Begin = getBundleOperandsStartIndex();
1938 unsigned End = getBundleOperandsEndIndex();
1939
1940 assert(Begin <= End && "Should be!");
1941 return End - Begin;
1942 }
1943
1944 /// Return the operand bundle at a specific index.
1945 OperandBundleUse getOperandBundleAt(unsigned Index) const {
1946 assert(Index < getNumOperandBundles() && "Index out of bounds!");
1947 return operandBundleFromBundleOpInfo(*(bundle_op_info_begin() + Index));
1948 }
1949
1950 /// Return the number of operand bundles with the tag Name attached to
1951 /// this instruction.
1952 unsigned countOperandBundlesOfType(StringRef Name) const {
1953 unsigned Count = 0;
1954 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1955 if (getOperandBundleAt(i).getTagName() == Name)
1956 Count++;
1957
1958 return Count;
1959 }
1960
1961 /// Return the number of operand bundles with the tag ID attached to
1962 /// this instruction.
1963 unsigned countOperandBundlesOfType(uint32_t ID) const {
1964 unsigned Count = 0;
1965 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1966 if (getOperandBundleAt(i).getTagID() == ID)
1967 Count++;
1968
1969 return Count;
1970 }
1971
1972 /// Return an operand bundle by name, if present.
1973 ///
1974 /// It is an error to call this for operand bundle types that may have
1975 /// multiple instances of them on the same instruction.
1976 Optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
1977 assert(countOperandBundlesOfType(Name) < 2 && "Precondition violated!");
1978
1979 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1980 OperandBundleUse U = getOperandBundleAt(i);
1981 if (U.getTagName() == Name)
1982 return U;
1983 }
1984
1985 return None;
1986 }
1987
1988 /// Return an operand bundle by tag ID, if present.
1989 ///
1990 /// It is an error to call this for operand bundle types that may have
1991 /// multiple instances of them on the same instruction.
1992 Optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
1993 assert(countOperandBundlesOfType(ID) < 2 && "Precondition violated!");
1994
1995 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1996 OperandBundleUse U = getOperandBundleAt(i);
1997 if (U.getTagID() == ID)
1998 return U;
1999 }
2000
2001 return None;
2002 }
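For example, a single well-known bundle can be looked up through the query above (a sketch assuming `using namespace llvm`; the helper name is hypothetical):

// Fetch the "deopt" bundle, if this call carries one. The precondition holds
// because at most one deopt bundle is permitted per call site.
static Optional<OperandBundleUse> getDeoptBundle(const CallBase &CB) {
  return CB.getOperandBundle(LLVMContext::OB_deopt);
}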
2003
2004 /// Return the list of operand bundles attached to this instruction as
2005 /// a vector of OperandBundleDefs.
2006 ///
2007 /// This function copies the OperandBundleUse instances associated with this
2008 /// OperandBundleUser to a vector of OperandBundleDefs. Note:
2009 /// OperandBundleUses and OperandBundleDefs are non-trivially *different*
2010 /// representations of operand bundles (see documentation above).
2011 void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const;
2012
2013 /// Return the operand bundle for the operand at index OpIdx.
2014 ///
2015 /// It is an error to call this with an OpIdx that does not correspond to a
2016 /// bundle operand.
2017 OperandBundleUse getOperandBundleForOperand(unsigned OpIdx) const {
2018 return operandBundleFromBundleOpInfo(getBundleOpInfoForOperand(OpIdx));
2019 }
2020
2021 /// Return true if this operand bundle user has operand bundles that
2022 /// may read from the heap.
2023 bool hasReadingOperandBundles() const;
2024
2025 /// Return true if this operand bundle user has operand bundles that
2026 /// may write to the heap.
2027 bool hasClobberingOperandBundles() const {
2028 for (auto &BOI : bundle_op_infos()) {
2029 if (BOI.Tag->second == LLVMContext::OB_deopt ||
2030 BOI.Tag->second == LLVMContext::OB_funclet)
2031 continue;
2032
2033 // This instruction has an operand bundle that is not known to us.
2034 // Assume the worst.
2035 return true;
2036 }
2037
2038 return false;
2039 }
2040
2041 /// Return true if the bundle operand at index \p OpIdx has the
2042 /// attribute \p A.
2043 bool bundleOperandHasAttr(unsigned OpIdx, Attribute::AttrKind A) const {
2044 auto &BOI = getBundleOpInfoForOperand(OpIdx);
2045 auto OBU = operandBundleFromBundleOpInfo(BOI);
2046 return OBU.operandHasAttr(OpIdx - BOI.Begin, A);
2047 }
2048
2049 /// Return true if \p Other has the same sequence of operand bundle
2050 /// tags with the same number of operands on each one of them as this
2051 /// OperandBundleUser.
2052 bool hasIdenticalOperandBundleSchema(const CallBase &Other) const {
2053 if (getNumOperandBundles() != Other.getNumOperandBundles())
2054 return false;
2055
2056 return std::equal(bundle_op_info_begin(), bundle_op_info_end(),
2057 Other.bundle_op_info_begin());
2058 }
2059
2060 /// Return true if this operand bundle user contains operand bundles
2061 /// with tags other than those specified in \p IDs.
2062 bool hasOperandBundlesOtherThan(ArrayRef<uint32_t> IDs) const {
2063 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
2064 uint32_t ID = getOperandBundleAt(i).getTagID();
2065 if (!is_contained(IDs, ID))
2066 return true;
2067 }
2068 return false;
2069 }
2070
2071 /// Is the function attribute S disallowed by some operand bundle on
2072 /// this operand bundle user?
2073 bool isFnAttrDisallowedByOpBundle(StringRef S) const {
2074 // Operand bundles only possibly disallow readnone, readonly and argmemonly
2075 // attributes. All String attributes are fine.
2076 return false;
2077 }
2078
2079 /// Is the function attribute A disallowed by some operand bundle on
2080 /// this operand bundle user?
2081 bool isFnAttrDisallowedByOpBundle(Attribute::AttrKind A) const {
2082 switch (A) {
23. Control jumps to the 'default' case at line 2083
2083 default:
2084 return false;
24. Returning zero, which participates in a condition later
2085
2086 case Attribute::InaccessibleMemOrArgMemOnly:
2087 return hasReadingOperandBundles();
2088
2089 case Attribute::InaccessibleMemOnly:
2090 return hasReadingOperandBundles();
2091
2092 case Attribute::ArgMemOnly:
2093 return hasReadingOperandBundles();
2094
2095 case Attribute::ReadNone:
2096 return hasReadingOperandBundles();
2097
2098 case Attribute::ReadOnly:
2099 return hasClobberingOperandBundles();
2100 }
2101
2102 llvm_unreachable("switch has a default case!");
2103 }
2104
2105 /// Used to keep track of an operand bundle. See the main comment on
2106 /// OperandBundleUser above.
2107 struct BundleOpInfo {
2108 /// The operand bundle tag, interned by
2109 /// LLVMContextImpl::getOrInsertBundleTag.
2110 StringMapEntry<uint32_t> *Tag;
2111
2112 /// The index in the Use& vector where operands for this operand
2113 /// bundle starts.
2114 uint32_t Begin;
2115
2116 /// The index in the Use& vector where operands for this operand
2117 /// bundle ends.
2118 uint32_t End;
2119
2120 bool operator==(const BundleOpInfo &Other) const {
2121 return Tag == Other.Tag && Begin == Other.Begin && End == Other.End;
2122 }
2123 };
2124
2125 /// Simple helper function to map a BundleOpInfo to an
2126 /// OperandBundleUse.
2127 OperandBundleUse
2128 operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const {
2129 auto begin = op_begin();
2130 ArrayRef<Use> Inputs(begin + BOI.Begin, begin + BOI.End);
2131 return OperandBundleUse(BOI.Tag, Inputs);
2132 }
2133
2134 using bundle_op_iterator = BundleOpInfo *;
2135 using const_bundle_op_iterator = const BundleOpInfo *;
2136
2137 /// Return the start of the list of BundleOpInfo instances associated
2138 /// with this OperandBundleUser.
2139 ///
2140 /// OperandBundleUser uses the descriptor area co-allocated with the host User
2141 /// to store some meta information about which operands are "normal" operands,
2142 /// and which ones belong to some operand bundle.
2143 ///
2144 /// The layout of an operand bundle user is
2145 ///
2146 /// +-----------uint32_t End-------------------------------------+
2147 /// | |
2148 /// | +--------uint32_t Begin--------------------+ |
2149 /// | | | |
2150 /// ^ ^ v v
2151 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2152 /// | BOI0 | BOI1 | .. | DU | U0 | U1 | .. | BOI0_U0 | .. | BOI1_U0 | .. | Un
2153 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2154 /// v v ^ ^
2155 /// | | | |
2156 /// | +--------uint32_t Begin------------+ |
2157 /// | |
2158 /// +-----------uint32_t End-----------------------------+
2159 ///
2160 ///
2161 /// BOI0, BOI1 ... are descriptions of operand bundles in this User's use
2162 /// list. These descriptions are installed and managed by this class, and
2163 /// they're all instances of OperandBundleUser<T>::BundleOpInfo.
2164 ///
2165 /// DU is an additional descriptor installed by User's 'operator new' to keep
2166 /// track of the 'BOI0 ... BOIN' co-allocation. OperandBundleUser does not
2167 /// access or modify DU in any way, it's an implementation detail private to
2168 /// User.
2169 ///
2170 /// The regular Use& vector for the User starts at U0. The operand bundle
2171 /// uses are part of the Use& vector, just like normal uses. In the diagram
2172 /// above, the operand bundle uses start at BOI0_U0. Each instance of
2173 /// BundleOpInfo has information about a contiguous set of uses constituting
2174 /// an operand bundle, and the total set of operand bundle uses themselves
2175 /// form a contiguous set of uses (i.e. there are no gaps between uses
2176 /// corresponding to individual operand bundles).
2177 ///
2178 /// This class does not know the location of the set of operand bundle uses
2179 /// within the use list -- that is decided by the User using this class via
2180 /// the BeginIdx argument in populateBundleOperandInfos.
2181 ///
2182 /// Currently operand bundle users with hung-off operands are not supported.
2183 bundle_op_iterator bundle_op_info_begin() {
2184 if (!hasDescriptor())
2185 return nullptr;
2186
2187 uint8_t *BytesBegin = getDescriptor().begin();
2188 return reinterpret_cast<bundle_op_iterator>(BytesBegin);
2189 }
2190
2191 /// Return the start of the list of BundleOpInfo instances associated
2192 /// with this OperandBundleUser.
2193 const_bundle_op_iterator bundle_op_info_begin() const {
2194 auto *NonConstThis = const_cast<CallBase *>(this);
2195 return NonConstThis->bundle_op_info_begin();
2196 }
2197
2198 /// Return the end of the list of BundleOpInfo instances associated
2199 /// with this OperandBundleUser.
2200 bundle_op_iterator bundle_op_info_end() {
2201 if (!hasDescriptor())
2202 return nullptr;
2203
2204 uint8_t *BytesEnd = getDescriptor().end();
2205 return reinterpret_cast<bundle_op_iterator>(BytesEnd);
2206 }
2207
2208 /// Return the end of the list of BundleOpInfo instances associated
2209 /// with this OperandBundleUser.
2210 const_bundle_op_iterator bundle_op_info_end() const {
2211 auto *NonConstThis = const_cast<CallBase *>(this);
2212 return NonConstThis->bundle_op_info_end();
2213 }
2214
2215 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2216 iterator_range<bundle_op_iterator> bundle_op_infos() {
2217 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2218 }
2219
2220 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2221 iterator_range<const_bundle_op_iterator> bundle_op_infos() const {
2222 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2223 }
2224
2225 /// Populate the BundleOpInfo instances and the Use& vector from \p
2226 /// Bundles. Return the op_iterator pointing to the Use& one past the last
2227 /// bundle operand use.
2228 ///
2229 /// Each \p OperandBundleDef instance is tracked by a BundleOpInfo
2230 /// instance allocated in this User's descriptor.
2231 op_iterator populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
2232 const unsigned BeginIndex);
2233
2234public:
2235 /// Return the BundleOpInfo for the operand at index OpIdx.
2236 ///
2237 /// It is an error to call this with an OpIdx that does not correspond to a
2238 /// bundle operand.
2239 BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx);
2240 const BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx) const {
2241 return const_cast<CallBase *>(this)->getBundleOpInfoForOperand(OpIdx);
2242 }
2243
2244protected:
2245 /// Return the total number of values used in \p Bundles.
2246 static unsigned CountBundleInputs(ArrayRef<OperandBundleDef> Bundles) {
2247 unsigned Total = 0;
2248 for (auto &B : Bundles)
2249 Total += B.input_size();
2250 return Total;
2251 }
2252
2253 /// @}
2254 // End of operand bundle API.
2255
2256private:
2257 bool hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
2258 bool hasFnAttrOnCalledFunction(StringRef Kind) const;
2259
2260 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
2261 if (Attrs.hasFnAttribute(Kind))
20. Assuming the condition is false
21. Taking false branch
2262 return true;
2263
2264 // Operand bundles override attributes on the called function, but don't
2265 // override attributes directly present on the call instruction.
2266 if (isFnAttrDisallowedByOpBundle(Kind))
22. Calling 'CallBase::isFnAttrDisallowedByOpBundle'
25. Returning from 'CallBase::isFnAttrDisallowedByOpBundle'
26. Taking false branch
2267 return false;
2268
2269 return hasFnAttrOnCalledFunction(Kind);
27. Returning value, which participates in a condition later
2270 }
2271
2272 /// Determine whether the return value has the given attribute. Supports
2273 /// Attribute::AttrKind and StringRef as \p AttrKind types.
2274 template <typename AttrKind> bool hasRetAttrImpl(AttrKind Kind) const {
2275 if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
2276 return true;
2277
2278 // Look at the callee, if available.
2279 if (const Function *F = getCalledFunction())
2280 return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
2281 return false;
2282 }
2283};
2284
2285template <>
2286struct OperandTraits<CallBase> : public VariadicOperandTraits<CallBase, 1> {};
2287
2288 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallBase, Value)
2289
2290//===----------------------------------------------------------------------===//
2291// FuncletPadInst Class
2292//===----------------------------------------------------------------------===//
2293class FuncletPadInst : public Instruction {
2294private:
2295 FuncletPadInst(const FuncletPadInst &CPI);
2296
2297 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2298 ArrayRef<Value *> Args, unsigned Values,
2299 const Twine &NameStr, Instruction *InsertBefore);
2300 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2301 ArrayRef<Value *> Args, unsigned Values,
2302 const Twine &NameStr, BasicBlock *InsertAtEnd);
2303
2304 void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
2305
2306protected:
2307 // Note: Instruction needs to be a friend here to call cloneImpl.
2308 friend class Instruction;
2309 friend class CatchPadInst;
2310 friend class CleanupPadInst;
2311
2312 FuncletPadInst *cloneImpl() const;
2313
2314public:
2315 /// Provide fast operand accessors
2316 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2317
2318 /// getNumArgOperands - Return the number of funcletpad arguments.
2319 ///
2320 unsigned getNumArgOperands() const { return getNumOperands() - 1; }
2321
2322 /// Convenience accessors
2323
2324 /// Return the outer EH-pad this funclet is nested within.
2325 ///
2326 /// Note: This returns the associated CatchSwitchInst if this FuncletPadInst
2327 /// is a CatchPadInst.
2328 Value *getParentPad() const { return Op<-1>(); }
2329 void setParentPad(Value *ParentPad) {
2330 assert(ParentPad);
2331 Op<-1>() = ParentPad;
2332 }
2333
2334 /// getArgOperand/setArgOperand - Return/set the i-th funcletpad argument.
2335 ///
2336 Value *getArgOperand(unsigned i) const { return getOperand(i); }
2337 void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
2338
2339 /// arg_operands - iteration adapter for range-for loops.
2340 op_range arg_operands() { return op_range(op_begin(), op_end() - 1); }
2341
2342 /// arg_operands - iteration adapter for range-for loops.
2343 const_op_range arg_operands() const {
2344 return const_op_range(op_begin(), op_end() - 1);
2345 }
2346
2347 // Methods for support type inquiry through isa, cast, and dyn_cast:
2348 static bool classof(const Instruction *I) { return I->isFuncletPad(); }
2349 static bool classof(const Value *V) {
2350 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2351 }
2352};
2353
2354template <>
2355struct OperandTraits<FuncletPadInst>
2356 : public VariadicOperandTraits<FuncletPadInst, /*MINARITY=*/1> {};
2357
2358 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)
2359
2360} // end namespace llvm
2361
2362#endif // LLVM_IR_INSTRTYPES_H

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR/PatternMatch.h

1//===- PatternMatch.h - Match on the LLVM IR --------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides a simple and efficient mechanism for performing general
10// tree-based pattern matches on the LLVM IR. The power of these routines is
11// that it allows you to write concise patterns that are expressive and easy to
12// understand. The other major advantage of this is that it allows you to
13// trivially capture/bind elements in the pattern to variables. For example,
14// you can do something like this:
15//
16// Value *Exp = ...
17// Value *X, *Y; ConstantInt *C1, *C2; // (X & C1) | (Y & C2)
18// if (match(Exp, m_Or(m_And(m_Value(X), m_ConstantInt(C1)),
19// m_And(m_Value(Y), m_ConstantInt(C2))))) {
20// ... Pattern is matched and variables are bound ...
21// }
22//
23// This is primarily useful to things like the instruction combiner, but can
24// also be useful for static analysis tools or code generators.
25//
26//===----------------------------------------------------------------------===//
27
28#ifndef LLVM_IR_PATTERNMATCH_H
29#define LLVM_IR_PATTERNMATCH_H
30
31#include "llvm/ADT/APFloat.h"
32#include "llvm/ADT/APInt.h"
33#include "llvm/IR/Constant.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/InstrTypes.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/IntrinsicInst.h"
40#include "llvm/IR/Intrinsics.h"
41#include "llvm/IR/Operator.h"
42#include "llvm/IR/Value.h"
43#include "llvm/Support/Casting.h"
44#include <cstdint>
45
46namespace llvm {
47namespace PatternMatch {
48
49template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
50 return const_cast<Pattern &>(P).match(V);
36. Calling 'IntrinsicID_match::match'
40. Returning from 'IntrinsicID_match::match'
41. Returning zero, which participates in a condition later
45. Calling 'IntrinsicID_match::match'
49. Returning from 'IntrinsicID_match::match'
50. Returning zero, which participates in a condition later
51}
52
53template <typename Pattern> bool match(ArrayRef<int> Mask, const Pattern &P) {
54 return const_cast<Pattern &>(P).match(Mask);
55}
56
57template <typename SubPattern_t> struct OneUse_match {
58 SubPattern_t SubPattern;
59
60 OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}
61
62 template <typename OpTy> bool match(OpTy *V) {
63 return V->hasOneUse() && SubPattern.match(V);
64 }
65};
66
67template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
68 return SubPattern;
69}
70
71template <typename Class> struct class_match {
72 template <typename ITy> bool match(ITy *V) { return isa<Class>(V); }
73};
74
75/// Match an arbitrary value and ignore it.
76inline class_match<Value> m_Value() { return class_match<Value>(); }
77
78/// Match an arbitrary unary operation and ignore it.
79inline class_match<UnaryOperator> m_UnOp() {
80 return class_match<UnaryOperator>();
81}
82
83/// Match an arbitrary binary operation and ignore it.
84inline class_match<BinaryOperator> m_BinOp() {
85 return class_match<BinaryOperator>();
86}
87
88/// Match any compare instruction and ignore it.
89inline class_match<CmpInst> m_Cmp() { return class_match<CmpInst>(); }
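A sketch combining the matchers above (the helper name is hypothetical):

// True if V is a binary operator with exactly one use.
static bool isSingleUseBinOp(llvm::Value *V) {
  using namespace llvm::PatternMatch;
  return match(V, m_OneUse(m_BinOp()));
}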
90
91struct undef_match {
92 static bool check(const Value *V) {
93 if (isa<UndefValue>(V))
94 return true;
95
96 const auto *CA = dyn_cast<ConstantAggregate>(V);
97 if (!CA)
98 return false;
99
100 SmallPtrSet<const ConstantAggregate *, 8> Seen;
101 SmallVector<const ConstantAggregate *, 8> Worklist;
102
103 // Either UndefValue, PoisonValue, or an aggregate that only contains
104 // these is accepted by the matcher.
105 // CheckValue returns false if CA cannot satisfy this constraint.
106 auto CheckValue = [&](const ConstantAggregate *CA) {
107 for (const Value *Op : CA->operand_values()) {
108 if (isa<UndefValue>(Op))
109 continue;
110
111 const auto *CA = dyn_cast<ConstantAggregate>(Op);
112 if (!CA)
113 return false;
114 if (Seen.insert(CA).second)
115 Worklist.emplace_back(CA);
116 }
117
118 return true;
119 };
120
121 if (!CheckValue(CA))
122 return false;
123
124 while (!Worklist.empty()) {
125 if (!CheckValue(Worklist.pop_back_val()))
126 return false;
127 }
128 return true;
129 }
130 template <typename ITy> bool match(ITy *V) { return check(V); }
131};
132
133/// Match an arbitrary undef constant. This matches poison as well.
134/// If this is an aggregate and contains a non-aggregate element that is
135/// neither undef nor poison, the aggregate is not matched.
136inline auto m_Undef() { return undef_match(); }
137
138/// Match an arbitrary poison constant.
139inline class_match<PoisonValue> m_Poison() { return class_match<PoisonValue>(); }
140
141/// Match an arbitrary Constant and ignore it.
142inline class_match<Constant> m_Constant() { return class_match<Constant>(); }
143
144/// Match an arbitrary ConstantInt and ignore it.
145inline class_match<ConstantInt> m_ConstantInt() {
146 return class_match<ConstantInt>();
147}
148
149/// Match an arbitrary ConstantFP and ignore it.
150inline class_match<ConstantFP> m_ConstantFP() {
151 return class_match<ConstantFP>();
152}
153
154/// Match an arbitrary ConstantExpr and ignore it.
155inline class_match<ConstantExpr> m_ConstantExpr() {
156 return class_match<ConstantExpr>();
157}
158
159/// Match an arbitrary basic block value and ignore it.
160inline class_match<BasicBlock> m_BasicBlock() {
161 return class_match<BasicBlock>();
162}
163
164/// Inverting matcher
165template <typename Ty> struct match_unless {
166 Ty M;
167
168 match_unless(const Ty &Matcher) : M(Matcher) {}
169
170 template <typename ITy> bool match(ITy *V) { return !M.match(V); }
171};
172
173/// Match if the inner matcher does *NOT* match.
174template <typename Ty> inline match_unless<Ty> m_Unless(const Ty &M) {
175 return match_unless<Ty>(M);
176}
177
178/// Matching combinators
179template <typename LTy, typename RTy> struct match_combine_or {
180 LTy L;
181 RTy R;
182
183 match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
184
185 template <typename ITy> bool match(ITy *V) {
186 if (L.match(V))
187 return true;
188 if (R.match(V))
189 return true;
190 return false;
191 }
192};
193
194template <typename LTy, typename RTy> struct match_combine_and {
195 LTy L;
196 RTy R;
197
198 match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
199
200 template <typename ITy> bool match(ITy *V) {
201 if (L.match(V))
202 if (R.match(V))
203 return true;
204 return false;
205 }
206};
207
208/// Combine two pattern matchers matching L || R
209template <typename LTy, typename RTy>
210inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
211 return match_combine_or<LTy, RTy>(L, R);
212}
213
214/// Combine two pattern matchers matching L && R
215template <typename LTy, typename RTy>
216inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
217 return match_combine_and<LTy, RTy>(L, R);
218}
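// Example (illustrative sketch): the combinators compose existing matchers.
// m_Zero() and m_One() are defined later in this header; the helper name is
// hypothetical.
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Matches a constant that is either 0 or 1 (scalar or splat).
static bool isZeroOrOne(const Value *V) {
  return match(V, m_CombineOr(m_Zero(), m_One()));
}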
219
220struct apint_match {
221 const APInt *&Res;
222 bool AllowUndef;
223
224 apint_match(const APInt *&Res, bool AllowUndef)
225 : Res(Res), AllowUndef(AllowUndef) {}
226
227 template <typename ITy> bool match(ITy *V) {
228 if (auto *CI = dyn_cast<ConstantInt>(V)) {
229 Res = &CI->getValue();
230 return true;
231 }
232 if (V->getType()->isVectorTy())
233 if (const auto *C = dyn_cast<Constant>(V))
234 if (auto *CI = dyn_cast_or_null<ConstantInt>(
235 C->getSplatValue(AllowUndef))) {
236 Res = &CI->getValue();
237 return true;
238 }
239 return false;
240 }
241};
242// Either 'if constexpr' or renaming ConstantFP::getValueAPF to
243// ConstantFP::getValue would be needed to handle both APInt and APFloat
244// with a single template function.
245struct apfloat_match {
246 const APFloat *&Res;
247 bool AllowUndef;
248
249 apfloat_match(const APFloat *&Res, bool AllowUndef)
250 : Res(Res), AllowUndef(AllowUndef) {}
251
252 template <typename ITy> bool match(ITy *V) {
253 if (auto *CI = dyn_cast<ConstantFP>(V)) {
254 Res = &CI->getValueAPF();
255 return true;
256 }
257 if (V->getType()->isVectorTy())
258 if (const auto *C = dyn_cast<Constant>(V))
259 if (auto *CI = dyn_cast_or_null<ConstantFP>(
260 C->getSplatValue(AllowUndef))) {
261 Res = &CI->getValueAPF();
262 return true;
263 }
264 return false;
265 }
266};
267
268/// Match a ConstantInt or splatted ConstantVector, binding the
269/// specified pointer to the contained APInt.
270inline apint_match m_APInt(const APInt *&Res) {
271 // Forbid undefs by default to maintain previous behavior.
272 return apint_match(Res, /* AllowUndef */ false);
273}
274
275/// Match APInt while allowing undefs in splat vector constants.
276inline apint_match m_APIntAllowUndef(const APInt *&Res) {
277 return apint_match(Res, /* AllowUndef */ true);
278}
279
280/// Match APInt while forbidding undefs in splat vector constants.
281inline apint_match m_APIntForbidUndef(const APInt *&Res) {
282 return apint_match(Res, /* AllowUndef */ false);
283}
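// Example (illustrative sketch): m_APInt binds the APInt behind a scalar
// ConstantInt or a splatted vector constant; the bound pointer is only
// meaningful when match() succeeds. The helper name is hypothetical.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Matches e.g. 'shl i32 %x, 3' and vector shifts by a splat constant.
static bool isShiftLeftByConstant(Value *V, const APInt *&C) {
  Value *X;
  return match(V, m_Shl(m_Value(X), m_APInt(C)));
}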
284
285/// Match a ConstantFP or splatted ConstantVector, binding the
286/// specified pointer to the contained APFloat.
287inline apfloat_match m_APFloat(const APFloat *&Res) {
288 // Forbid undefs by default to maintain previous behavior.
289 return apfloat_match(Res, /* AllowUndef */ false);
290}
291
292/// Match APFloat while allowing undefs in splat vector constants.
293inline apfloat_match m_APFloatAllowUndef(const APFloat *&Res) {
294 return apfloat_match(Res, /* AllowUndef */ true);
295}
296
297/// Match APFloat while forbidding undefs in splat vector constants.
298inline apfloat_match m_APFloatForbidUndef(const APFloat *&Res) {
299 return apfloat_match(Res, /* AllowUndef */ false);
300}
301
302template <int64_t Val> struct constantint_match {
303 template <typename ITy> bool match(ITy *V) {
304 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
305 const APInt &CIV = CI->getValue();
306 if (Val >= 0)
307 return CIV == static_cast<uint64_t>(Val);
308 // If Val is negative and CI is shorter than it, we would have to truncate
309 // to the right number of bits; if CI is wider, we would have to sign-extend.
310 // Comparing the negated values avoids both.
311 return -CIV == -Val;
312 }
313 return false;
314 }
315};
316
317/// Match a ConstantInt with a specific value.
318template <int64_t Val> inline constantint_match<Val> m_ConstantInt() {
319 return constantint_match<Val>();
320}
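// Example (illustrative sketch): the templated form matches one specific
// scalar integer constant. The helper name is hypothetical.
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Matches a multiplication whose right operand is the literal 8.
static bool isMulByEight(Value *V) {
  Value *X;
  return match(V, m_Mul(m_Value(X), m_ConstantInt<8>()));
}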
321
322/// This helper class is used to match constant scalars, vector splats,
323/// and fixed width vectors that satisfy a specified predicate.
324/// For fixed width vector constants, undefined elements are ignored.
325template <typename Predicate, typename ConstantVal>
326struct cstval_pred_ty : public Predicate {
327 template <typename ITy> bool match(ITy *V) {
328 if (const auto *CV = dyn_cast<ConstantVal>(V))
329 return this->isValue(CV->getValue());
330 if (const auto *VTy = dyn_cast<VectorType>(V->getType())) {
331 if (const auto *C = dyn_cast<Constant>(V)) {
332 if (const auto *CV = dyn_cast_or_null<ConstantVal>(C->getSplatValue()))
333 return this->isValue(CV->getValue());
334
335 // The number of elements of a scalable vector is unknown at compile time.
336 auto *FVTy = dyn_cast<FixedVectorType>(VTy);
337 if (!FVTy)
338 return false;
339
340 // Non-splat vector constant: check each element for a match.
341 unsigned NumElts = FVTy->getNumElements();
342 assert(NumElts != 0 && "Constant vector with no elements?");
343 bool HasNonUndefElements = false;
344 for (unsigned i = 0; i != NumElts; ++i) {
345 Constant *Elt = C->getAggregateElement(i);
346 if (!Elt)
347 return false;
348 if (isa<UndefValue>(Elt))
349 continue;
350 auto *CV = dyn_cast<ConstantVal>(Elt);
351 if (!CV || !this->isValue(CV->getValue()))
352 return false;
353 HasNonUndefElements = true;
354 }
355 return HasNonUndefElements;
356 }
357 }
358 return false;
359 }
360};
361
362/// Specialization of cstval_pred_ty for ConstantInt.
363template <typename Predicate>
364using cst_pred_ty = cstval_pred_ty<Predicate, ConstantInt>;
365
366/// Specialization of cstval_pred_ty for ConstantFP.
367template <typename Predicate>
368using cstfp_pred_ty = cstval_pred_ty<Predicate, ConstantFP>;
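// Example (illustrative sketch): cst_pred_ty lifts a predicate on APInt into
// a matcher that also handles splats and per-element fixed-width vectors.
// The predicate struct and helper below are hypothetical, not part of LLVM.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"

struct is_forty_two {
  bool isValue(const llvm::APInt &C) { return C == 42; }
};

static bool isFortyTwo(const llvm::Value *V) {
  return llvm::PatternMatch::cst_pred_ty<is_forty_two>().match(V);
}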
369
370/// This helper class is used to match scalar and vector constants that
371/// satisfy a specified predicate, and bind them to an APInt.
372template <typename Predicate> struct api_pred_ty : public Predicate {
373 const APInt *&Res;
374
375 api_pred_ty(const APInt *&R) : Res(R) {}
376
377 template <typename ITy> bool match(ITy *V) {
378 if (const auto *CI = dyn_cast<ConstantInt>(V))
379 if (this->isValue(CI->getValue())) {
380 Res = &CI->getValue();
381 return true;
382 }
383 if (V->getType()->isVectorTy())
384 if (const auto *C = dyn_cast<Constant>(V))
385 if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
386 if (this->isValue(CI->getValue())) {
387 Res = &CI->getValue();
388 return true;
389 }
390
391 return false;
392 }
393};
394
395/// This helper class is used to match scalar and vector constants that
396/// satisfy a specified predicate, and bind them to an APFloat.
397/// Undefs are allowed in splat vector constants.
398template <typename Predicate> struct apf_pred_ty : public Predicate {
399 const APFloat *&Res;
400
401 apf_pred_ty(const APFloat *&R) : Res(R) {}
402
403 template <typename ITy> bool match(ITy *V) {
404 if (const auto *CI = dyn_cast<ConstantFP>(V))
405 if (this->isValue(CI->getValue())) {
406 Res = &CI->getValue();
407 return true;
408 }
409 if (V->getType()->isVectorTy())
410 if (const auto *C = dyn_cast<Constant>(V))
411 if (auto *CI = dyn_cast_or_null<ConstantFP>(
412 C->getSplatValue(/* AllowUndef */ true)))
413 if (this->isValue(CI->getValue())) {
414 Res = &CI->getValue();
415 return true;
416 }
417
418 return false;
419 }
420};
421
422///////////////////////////////////////////////////////////////////////////////
423//
424// Encapsulate constant value queries for use in templated predicate matchers.
425// This allows checking if constants match using compound predicates and works
426// with vector constants, possibly with relaxed constraints (for example,
427// ignoring undef values).
428//
429///////////////////////////////////////////////////////////////////////////////
430
431struct is_any_apint {
432 bool isValue(const APInt &C) { return true; }
433};
434/// Match an integer or vector with any integral constant.
435/// For vectors, this includes constants with undefined elements.
436inline cst_pred_ty<is_any_apint> m_AnyIntegralConstant() {
437 return cst_pred_ty<is_any_apint>();
438}
439
440struct is_all_ones {
441 bool isValue(const APInt &C) { return C.isAllOnesValue(); }
442};
443/// Match an integer or vector with all bits set.
444/// For vectors, this includes constants with undefined elements.
445inline cst_pred_ty<is_all_ones> m_AllOnes() {
446 return cst_pred_ty<is_all_ones>();
447}
448
449struct is_maxsignedvalue {
450 bool isValue(const APInt &C) { return C.isMaxSignedValue(); }
451};
452/// Match an integer or vector with values having all bits except for the high
453/// bit set (0x7f...).
454/// For vectors, this includes constants with undefined elements.
455inline cst_pred_ty<is_maxsignedvalue> m_MaxSignedValue() {
456 return cst_pred_ty<is_maxsignedvalue>();
457}
458inline api_pred_ty<is_maxsignedvalue> m_MaxSignedValue(const APInt *&V) {
459 return V;
460}
461
462struct is_negative {
463 bool isValue(const APInt &C) { return C.isNegative(); }
464};
465/// Match an integer or vector of negative values.
466/// For vectors, this includes constants with undefined elements.
467inline cst_pred_ty<is_negative> m_Negative() {
468 return cst_pred_ty<is_negative>();
469}
470inline api_pred_ty<is_negative> m_Negative(const APInt *&V) {
471 return V;
472}
473
474struct is_nonnegative {
475 bool isValue(const APInt &C) { return C.isNonNegative(); }
476};
477/// Match an integer or vector of non-negative values.
478/// For vectors, this includes constants with undefined elements.
479inline cst_pred_ty<is_nonnegative> m_NonNegative() {
480 return cst_pred_ty<is_nonnegative>();
481}
482inline api_pred_ty<is_nonnegative> m_NonNegative(const APInt *&V) {
483 return V;
484}
485
486struct is_strictlypositive {
487 bool isValue(const APInt &C) { return C.isStrictlyPositive(); }
488};
489/// Match an integer or vector of strictly positive values.
490/// For vectors, this includes constants with undefined elements.
491inline cst_pred_ty<is_strictlypositive> m_StrictlyPositive() {
492 return cst_pred_ty<is_strictlypositive>();
493}
494inline api_pred_ty<is_strictlypositive> m_StrictlyPositive(const APInt *&V) {
495 return V;
496}
497
498struct is_nonpositive {
499 bool isValue(const APInt &C) { return C.isNonPositive(); }
500};
501/// Match an integer or vector of non-positive values.
502/// For vectors, this includes constants with undefined elements.
503inline cst_pred_ty<is_nonpositive> m_NonPositive() {
504 return cst_pred_ty<is_nonpositive>();
505}
506inline api_pred_ty<is_nonpositive> m_NonPositive(const APInt *&V) { return V; }
507
508struct is_one {
509 bool isValue(const APInt &C) { return C.isOneValue(); }
510};
511/// Match an integer 1 or a vector with all elements equal to 1.
512/// For vectors, this includes constants with undefined elements.
513inline cst_pred_ty<is_one> m_One() {
514 return cst_pred_ty<is_one>();
515}
516
517struct is_zero_int {
518 bool isValue(const APInt &C) { return C.isNullValue(); }
519};
520/// Match an integer 0 or a vector with all elements equal to 0.
521/// For vectors, this includes constants with undefined elements.
522inline cst_pred_ty<is_zero_int> m_ZeroInt() {
523 return cst_pred_ty<is_zero_int>();
524}
525
526struct is_zero {
527 template <typename ITy> bool match(ITy *V) {
528 auto *C = dyn_cast<Constant>(V);
529 // FIXME: this should be able to do something for scalable vectors
530 return C && (C->isNullValue() || cst_pred_ty<is_zero_int>().match(C));
531 }
532};
533/// Match any null constant or a vector with all elements equal to 0.
534/// For vectors, this includes constants with undefined elements.
535inline is_zero m_Zero() {
536 return is_zero();
537}
538
539struct is_power2 {
540 bool isValue(const APInt &C) { return C.isPowerOf2(); }
541};
542/// Match an integer or vector power-of-2.
543/// For vectors, this includes constants with undefined elements.
544inline cst_pred_ty<is_power2> m_Power2() {
545 return cst_pred_ty<is_power2>();
546}
547inline api_pred_ty<is_power2> m_Power2(const APInt *&V) {
548 return V;
549}
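// Example (illustrative sketch): the api_pred_ty overloads test the predicate
// and hand back the underlying APInt, e.g. to turn a power-of-2 multiplier
// into a shift amount. The helper name is hypothetical.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static bool getShiftAmountOfMul(Value *V, unsigned &ShAmt) {
  Value *X;
  const APInt *C;
  if (!match(V, m_Mul(m_Value(X), m_Power2(C))))
    return false;
  ShAmt = C->logBase2();
  return true;
}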
550
551struct is_negated_power2 {
552 bool isValue(const APInt &C) { return (-C).isPowerOf2(); }
553};
554/// Match an integer or vector negated power-of-2.
555/// For vectors, this includes constants with undefined elements.
556inline cst_pred_ty<is_negated_power2> m_NegatedPower2() {
557 return cst_pred_ty<is_negated_power2>();
558}
559inline api_pred_ty<is_negated_power2> m_NegatedPower2(const APInt *&V) {
560 return V;
561}
562
563struct is_power2_or_zero {
564 bool isValue(const APInt &C) { return !C || C.isPowerOf2(); }
565};
566/// Match an integer or vector of 0 or power-of-2 values.
567/// For vectors, this includes constants with undefined elements.
568inline cst_pred_ty<is_power2_or_zero> m_Power2OrZero() {
569 return cst_pred_ty<is_power2_or_zero>();
570}
571inline api_pred_ty<is_power2_or_zero> m_Power2OrZero(const APInt *&V) {
572 return V;
573}
574
575struct is_sign_mask {
576 bool isValue(const APInt &C) { return C.isSignMask(); }
577};
578/// Match an integer or vector with only the sign bit(s) set.
579/// For vectors, this includes constants with undefined elements.
580inline cst_pred_ty<is_sign_mask> m_SignMask() {
581 return cst_pred_ty<is_sign_mask>();
582}
583
584struct is_lowbit_mask {
585 bool isValue(const APInt &C) { return C.isMask(); }
586};
587/// Match an integer or vector with only the low bit(s) set.
588/// For vectors, this includes constants with undefined elements.
589inline cst_pred_ty<is_lowbit_mask> m_LowBitMask() {
590 return cst_pred_ty<is_lowbit_mask>();
591}
592
593struct icmp_pred_with_threshold {
594 ICmpInst::Predicate Pred;
595 const APInt *Thr;
596 bool isValue(const APInt &C) {
597 switch (Pred) {
598 case ICmpInst::Predicate::ICMP_EQ:
599 return C.eq(*Thr);
600 case ICmpInst::Predicate::ICMP_NE:
601 return C.ne(*Thr);
602 case ICmpInst::Predicate::ICMP_UGT:
603 return C.ugt(*Thr);
604 case ICmpInst::Predicate::ICMP_UGE:
605 return C.uge(*Thr);
606 case ICmpInst::Predicate::ICMP_ULT:
607 return C.ult(*Thr);
608 case ICmpInst::Predicate::ICMP_ULE:
609 return C.ule(*Thr);
610 case ICmpInst::Predicate::ICMP_SGT:
611 return C.sgt(*Thr);
612 case ICmpInst::Predicate::ICMP_SGE:
613 return C.sge(*Thr);
614 case ICmpInst::Predicate::ICMP_SLT:
615 return C.slt(*Thr);
616 case ICmpInst::Predicate::ICMP_SLE:
617 return C.sle(*Thr);
618 default:
619 llvm_unreachable("Unhandled ICmp predicate");
620 }
621 }
622};
623/// Match an integer or vector with every element comparing 'pred' (eq/ne/...)
624/// to Threshold. For vectors, this includes constants with undefined elements.
625inline cst_pred_ty<icmp_pred_with_threshold>
626m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold) {
627 cst_pred_ty<icmp_pred_with_threshold> P;
628 P.Pred = Predicate;
629 P.Thr = &Threshold;
630 return P;
631}
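// Example (illustrative sketch): m_SpecificInt_ICMP compares every element of
// an integer constant against a threshold with the given predicate. The
// helper name and bit-width parameter are hypothetical.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// True if V is a constant (scalar, splat, or per-element fixed vector)
// that is unsigned-greater-or-equal to 16.
static bool isAtLeast16(const Value *V, unsigned BitWidth) {
  return match(V, m_SpecificInt_ICMP(ICmpInst::ICMP_UGE, APInt(BitWidth, 16)));
}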
632
633struct is_nan {
634 bool isValue(const APFloat &C) { return C.isNaN(); }
635};
636/// Match an arbitrary NaN constant. This includes quiet and signaling NaNs.
637/// For vectors, this includes constants with undefined elements.
638inline cstfp_pred_ty<is_nan> m_NaN() {
639 return cstfp_pred_ty<is_nan>();
640}
641
642struct is_nonnan {
643 bool isValue(const APFloat &C) { return !C.isNaN(); }
644};
645/// Match a non-NaN FP constant.
646/// For vectors, this includes constants with undefined elements.
647inline cstfp_pred_ty<is_nonnan> m_NonNaN() {
648 return cstfp_pred_ty<is_nonnan>();
649}
650
651struct is_inf {
652 bool isValue(const APFloat &C) { return C.isInfinity(); }
653};
654/// Match a positive or negative infinity FP constant.
655/// For vectors, this includes constants with undefined elements.
656inline cstfp_pred_ty<is_inf> m_Inf() {
657 return cstfp_pred_ty<is_inf>();
658}
659
660struct is_noninf {
661 bool isValue(const APFloat &C) { return !C.isInfinity(); }
662};
663/// Match a non-infinity FP constant, i.e. finite or NaN.
664/// For vectors, this includes constants with undefined elements.
665inline cstfp_pred_ty<is_noninf> m_NonInf() {
666 return cstfp_pred_ty<is_noninf>();
667}
668
669struct is_finite {
670 bool isValue(const APFloat &C) { return C.isFinite(); }
671};
672/// Match a finite FP constant, i.e. not infinity or NaN.
673/// For vectors, this includes constants with undefined elements.
674inline cstfp_pred_ty<is_finite> m_Finite() {
675 return cstfp_pred_ty<is_finite>();
676}
677inline apf_pred_ty<is_finite> m_Finite(const APFloat *&V) { return V; }
678
679struct is_finitenonzero {
680 bool isValue(const APFloat &C) { return C.isFiniteNonZero(); }
681};
682/// Match a finite non-zero FP constant.
683/// For vectors, this includes constants with undefined elements.
684inline cstfp_pred_ty<is_finitenonzero> m_FiniteNonZero() {
685 return cstfp_pred_ty<is_finitenonzero>();
686}
687inline apf_pred_ty<is_finitenonzero> m_FiniteNonZero(const APFloat *&V) {
688 return V;
689}
690
691struct is_any_zero_fp {
692 bool isValue(const APFloat &C) { return C.isZero(); }
693};
694/// Match a floating-point negative zero or positive zero.
695/// For vectors, this includes constants with undefined elements.
696inline cstfp_pred_ty<is_any_zero_fp> m_AnyZeroFP() {
697 return cstfp_pred_ty<is_any_zero_fp>();
698}
699
700struct is_pos_zero_fp {
701 bool isValue(const APFloat &C) { return C.isPosZero(); }
702};
703/// Match a floating-point positive zero.
704/// For vectors, this includes constants with undefined elements.
705inline cstfp_pred_ty<is_pos_zero_fp> m_PosZeroFP() {
706 return cstfp_pred_ty<is_pos_zero_fp>();
707}
708
709struct is_neg_zero_fp {
710 bool isValue(const APFloat &C) { return C.isNegZero(); }
711};
712/// Match a floating-point negative zero.
713/// For vectors, this includes constants with undefined elements.
714inline cstfp_pred_ty<is_neg_zero_fp> m_NegZeroFP() {
715 return cstfp_pred_ty<is_neg_zero_fp>();
716}
717
718struct is_non_zero_fp {
719 bool isValue(const APFloat &C) { return C.isNonZero(); }
720};
721/// Match a non-zero floating-point constant.
722/// For vectors, this includes constants with undefined elements.
723inline cstfp_pred_ty<is_non_zero_fp> m_NonZeroFP() {
724 return cstfp_pred_ty<is_non_zero_fp>();
725}
726
727///////////////////////////////////////////////////////////////////////////////
728
729template <typename Class> struct bind_ty {
730 Class *&VR;
731
732 bind_ty(Class *&V) : VR(V) {}
733
734 template <typename ITy> bool match(ITy *V) {
735 if (auto *CV = dyn_cast<Class>(V)) {
736 VR = CV;
737 return true;
738 }
739 return false;
740 }
741};
742
743/// Match a value, capturing it if we match.
744inline bind_ty<Value> m_Value(Value *&V) { return V; }
745inline bind_ty<const Value> m_Value(const Value *&V) { return V; }
746
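// Example (illustrative sketch): bind_ty is what makes capturing work; both
// operands of an add are captured for the caller to inspect. The helper name
// is hypothetical.
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static bool splitAdd(Value *V, Value *&LHS, Value *&RHS) {
  return match(V, m_Add(m_Value(LHS), m_Value(RHS)));
}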
747/// Match an instruction, capturing it if we match.
748inline bind_ty<Instruction> m_Instruction(Instruction *&I) { return I; }
749/// Match a unary operator, capturing it if we match.
750inline bind_ty<UnaryOperator> m_UnOp(UnaryOperator *&I) { return I; }
751/// Match a binary operator, capturing it if we match.
752inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; }
753/// Match a with-overflow intrinsic, capturing it if we match.
754inline bind_ty<WithOverflowInst> m_WithOverflowInst(WithOverflowInst *&I) { return I; }
755inline bind_ty<const WithOverflowInst>
756m_WithOverflowInst(const WithOverflowInst *&I) {
757 return I;
758}
759
760/// Match a Constant, capturing the value if we match.
761inline bind_ty<Constant> m_Constant(Constant *&C) { return C; }
762
763/// Match a ConstantInt, capturing the value if we match.
764inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; }
765
766/// Match a ConstantFP, capturing the value if we match.
767inline bind_ty<ConstantFP> m_ConstantFP(ConstantFP *&C) { return C; }
768
769/// Match a ConstantExpr, capturing the value if we match.
770inline bind_ty<ConstantExpr> m_ConstantExpr(ConstantExpr *&C) { return C; }
771
772/// Match a basic block value, capturing it if we match.
773inline bind_ty<BasicBlock> m_BasicBlock(BasicBlock *&V) { return V; }
774inline bind_ty<const BasicBlock> m_BasicBlock(const BasicBlock *&V) {
775 return V;
776}
777
778/// Match an arbitrary immediate Constant and ignore it.
779inline match_combine_and<class_match<Constant>,
780 match_unless<class_match<ConstantExpr>>>
781m_ImmConstant() {
782 return m_CombineAnd(m_Constant(), m_Unless(m_ConstantExpr()));
783}
784
785/// Match an immediate Constant, capturing the value if we match.
786inline match_combine_and<bind_ty<Constant>,
787 match_unless<class_match<ConstantExpr>>>
788m_ImmConstant(Constant *&C) {
789 return m_CombineAnd(m_Constant(C), m_Unless(m_ConstantExpr()));
790}
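// Example (illustrative sketch): m_ImmConstant matches a Constant that is not
// a ConstantExpr, and the capturing overload also hands the constant back.
// The helper name is hypothetical.
#include "llvm/IR/Constants.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static bool isSubOfImmediate(Value *V, Constant *&C) {
  Value *X;
  return match(V, m_Sub(m_Value(X), m_ImmConstant(C)));
}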
791
792/// Match a specified Value*.
793struct specificval_ty {
794 const Value *Val;
795
796 specificval_ty(const Value *V) : Val(V) {}
797
798 template <typename ITy> bool match(ITy *V) { return V == Val; }
799};
800
801/// Match if the value equals the specified Value*.
802inline specificval_ty m_Specific(const Value *V) { return V; }
803
804/// Stores a reference to the Value *, not the Value * itself,
805/// thus can be used in commutative matchers.
806template <typename Class> struct deferredval_ty {
807 Class *const &Val;
808
809 deferredval_ty(Class *const &V) : Val(V) {}
810
811 template <typename ITy> bool match(ITy *const V) { return V == Val; }
812};
813
814/// Like m_Specific(), but works if the specific value to match is determined
815/// as part of the same match() expression. For example:
816/// m_Add(m_Value(X), m_Specific(X)) is incorrect, because m_Specific() will
817/// bind X before the pattern match starts.
818/// m_Add(m_Value(X), m_Deferred(X)) is correct, and will check against
819/// whichever value m_Value(X) populated.
820inline deferredval_ty<Value> m_Deferred(Value *const &V) { return V; }
821inline deferredval_ty<const Value> m_Deferred(const Value *const &V) {
822 return V;
823}
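// Example (illustrative sketch): m_Specific(X) compares against the value X
// already holds when the pattern is built, while m_Deferred(X) compares
// against whatever m_Value(X) binds during the same match() call, so a
// self-AND can be recognized in one pattern. The helper name is hypothetical.
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Matches 'and %x, %x' for any value %x.
static bool isSelfAnd(Value *V) {
  Value *X;
  return match(V, m_And(m_Value(X), m_Deferred(X)));
}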
824
825/// Match a specified floating point value or vector of all elements of
826/// that value.
827struct specific_fpval {
828 double Val;
829