Bug Summary

File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/GenericDomTree.h
Warning: line 494, column 12
Called C++ object pointer is null
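
In the LLVM 13 headers analyzed here, line 494 falls inside DominatorTreeBase::findNearestCommonDominator, the innermost call in the analyzer's path below (the quoted excerpt of GenericDomTree.h ends before that line). The method resolves each block to its DomTreeNodeBase and walks getIDom() chains by level; the analyzer concludes one of those node pointers can be null, and because the build defines NDEBUG the asserts that state the precondition compile to no-ops. A minimal standalone model of the flagged pattern follows; it is a sketch using stand-in types, not the verbatim LLVM source:

#include <cassert>
#include <utility>

// Stand-in for DomTreeNodeBase with just the pieces the level walk needs.
struct Node {
  Node *IDom = nullptr;   // immediate dominator; null at the root
  unsigned Level = 0;     // depth in the dominator tree
  Node *getIDom() const { return IDom; }
  unsigned getLevel() const { return Level; }
};

// Shape of the walk in findNearestCommonDominator: climb the deeper node
// until both meet. If either argument is null (a block with no node in
// the tree), the first getLevel() call dereferences null; with NDEBUG
// defined, the assert below expands to ((void)0) and catches nothing.
Node *nearestCommonDominator(Node *NodeA, Node *NodeB) {
  assert(NodeA && NodeB && "both blocks must be in the tree");
  while (NodeA != NodeB) {
    if (NodeA->getLevel() < NodeB->getLevel())
      std::swap(NodeA, NodeB);
    NodeA = NodeA->getIDom();
  }
  return NodeA;
}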

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SIFixSGPRCopies.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
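
Note the -D NDEBUG near the end of the command line above: in this build every assert compiles to a no-op and LLVM's debug logging compiles away, which is why the analyzer can walk straight past the assertions that document the dominator-tree preconditions in the code below. As a reminder of what those macros expand to under NDEBUG (standard <cassert> behavior plus llvm/Support/Debug.h, not new definitions):

#define assert(ignore) ((void)0)           // <cassert> with NDEBUG defined
#define LLVM_DEBUG(X) do { } while (false) // llvm/Support/Debug.h with NDEBUG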

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp

1//===- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// Copies from VGPR to SGPR registers are illegal and the register coalescer
11/// will sometimes generate these illegal copies in situations like this:
12///
13/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
14///
15/// BB0:
16/// %0 <sgpr> = SCALAR_INST
17/// %1 <vsrc> = COPY %0 <sgpr>
18/// ...
19/// BRANCH %cond BB1, BB2
20/// BB1:
21/// %2 <vgpr> = VECTOR_INST
22/// %3 <vsrc> = COPY %2 <vgpr>
23/// BB2:
24/// %4 <vsrc> = PHI %1 <vsrc>, <%bb.0>, %3 <vsrc>, <%bb.1>
25/// %5 <vgpr> = VECTOR_INST %4 <vsrc>
26///
27///
28/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
29/// code will look like this:
30///
31/// BB0:
32/// %0 <sgpr> = SCALAR_INST
33/// ...
34/// BRANCH %cond BB1, BB2
35/// BB1:
36/// %2 <vgpr> = VECTOR_INST
37/// %3 <vsrc> = COPY %2 <vgpr>
38/// BB2:
39/// %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <vsrc>, <%bb.1>
40/// %5 <vgpr> = VECTOR_INST %4 <sgpr>
41///
42/// Now that the result of the PHI instruction is an SGPR, the register
43/// allocator is forced to constrain the register class of %3 to
44/// <sgpr>, so we end up with final code like this:
45///
46/// BB0:
47/// %0 <sgpr> = SCALAR_INST
48/// ...
49/// BRANCH %cond BB1, BB2
50/// BB1:
51/// %2 <vgpr> = VECTOR_INST
52/// %3 <sgpr> = COPY %2 <vgpr>
53/// BB2:
54/// %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <sgpr>, <%bb.1>
55/// %5 <vgpr> = VECTOR_INST %4 <sgpr>
56///
57/// Now this code contains an illegal copy from a VGPR to an SGPR.
58///
59/// To avoid this problem, this pass searches for PHI instructions
60/// that define a <vsrc> register and constrains their definition class to
61/// <vgpr> if a user of the PHI's definition register is a vector instruction.
62/// If the PHI's definition class is constrained to <vgpr> then the coalescer
63/// will be unable to perform the COPY removal from the above example which
64/// ultimately led to the creation of an illegal COPY.
65//===----------------------------------------------------------------------===//
66
67#include "AMDGPU.h"
68#include "GCNSubtarget.h"
69#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
70#include "llvm/CodeGen/MachineDominators.h"
71#include "llvm/InitializePasses.h"
72#include "llvm/Target/TargetMachine.h"
73
74using namespace llvm;
75
76#define DEBUG_TYPE "si-fix-sgpr-copies"
77
78static cl::opt<bool> EnableM0Merge(
79 "amdgpu-enable-merge-m0",
80 cl::desc("Merge and hoist M0 initializations"),
81 cl::init(true));
82
83namespace {
84
85class SIFixSGPRCopies : public MachineFunctionPass {
86 MachineDominatorTree *MDT;
87
88public:
89 static char ID;
90
91 MachineRegisterInfo *MRI;
92 const SIRegisterInfo *TRI;
93 const SIInstrInfo *TII;
94
95 SIFixSGPRCopies() : MachineFunctionPass(ID) {}
96
97 bool runOnMachineFunction(MachineFunction &MF) override;
98
99 MachineBasicBlock *processPHINode(MachineInstr &MI);
100
101 StringRef getPassName() const override { return "SI Fix SGPR copies"; }
102
103 void getAnalysisUsage(AnalysisUsage &AU) const override {
104 AU.addRequired<MachineDominatorTree>();
105 AU.addPreserved<MachineDominatorTree>();
106 AU.setPreservesCFG();
107 MachineFunctionPass::getAnalysisUsage(AU);
108 }
109};
110
111} // end anonymous namespace
112
113INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
114 "SI Fix SGPR copies", false, false)
115INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
116INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
117 "SI Fix SGPR copies", false, false)
118
119char SIFixSGPRCopies::ID = 0;
120
121char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;
122
123FunctionPass *llvm::createSIFixSGPRCopiesPass() {
124 return new SIFixSGPRCopies();
125}
126
127static bool hasVectorOperands(const MachineInstr &MI,
128 const SIRegisterInfo *TRI) {
129 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
130 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
131 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
132 continue;
133
134 if (TRI->hasVectorRegisters(MRI.getRegClass(MI.getOperand(i).getReg())))
135 return true;
136 }
137 return false;
138}
139
140static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
141getCopyRegClasses(const MachineInstr &Copy,
142 const SIRegisterInfo &TRI,
143 const MachineRegisterInfo &MRI) {
144 Register DstReg = Copy.getOperand(0).getReg();
145 Register SrcReg = Copy.getOperand(1).getReg();
146
147 const TargetRegisterClass *SrcRC = SrcReg.isVirtual()
148 ? MRI.getRegClass(SrcReg)
149 : TRI.getPhysRegClass(SrcReg);
150
151 // We don't really care about the subregister here.
152 // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());
153
154 const TargetRegisterClass *DstRC = DstReg.isVirtual()
155 ? MRI.getRegClass(DstReg)
156 : TRI.getPhysRegClass(DstReg);
157
158 return std::make_pair(SrcRC, DstRC);
159}
160
161static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
162 const TargetRegisterClass *DstRC,
163 const SIRegisterInfo &TRI) {
164 return SrcRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(DstRC) &&
165 TRI.hasVectorRegisters(SrcRC);
166}
167
168static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
169 const TargetRegisterClass *DstRC,
170 const SIRegisterInfo &TRI) {
171 return DstRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(SrcRC) &&
172 TRI.hasVectorRegisters(DstRC);
173}
174
175static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
176 const SIRegisterInfo *TRI,
177 const SIInstrInfo *TII) {
178 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
179 auto &Src = MI.getOperand(1);
180 Register DstReg = MI.getOperand(0).getReg();
181 Register SrcReg = Src.getReg();
182 if (!SrcReg.isVirtual() || !DstReg.isVirtual())
183 return false;
184
185 for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
186 const auto *UseMI = MO.getParent();
187 if (UseMI == &MI)
188 continue;
189 if (MO.isDef() || UseMI->getParent() != MI.getParent() ||
190 UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END)
191 return false;
192
193 unsigned OpIdx = UseMI->getOperandNo(&MO);
194 if (OpIdx >= UseMI->getDesc().getNumOperands() ||
195 !TII->isOperandLegal(*UseMI, OpIdx, &Src))
196 return false;
197 }
198 // Change VGPR to SGPR destination.
199 MRI.setRegClass(DstReg, TRI->getEquivalentSGPRClass(MRI.getRegClass(DstReg)));
200 return true;
201}
202
203// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
204//
205// SGPRx = ...
206// SGPRy = REG_SEQUENCE SGPRx, sub0 ...
207// VGPRz = COPY SGPRy
208//
209// ==>
210//
211// VGPRx = COPY SGPRx
212// VGPRz = REG_SEQUENCE VGPRx, sub0
213//
214// This exposes immediate folding opportunities when materializing 64-bit
215// immediates.
216static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
217 const SIRegisterInfo *TRI,
218 const SIInstrInfo *TII,
219 MachineRegisterInfo &MRI) {
220 assert(MI.isRegSequence());
221
222 Register DstReg = MI.getOperand(0).getReg();
223 if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
224 return false;
225
226 if (!MRI.hasOneUse(DstReg))
227 return false;
228
229 MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
230 if (!CopyUse.isCopy())
231 return false;
232
233 // It is illegal to have vreg inputs to a physreg defining reg_sequence.
234 if (CopyUse.getOperand(0).getReg().isPhysical())
235 return false;
236
237 const TargetRegisterClass *SrcRC, *DstRC;
238 std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);
239
240 if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
241 return false;
242
243 if (tryChangeVGPRtoSGPRinCopy(CopyUse, TRI, TII))
244 return true;
245
246 // TODO: Could have multiple extracts?
247 unsigned SubReg = CopyUse.getOperand(1).getSubReg();
248 if (SubReg != AMDGPU::NoSubRegister)
249 return false;
250
251 MRI.setRegClass(DstReg, DstRC);
252
253 // SGPRx = ...
254 // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
255 // VGPRz = COPY SGPRy
256
257 // =>
258 // VGPRx = COPY SGPRx
259 // VGPRz = REG_SEQUENCE VGPRx, sub0
260
261 MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());
262 bool IsAGPR = TRI->hasAGPRs(DstRC);
263
264 for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
265 Register SrcReg = MI.getOperand(I).getReg();
266 unsigned SrcSubReg = MI.getOperand(I).getSubReg();
267
268 const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
269 assert(TRI->isSGPRClass(SrcRC) &&
270 "Expected SGPR REG_SEQUENCE to only have SGPR inputs");
271
272 SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
273 const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);
274
275 Register TmpReg = MRI.createVirtualRegister(NewSrcRC);
276
277 BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
278 TmpReg)
279 .add(MI.getOperand(I));
280
281 if (IsAGPR) {
282 const TargetRegisterClass *NewSrcRC = TRI->getEquivalentAGPRClass(SrcRC);
283 Register TmpAReg = MRI.createVirtualRegister(NewSrcRC);
284 unsigned Opc = NewSrcRC == &AMDGPU::AGPR_32RegClass ?
285 AMDGPU::V_ACCVGPR_WRITE_B32_e64 : AMDGPU::COPY;
286 BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(Opc),
287 TmpAReg)
288 .addReg(TmpReg, RegState::Kill);
289 TmpReg = TmpAReg;
290 }
291
292 MI.getOperand(I).setReg(TmpReg);
293 }
294
295 CopyUse.eraseFromParent();
296 return true;
297}
298
299static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
300 const MachineInstr *MoveImm,
301 const SIInstrInfo *TII,
302 unsigned &SMovOp,
303 int64_t &Imm) {
304 if (Copy->getOpcode() != AMDGPU::COPY)
305 return false;
306
307 if (!MoveImm->isMoveImmediate())
308 return false;
309
310 const MachineOperand *ImmOp =
311 TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
312 if (!ImmOp->isImm())
313 return false;
314
315 // FIXME: Handle copies with sub-regs.
316 if (Copy->getOperand(0).getSubReg())
317 return false;
318
319 switch (MoveImm->getOpcode()) {
320 default:
321 return false;
322 case AMDGPU::V_MOV_B32_e32:
323 SMovOp = AMDGPU::S_MOV_B32;
324 break;
325 case AMDGPU::V_MOV_B64_PSEUDO:
326 SMovOp = AMDGPU::S_MOV_B64;
327 break;
328 }
329 Imm = ImmOp->getImm();
330 return true;
331}
332
333template <class UnaryPredicate>
334bool searchPredecessors(const MachineBasicBlock *MBB,
335 const MachineBasicBlock *CutOff,
336 UnaryPredicate Predicate) {
337 if (MBB == CutOff)
338 return false;
339
340 DenseSet<const MachineBasicBlock *> Visited;
341 SmallVector<MachineBasicBlock *, 4> Worklist(MBB->predecessors());
342
343 while (!Worklist.empty()) {
344 MachineBasicBlock *MBB = Worklist.pop_back_val();
345
346 if (!Visited.insert(MBB).second)
347 continue;
348 if (MBB == CutOff)
349 continue;
350 if (Predicate(MBB))
351 return true;
352
353 Worklist.append(MBB->pred_begin(), MBB->pred_end());
354 }
355
356 return false;
357}
358
359// Checks if there is a potential path from instruction From to instruction To.
360// If CutOff is specified and sits on that path, we ignore the portion of the
361// path above it and report To as not reachable.
362static bool isReachable(const MachineInstr *From,
363 const MachineInstr *To,
364 const MachineBasicBlock *CutOff,
365 MachineDominatorTree &MDT) {
366 if (MDT.dominates(From, To))
367 return true;
368
369 const MachineBasicBlock *MBBFrom = From->getParent();
370 const MachineBasicBlock *MBBTo = To->getParent();
371
372 // Do predecessor search.
373 // We should almost never get here since we do not usually produce M0 stores
374 // other than -1.
375 return searchPredecessors(MBBTo, CutOff, [MBBFrom]
376 (const MachineBasicBlock *MBB) { return MBB == MBBFrom; });
377}
378
379// Return the first non-prologue instruction in the block.
380static MachineBasicBlock::iterator
381getFirstNonPrologue(MachineBasicBlock *MBB, const TargetInstrInfo *TII) {
382 MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
383 while (I != MBB->end() && TII->isBasicBlockPrologue(*I))
384 ++I;
385
386 return I;
387}
388
389// Hoist and merge identical SGPR initializations into a common predecessor.
390// This is intended to combine M0 initializations, but can work with any
391// SGPR. A VGPR cannot be processed since we cannot guarantee vector
392// execution.
393static bool hoistAndMergeSGPRInits(unsigned Reg,
394 const MachineRegisterInfo &MRI,
395 const TargetRegisterInfo *TRI,
396 MachineDominatorTree &MDT,
397 const TargetInstrInfo *TII) {
398 // List of inits by immediate value.
399 using InitListMap = std::map<unsigned, std::list<MachineInstr *>>;
400 InitListMap Inits;
401 // List of clobbering instructions.
402 SmallVector<MachineInstr*, 8> Clobbers;
403 // List of instructions marked for deletion.
404 SmallSet<MachineInstr*, 8> MergedInstrs;
405
406 bool Changed = false;
407
408 for (auto &MI : MRI.def_instructions(Reg)) {
409 MachineOperand *Imm = nullptr;
410 for (auto &MO : MI.operands()) {
411 if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
412 (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
413 Imm = nullptr;
414 break;
415 } else if (MO.isImm())
416 Imm = &MO;
417 }
418 if (Imm)
419 Inits[Imm->getImm()].push_front(&MI);
420 else
421 Clobbers.push_back(&MI);
422 }
423
424 for (auto &Init : Inits) {
425 auto &Defs = Init.second;
426
427 for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
7
Loop condition is true. Entering loop body
428 MachineInstr *MI1 = *I1;
429
430 for (auto I2 = std::next(I1); I2 != E; ) {
8
Loop condition is true. Entering loop body
431 MachineInstr *MI2 = *I2;
432
433 // Check any possible interference
434 auto interferes = [&](MachineBasicBlock::iterator From,
435 MachineBasicBlock::iterator To) -> bool {
436
437 assert(MDT.dominates(&*To, &*From));
438
439 auto interferes = [&MDT, From, To](MachineInstr* &Clobber) -> bool {
440 const MachineBasicBlock *MBBFrom = From->getParent();
441 const MachineBasicBlock *MBBTo = To->getParent();
442 bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
443 bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
444 if (!MayClobberFrom && !MayClobberTo)
445 return false;
446 if ((MayClobberFrom && !MayClobberTo) ||
447 (!MayClobberFrom && MayClobberTo))
448 return true;
449 // Both can clobber, this is not an interference only if both are
450 // dominated by Clobber and belong to the same block or if Clobber
451 // properly dominates To, given that To >> From, so it dominates
452 // both and is located in a common dominator.
453 return !((MBBFrom == MBBTo &&
454 MDT.dominates(Clobber, &*From) &&
455 MDT.dominates(Clobber, &*To)) ||
456 MDT.properlyDominates(Clobber->getParent(), MBBTo));
457 };
458
459 return (llvm::any_of(Clobbers, interferes)) ||
460 (llvm::any_of(Inits, [&](InitListMap::value_type &C) {
461 return C.first != Init.first &&
462 llvm::any_of(C.second, interferes);
463 }));
464 };
465
466 if (MDT.dominates(MI1, MI2)) {
9
Taking false branch
467 if (!interferes(MI2, MI1)) {
468 LLVM_DEBUG(dbgs()
469 << "Erasing from "
470 << printMBBReference(*MI2->getParent()) << " " << *MI2);
471 MergedInstrs.insert(MI2);
472 Changed = true;
473 ++I2;
474 continue;
475 }
476 } else if (MDT.dominates(MI2, MI1)) {
10
Taking false branch
477 if (!interferes(MI1, MI2)) {
478 LLVM_DEBUG(dbgs()
479 << "Erasing from "
480 << printMBBReference(*MI1->getParent()) << " " << *MI1);
481 MergedInstrs.insert(MI1);
482 Changed = true;
483 ++I1;
484 break;
485 }
486 } else {
487 auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
11
Calling 'MachineDominatorTree::findNearestCommonDominator'
488 MI2->getParent());
489 if (!MBB) {
490 ++I2;
491 continue;
492 }
493
494 MachineBasicBlock::iterator I = getFirstNonPrologue(MBB, TII);
495 if (!interferes(MI1, I) && !interferes(MI2, I)) {
496 LLVM_DEBUG(dbgs()
497 << "Erasing from "
498 << printMBBReference(*MI1->getParent()) << " " << *MI1
499 << "and moving from "
500 << printMBBReference(*MI2->getParent()) << " to "
501 << printMBBReference(*I->getParent()) << " " << *MI2);
502 I->getParent()->splice(I, MI2->getParent(), MI2);
503 MergedInstrs.insert(MI1);
504 Changed = true;
505 ++I1;
506 break;
507 }
508 }
509 ++I2;
510 }
511 ++I1;
512 }
513 }
514
515 // Remove initializations that were merged into another.
516 for (auto &Init : Inits) {
517 auto &Defs = Init.second;
518 auto I = Defs.begin();
519 while (I != Defs.end()) {
520 if (MergedInstrs.count(*I)) {
521 (*I)->eraseFromParent();
522 I = Defs.erase(I);
523 } else
524 ++I;
525 }
526 }
527
528 // Try to schedule SGPR initializations as early as possible in the MBB.
529 for (auto &Init : Inits) {
530 auto &Defs = Init.second;
531 for (auto MI : Defs) {
532 auto MBB = MI->getParent();
533 MachineInstr &BoundaryMI = *getFirstNonPrologue(MBB, TII);
534 MachineBasicBlock::reverse_iterator B(BoundaryMI);
535 // Check if B should actually be a boundary. If not, set the previous
536 // instruction as the boundary instead.
537 if (!TII->isBasicBlockPrologue(*B))
538 B++;
539
540 auto R = std::next(MI->getReverseIterator());
541 const unsigned Threshold = 50;
542 // Search until B or Threshold for a place to insert the initialization.
543 for (unsigned I = 0; R != B && I < Threshold; ++R, ++I)
544 if (R->readsRegister(Reg, TRI) || R->definesRegister(Reg, TRI) ||
545 TII->isSchedulingBoundary(*R, MBB, *MBB->getParent()))
546 break;
547
548 // Move to directly after R.
549 if (&*--R != MI)
550 MBB->splice(*R, MBB, MI);
551 }
552 }
553
554 if (Changed)
555 MRI.clearKillFlags(Reg);
556
557 return Changed;
558}
559
560bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
561 // Only need to run this in SelectionDAG path.
562 if (MF.getProperties().hasProperty(
1
Taking false branch
563 MachineFunctionProperties::Property::Selected))
564 return false;
565
566 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
567 MRI = &MF.getRegInfo();
568 TRI = ST.getRegisterInfo();
569 TII = ST.getInstrInfo();
570 MDT = &getAnalysis<MachineDominatorTree>();
571
572 for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
2
Loop condition is false. Execution continues on line 769
573 BI != BE; ++BI) {
574 MachineBasicBlock *MBB = &*BI;
575 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
576 ++I) {
577 MachineInstr &MI = *I;
578
579 switch (MI.getOpcode()) {
580 default:
581 continue;
582 case AMDGPU::COPY:
583 case AMDGPU::WQM:
584 case AMDGPU::STRICT_WQM:
585 case AMDGPU::SOFT_WQM:
586 case AMDGPU::STRICT_WWM: {
587 Register DstReg = MI.getOperand(0).getReg();
588
589 const TargetRegisterClass *SrcRC, *DstRC;
590 std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, *MRI);
591
592 if (!DstReg.isVirtual()) {
593 // If the destination register is a physical register there isn't
594 // really much we can do to fix this.
595 // Some special instructions use M0 as an input. Some even only use
596 // the first lane. Insert a readfirstlane and hope for the best.
597 if (DstReg == AMDGPU::M0 && TRI->hasVectorRegisters(SrcRC)) {
598 Register TmpReg
599 = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
600
601 BuildMI(*MBB, MI, MI.getDebugLoc(),
602 TII->get(AMDGPU::V_READFIRSTLANE_B32), TmpReg)
603 .add(MI.getOperand(1));
604 MI.getOperand(1).setReg(TmpReg);
605 }
606
607 continue;
608 }
609
610 if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
611 Register SrcReg = MI.getOperand(1).getReg();
612 if (!SrcReg.isVirtual()) {
613 MachineBasicBlock *NewBB = TII->moveToVALU(MI, MDT);
614 if (NewBB && NewBB != MBB) {
615 MBB = NewBB;
616 E = MBB->end();
617 BI = MachineFunction::iterator(MBB);
618 BE = MF.end();
619 }
620 assert((!NewBB || NewBB == I->getParent()) &&
621 "moveToVALU did not return the right basic block");
622 break;
623 }
624
625 MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
626 unsigned SMovOp;
627 int64_t Imm;
628 // If we are just copying an immediate, we can replace the copy with
629 // s_mov_b32.
630 if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
631 MI.getOperand(1).ChangeToImmediate(Imm);
632 MI.addImplicitDefUseOperands(MF);
633 MI.setDesc(TII->get(SMovOp));
634 break;
635 }
636 MachineBasicBlock *NewBB = TII->moveToVALU(MI, MDT);
637 if (NewBB && NewBB != MBB) {
638 MBB = NewBB;
639 E = MBB->end();
640 BI = MachineFunction::iterator(MBB);
641 BE = MF.end();
642 }
643 assert((!NewBB || NewBB == I->getParent()) &&
644 "moveToVALU did not return the right basic block");
645 } else if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI)) {
646 tryChangeVGPRtoSGPRinCopy(MI, TRI, TII);
647 }
648
649 break;
650 }
651 case AMDGPU::PHI: {
652 MachineBasicBlock *NewBB = processPHINode(MI);
653 if (NewBB && NewBB != MBB) {
654 MBB = NewBB;
655 E = MBB->end();
656 BI = MachineFunction::iterator(MBB);
657 BE = MF.end();
658 }
659 assert((!NewBB || NewBB == I->getParent()) &&
660 "moveToVALU did not return the right basic block");
661 break;
662 }
663 case AMDGPU::REG_SEQUENCE: {
664 if (TRI->hasVectorRegisters(TII->getOpRegClass(MI, 0)) ||
665 !hasVectorOperands(MI, TRI)) {
666 foldVGPRCopyIntoRegSequence(MI, TRI, TII, *MRI);
667 continue;
668 }
669
670 LLVM_DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);
671
672 MachineBasicBlock *NewBB = TII->moveToVALU(MI, MDT);
673 if (NewBB && NewBB != MBB) {
674 MBB = NewBB;
675 E = MBB->end();
676 BI = MachineFunction::iterator(MBB);
677 BE = MF.end();
678 }
679 assert((!NewBB || NewBB == I->getParent()) &&
680 "moveToVALU did not return the right basic block");
681 break;
682 }
683 case AMDGPU::INSERT_SUBREG: {
684 const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
685 DstRC = MRI->getRegClass(MI.getOperand(0).getReg());
686 Src0RC = MRI->getRegClass(MI.getOperand(1).getReg());
687 Src1RC = MRI->getRegClass(MI.getOperand(2).getReg());
688 if (TRI->isSGPRClass(DstRC) &&
689 (TRI->hasVectorRegisters(Src0RC) ||
690 TRI->hasVectorRegisters(Src1RC))) {
691 LLVM_DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
692 MachineBasicBlock *NewBB = TII->moveToVALU(MI, MDT);
693 if (NewBB && NewBB != MBB) {
694 MBB = NewBB;
695 E = MBB->end();
696 BI = MachineFunction::iterator(MBB);
697 BE = MF.end();
698 }
699 assert((!NewBB || NewBB == I->getParent()) &&
700 "moveToVALU did not return the right basic block");
701 }
702 break;
703 }
704 case AMDGPU::V_WRITELANE_B32: {
705 // Some architectures allow more than one constant bus access without
706 // SGPR restriction
707 if (ST.getConstantBusLimit(MI.getOpcode()) != 1)
708 break;
709
710 // Writelane is special in that it can use SGPR and M0 (which would
711 // normally count as using the constant bus twice - but in this case it
712 // is allowed since the lane selector doesn't count as a use of the
713 // constant bus). However, it is still required to abide by the 1 SGPR
714 // rule. Apply a fix here as we might have multiple SGPRs after
715 // legalizing VGPRs to SGPRs
716 int Src0Idx =
717 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
718 int Src1Idx =
719 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
720 MachineOperand &Src0 = MI.getOperand(Src0Idx);
721 MachineOperand &Src1 = MI.getOperand(Src1Idx);
722
723 // Check to see if the instruction violates the 1 SGPR rule
724 if ((Src0.isReg() && TRI->isSGPRReg(*MRI, Src0.getReg()) &&
725 Src0.getReg() != AMDGPU::M0) &&
726 (Src1.isReg() && TRI->isSGPRReg(*MRI, Src1.getReg()) &&
727 Src1.getReg() != AMDGPU::M0)) {
728
729 // Check for trivially easy constant prop into one of the operands
730 // If this is the case then perform the operation now to resolve SGPR
731 // issue. If we don't do that here we will always insert a mov to m0
732 // that can't be resolved in later operand folding pass
733 bool Resolved = false;
734 for (MachineOperand *MO : {&Src0, &Src1}) {
735 if (MO->getReg().isVirtual()) {
736 MachineInstr *DefMI = MRI->getVRegDef(MO->getReg());
737 if (DefMI && TII->isFoldableCopy(*DefMI)) {
738 const MachineOperand &Def = DefMI->getOperand(0);
739 if (Def.isReg() &&
740 MO->getReg() == Def.getReg() &&
741 MO->getSubReg() == Def.getSubReg()) {
742 const MachineOperand &Copied = DefMI->getOperand(1);
743 if (Copied.isImm() &&
744 TII->isInlineConstant(APInt(64, Copied.getImm(), true))) {
745 MO->ChangeToImmediate(Copied.getImm());
746 Resolved = true;
747 break;
748 }
749 }
750 }
751 }
752 }
753
754 if (!Resolved) {
755 // Haven't managed to resolve by replacing an SGPR with an immediate
756 // Move src1 to be in M0
757 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
758 TII->get(AMDGPU::COPY), AMDGPU::M0)
759 .add(Src1);
760 Src1.ChangeToRegister(AMDGPU::M0, false);
761 }
762 }
763 break;
764 }
765 }
766 }
767 }
768
769 if (MF.getTarget().getOptLevel() > CodeGenOpt::None && EnableM0Merge)
3
Assuming the condition is true
4
Assuming the condition is true
5
Taking true branch
770 hoistAndMergeSGPRInits(AMDGPU::M0, *MRI, TRI, *MDT, TII);
6
Calling 'hoistAndMergeSGPRInits'
771
772 return true;
773}
774
775MachineBasicBlock *SIFixSGPRCopies::processPHINode(MachineInstr &MI) {
776 unsigned numVGPRUses = 0;
777 bool AllAGPRUses = true;
778 SetVector<const MachineInstr *> worklist;
779 SmallSet<const MachineInstr *, 4> Visited;
780 SetVector<MachineInstr *> PHIOperands;
781 MachineBasicBlock *CreatedBB = nullptr;
782 worklist.insert(&MI);
783 Visited.insert(&MI);
784 while (!worklist.empty()) {
785 const MachineInstr *Instr = worklist.pop_back_val();
786 Register Reg = Instr->getOperand(0).getReg();
787 for (const auto &Use : MRI->use_operands(Reg)) {
788 const MachineInstr *UseMI = Use.getParent();
789 AllAGPRUses &= (UseMI->isCopy() &&
790 TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg())) ||
791 TRI->isAGPR(*MRI, Use.getReg());
792 if (UseMI->isCopy() || UseMI->isRegSequence()) {
793 if (UseMI->isCopy() &&
794 UseMI->getOperand(0).getReg().isPhysical() &&
795 !TRI->isSGPRReg(*MRI, UseMI->getOperand(0).getReg())) {
796 numVGPRUses++;
797 }
798 if (Visited.insert(UseMI).second)
799 worklist.insert(UseMI);
800
801 continue;
802 }
803
804 if (UseMI->isPHI()) {
805 const TargetRegisterClass *UseRC = MRI->getRegClass(Use.getReg());
806 if (!TRI->isSGPRReg(*MRI, Use.getReg()) &&
807 UseRC != &AMDGPU::VReg_1RegClass)
808 numVGPRUses++;
809 continue;
810 }
811
812 const TargetRegisterClass *OpRC =
813 TII->getOpRegClass(*UseMI, UseMI->getOperandNo(&Use));
814 if (!TRI->isSGPRClass(OpRC) && OpRC != &AMDGPU::VS_32RegClass &&
815 OpRC != &AMDGPU::VS_64RegClass) {
816 numVGPRUses++;
817 }
818 }
819 }
820
821 Register PHIRes = MI.getOperand(0).getReg();
822 const TargetRegisterClass *RC0 = MRI->getRegClass(PHIRes);
823 if (AllAGPRUses && numVGPRUses && !TRI->hasAGPRs(RC0)) {
824 LLVM_DEBUG(dbgs() << "Moving PHI to AGPR: " << MI);
825 MRI->setRegClass(PHIRes, TRI->getEquivalentAGPRClass(RC0));
826 for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
827 MachineInstr *DefMI = MRI->getVRegDef(MI.getOperand(I).getReg());
828 if (DefMI && DefMI->isPHI())
829 PHIOperands.insert(DefMI);
830 }
831 }
832
833 bool hasVGPRInput = false;
834 for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
835 Register InputReg = MI.getOperand(i).getReg();
836 MachineInstr *Def = MRI->getVRegDef(InputReg);
837 if (TRI->isVectorRegister(*MRI, InputReg)) {
838 if (Def->isCopy()) {
839 Register SrcReg = Def->getOperand(1).getReg();
840 const TargetRegisterClass *RC =
841 TRI->getRegClassForReg(*MRI, SrcReg);
842 if (TRI->isSGPRClass(RC))
843 continue;
844 }
845 hasVGPRInput = true;
846 break;
847 }
848 else if (Def->isCopy() &&
849 TRI->isVectorRegister(*MRI, Def->getOperand(1).getReg())) {
850 Register SrcReg = Def->getOperand(1).getReg();
851 MachineInstr *SrcDef = MRI->getVRegDef(SrcReg);
852 unsigned SMovOp;
853 int64_t Imm;
854 if (!isSafeToFoldImmIntoCopy(Def, SrcDef, TII, SMovOp, Imm)) {
855 hasVGPRInput = true;
856 break;
857 } else {
858 // Formally, if we did not do this right away
859 // it would be done on the next iteration of the
860 // runOnMachineFunction main loop. But why not if we can?
861 MachineFunction *MF = MI.getParent()->getParent();
862 Def->getOperand(1).ChangeToImmediate(Imm);
863 Def->addImplicitDefUseOperands(*MF);
864 Def->setDesc(TII->get(SMovOp));
865 }
866 }
867 }
868
869 if ((!TRI->isVectorRegister(*MRI, PHIRes) &&
870 RC0 != &AMDGPU::VReg_1RegClass) &&
871 (hasVGPRInput || numVGPRUses > 1)) {
872 LLVM_DEBUG(dbgs() << "Fixing PHI: " << MI);
873 CreatedBB = TII->moveToVALU(MI);
874 }
875 else {
876 LLVM_DEBUG(dbgs() << "Legalizing PHI: " << MI);
877 TII->legalizeOperands(MI, MDT);
878 }
879
880 // Propagate register class back to PHI operands which are PHI themselves.
881 while (!PHIOperands.empty()) {
882 processPHINode(*PHIOperands.pop_back_val());
883 }
884 return CreatedBB;
885}
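
To recap the analyzer's trail through this file: runOnMachineFunction skips the Selected-property early return (step 1), never enters the per-block loop (step 2), takes the optimization-level/EnableM0Merge branch (steps 3-5), and calls hoistAndMergeSGPRInits for M0 (step 6). There, two initializations of the same immediate, neither of which dominates the other (steps 9 and 10), reach the findNearestCommonDominator call at line 487 (step 11). Note that the pass null-checks the returned block at line 489; the warning concerns a dereference inside the dominator-tree query itself, in the headers quoted next. In the notation of the file header above, the driving CFG shape looks like this (block names and the immediate are illustrative):

BB0:
  BRANCH %cond BB1, BB2     <- nearest common dominator, candidate hoist point
BB1:
  M0 = S_MOV_B32 imm
BB2:
  M0 = S_MOV_B32 imm        <- same immediate; neither BB1 nor BB2 dominates the other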

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/CodeGen/MachineDominators.h

1//==- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation -*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines classes mirroring those in llvm/Analysis/Dominators.h,
10// but for target-specific code rather than target-independent IR.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CODEGEN_MACHINEDOMINATORS_H
15#define LLVM_CODEGEN_MACHINEDOMINATORS_H
16
17#include "llvm/ADT/SmallSet.h"
18#include "llvm/ADT/SmallVector.h"
19#include "llvm/CodeGen/MachineBasicBlock.h"
20#include "llvm/CodeGen/MachineFunctionPass.h"
21#include "llvm/CodeGen/MachineInstr.h"
22#include "llvm/Support/GenericDomTree.h"
23#include "llvm/Support/GenericDomTreeConstruction.h"
24#include <cassert>
25#include <memory>
26
27namespace llvm {
28
29template <>
30inline void DominatorTreeBase<MachineBasicBlock, false>::addRoot(
31 MachineBasicBlock *MBB) {
32 this->Roots.push_back(MBB);
33}
34
35extern template class DomTreeNodeBase<MachineBasicBlock>;
36extern template class DominatorTreeBase<MachineBasicBlock, false>; // DomTree
37extern template class DominatorTreeBase<MachineBasicBlock, true>; // PostDomTree
38
39using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;
40
41//===-------------------------------------
42/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to
43/// compute a normal dominator tree.
44///
45class MachineDominatorTree : public MachineFunctionPass {
46 using DomTreeT = DomTreeBase<MachineBasicBlock>;
47
48 /// Helper structure used to hold all the basic blocks
49 /// involved in the split of a critical edge.
50 struct CriticalEdge {
51 MachineBasicBlock *FromBB;
52 MachineBasicBlock *ToBB;
53 MachineBasicBlock *NewBB;
54 };
55
56 /// Pile up all the critical edges to be split.
57 /// The splitting of a critical edge is local and thus, it is possible
58 /// to apply several of those changes at the same time.
59 mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;
60
61 /// Remember all the basic blocks that are inserted during
62 /// edge splitting.
63 /// Invariant: NewBBs == all the basic blocks contained in the NewBB
64 /// field of all the elements of CriticalEdgesToSplit.
65 /// I.e., for all elt in CriticalEdgesToSplit, there exists BB in NewBBs
66 /// such that BB == elt.NewBB.
67 mutable SmallSet<MachineBasicBlock *, 32> NewBBs;
68
69 /// The DominatorTreeBase that is used to compute a normal dominator tree.
70 std::unique_ptr<DomTreeT> DT;
71
72 /// Apply all the recorded critical edges to the DT.
73 /// This updates the underlying DT information in a way that uses
74 /// the fast query path of DT as much as possible.
75 ///
76 /// \post CriticalEdgesToSplit.empty().
77 void applySplitCriticalEdges() const;
78
79public:
80 static char ID; // Pass ID, replacement for typeid
81
82 MachineDominatorTree();
83 explicit MachineDominatorTree(MachineFunction &MF) : MachineFunctionPass(ID) {
84 calculate(MF);
85 }
86
87 DomTreeT &getBase() {
88 if (!DT) DT.reset(new DomTreeT());
89 applySplitCriticalEdges();
90 return *DT;
91 }
92
93 void getAnalysisUsage(AnalysisUsage &AU) const override;
94
95 MachineBasicBlock *getRoot() const {
96 applySplitCriticalEdges();
97 return DT->getRoot();
98 }
99
100 MachineDomTreeNode *getRootNode() const {
101 applySplitCriticalEdges();
102 return DT->getRootNode();
103 }
104
105 bool runOnMachineFunction(MachineFunction &F) override;
106
107 void calculate(MachineFunction &F);
108
109 bool dominates(const MachineDomTreeNode *A,
110 const MachineDomTreeNode *B) const {
111 applySplitCriticalEdges();
112 return DT->dominates(A, B);
113 }
114
115 bool dominates(const MachineBasicBlock *A, const MachineBasicBlock *B) const {
116 applySplitCriticalEdges();
117 return DT->dominates(A, B);
118 }
119
120 // dominates - Return true if A dominates B. This performs the
121 // special checks necessary if A and B are in the same basic block.
122 bool dominates(const MachineInstr *A, const MachineInstr *B) const {
123 applySplitCriticalEdges();
124 const MachineBasicBlock *BBA = A->getParent(), *BBB = B->getParent();
125 if (BBA != BBB) return DT->dominates(BBA, BBB);
126
127 // Loop through the basic block until we find A or B.
128 MachineBasicBlock::const_iterator I = BBA->begin();
129 for (; &*I != A && &*I != B; ++I)
130 /*empty*/ ;
131
132 return &*I == A;
133 }
134
135 bool properlyDominates(const MachineDomTreeNode *A,
136 const MachineDomTreeNode *B) const {
137 applySplitCriticalEdges();
138 return DT->properlyDominates(A, B);
139 }
140
141 bool properlyDominates(const MachineBasicBlock *A,
142 const MachineBasicBlock *B) const {
143 applySplitCriticalEdges();
144 return DT->properlyDominates(A, B);
145 }
146
147 /// findNearestCommonDominator - Find nearest common dominator basic block
148 /// for basic blocks A and B. If there is no such block then return NULL.
149 MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
150 MachineBasicBlock *B) {
151 applySplitCriticalEdges();
152 return DT->findNearestCommonDominator(A, B);
12
Calling 'DominatorTreeBase::findNearestCommonDominator'
153 }
154
155 MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
156 applySplitCriticalEdges();
157 return DT->getNode(BB);
158 }
159
160 /// getNode - return the (Post)DominatorTree node for the specified basic
161 /// block. This is the same as using operator[] on this class.
162 ///
163 MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
164 applySplitCriticalEdges();
165 return DT->getNode(BB);
166 }
167
168 /// addNewBlock - Add a new node to the dominator tree information. This
169 /// creates a new node as a child of DomBB dominator node, linking it into
170 /// the children list of the immediate dominator.
171 MachineDomTreeNode *addNewBlock(MachineBasicBlock *BB,
172 MachineBasicBlock *DomBB) {
173 applySplitCriticalEdges();
174 return DT->addNewBlock(BB, DomBB);
175 }
176
177 /// changeImmediateDominator - This method is used to update the dominator
178 /// tree information when a node's immediate dominator changes.
179 ///
180 void changeImmediateDominator(MachineBasicBlock *N,
181 MachineBasicBlock *NewIDom) {
182 applySplitCriticalEdges();
183 DT->changeImmediateDominator(N, NewIDom);
184 }
185
186 void changeImmediateDominator(MachineDomTreeNode *N,
187 MachineDomTreeNode *NewIDom) {
188 applySplitCriticalEdges();
189 DT->changeImmediateDominator(N, NewIDom);
190 }
191
192 /// eraseNode - Removes a node from the dominator tree. Block must not
193 /// dominate any other blocks. Removes node from its immediate dominator's
194 /// children list. Deletes dominator node associated with basic block BB.
195 void eraseNode(MachineBasicBlock *BB) {
196 applySplitCriticalEdges();
197 DT->eraseNode(BB);
198 }
199
200 /// splitBlock - BB is split and now it has one successor. Update dominator
201 /// tree to reflect this change.
202 void splitBlock(MachineBasicBlock* NewBB) {
203 applySplitCriticalEdges();
204 DT->splitBlock(NewBB);
205 }
206
207 /// isReachableFromEntry - Return true if A is dominated by the entry
208 /// block of the function containing it.
209 bool isReachableFromEntry(const MachineBasicBlock *A) {
210 applySplitCriticalEdges();
211 return DT->isReachableFromEntry(A);
212 }
213
214 void releaseMemory() override;
215
216 void verifyAnalysis() const override;
217
218 void print(raw_ostream &OS, const Module*) const override;
219
220 /// Record that the critical edge (FromBB, ToBB) has been
221 /// split with NewBB.
222 /// It is best to use this method instead of directly updating the
223 /// underlying information, because this helps mitigate the
224 /// number of times the DT information is invalidated.
225 ///
226 /// \note Do not use this method with regular edges.
227 ///
228 /// \note To benefit from the compile time improvement incurred by this
229 /// method, the users of this method have to limit the queries to the DT
230 /// interface between two edges splitting. In other words, they have to
231 /// pack the splitting of critical edges as much as possible.
232 void recordSplitCriticalEdge(MachineBasicBlock *FromBB,
233 MachineBasicBlock *ToBB,
234 MachineBasicBlock *NewBB) {
235 bool Inserted = NewBBs.insert(NewBB).second;
236 (void)Inserted;
237 assert(Inserted &&
238 "A basic block inserted via edge splitting cannot appear twice");
239 CriticalEdgesToSplit.push_back({FromBB, ToBB, NewBB});
240 }
241};
242
243//===-------------------------------------
244/// DominatorTree GraphTraits specialization so the DominatorTree can be
245/// iterable by generic graph iterators.
246///
247
248template <class Node, class ChildIterator>
249struct MachineDomTreeGraphTraitsBase {
250 using NodeRef = Node *;
251 using ChildIteratorType = ChildIterator;
252
253 static NodeRef getEntryNode(NodeRef N) { return N; }
254 static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
255 static ChildIteratorType child_end(NodeRef N) { return N->end(); }
256};
257
258template <class T> struct GraphTraits;
259
260template <>
261struct GraphTraits<MachineDomTreeNode *>
262 : public MachineDomTreeGraphTraitsBase<MachineDomTreeNode,
263 MachineDomTreeNode::const_iterator> {
264};
265
266template <>
267struct GraphTraits<const MachineDomTreeNode *>
268 : public MachineDomTreeGraphTraitsBase<const MachineDomTreeNode,
269 MachineDomTreeNode::const_iterator> {
270};
271
272template <> struct GraphTraits<MachineDominatorTree*>
273 : public GraphTraits<MachineDomTreeNode *> {
274 static NodeRef getEntryNode(MachineDominatorTree *DT) {
275 return DT->getRootNode();
276 }
277};
278
279} // end namespace llvm
280
281#endif // LLVM_CODEGEN_MACHINEDOMINATORS_H
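
MachineDominatorTree::findNearestCommonDominator (step 12 above) forwards straight into DominatorTreeBase without confirming that both blocks have nodes in the tree. A caller that wants to rule out the flagged dereference can check first. A minimal sketch, assuming only the interface quoted above (getNode, findNearestCommonDominator); the helper itself is hypothetical, not part of LLVM:

// Hypothetical guard: only query the nearest common dominator when both
// blocks are represented in the tree, so the node lookups inside
// DominatorTreeBase::findNearestCommonDominator cannot come back null.
static MachineBasicBlock *
safeNearestCommonDominator(MachineDominatorTree &MDT,
                           MachineBasicBlock *A, MachineBasicBlock *B) {
  if (!A || !B || !MDT.getNode(A) || !MDT.getNode(B))
    return nullptr; // e.g. an unreachable block has no dominator-tree node
  return MDT.findNearestCommonDominator(A, B);
}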

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/GenericDomTree.h

1//===- GenericDomTree.h - Generic dominator trees for graphs ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file defines a set of templates that efficiently compute a dominator
11/// tree over a generic graph. This is used typically in LLVM for fast
12/// dominance queries on the CFG, but is fully generic w.r.t. the underlying
13/// graph types.
14///
15/// Unlike ADT/* graph algorithms, the generic dominator tree has more
16/// requirements on the graph's NodeRef. The NodeRef should be a pointer, and
17/// NodeRef->getParent() must return the parent node, which is also a pointer.
18///
19/// FIXME: Maybe GenericDomTree needs a TreeTraits, instead of GraphTraits.
20///
21//===----------------------------------------------------------------------===//
22
23#ifndef LLVM_SUPPORT_GENERICDOMTREE_H
24#define LLVM_SUPPORT_GENERICDOMTREE_H
25
26#include "llvm/ADT/DenseMap.h"
27#include "llvm/ADT/GraphTraits.h"
28#include "llvm/ADT/STLExtras.h"
29#include "llvm/ADT/SmallPtrSet.h"
30#include "llvm/ADT/SmallVector.h"
31#include "llvm/Support/CFGDiff.h"
32#include "llvm/Support/CFGUpdate.h"
33#include "llvm/Support/raw_ostream.h"
34#include <algorithm>
35#include <cassert>
36#include <cstddef>
37#include <iterator>
38#include <memory>
39#include <type_traits>
40#include <utility>
41
42namespace llvm {
43
44template <typename NodeT, bool IsPostDom>
45class DominatorTreeBase;
46
47namespace DomTreeBuilder {
48template <typename DomTreeT>
49struct SemiNCAInfo;
50} // namespace DomTreeBuilder
51
52/// Base class for the actual dominator tree node.
53template <class NodeT> class DomTreeNodeBase {
54 friend class PostDominatorTree;
55 friend class DominatorTreeBase<NodeT, false>;
56 friend class DominatorTreeBase<NodeT, true>;
57 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, false>>;
58 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, true>>;
59
60 NodeT *TheBB;
61 DomTreeNodeBase *IDom;
62 unsigned Level;
63 SmallVector<DomTreeNodeBase *, 4> Children;
64 mutable unsigned DFSNumIn = ~0;
65 mutable unsigned DFSNumOut = ~0;
66
67 public:
68 DomTreeNodeBase(NodeT *BB, DomTreeNodeBase *iDom)
69 : TheBB(BB), IDom(iDom), Level(IDom ? IDom->Level + 1 : 0) {}
70
71 using iterator = typename SmallVector<DomTreeNodeBase *, 4>::iterator;
72 using const_iterator =
73 typename SmallVector<DomTreeNodeBase *, 4>::const_iterator;
74
75 iterator begin() { return Children.begin(); }
76 iterator end() { return Children.end(); }
77 const_iterator begin() const { return Children.begin(); }
78 const_iterator end() const { return Children.end(); }
79
80 DomTreeNodeBase *const &back() const { return Children.back(); }
81 DomTreeNodeBase *&back() { return Children.back(); }
82
83 iterator_range<iterator> children() { return make_range(begin(), end()); }
84 iterator_range<const_iterator> children() const {
85 return make_range(begin(), end());
86 }
87
88 NodeT *getBlock() const { return TheBB; }
89 DomTreeNodeBase *getIDom() const { return IDom; }
90 unsigned getLevel() const { return Level; }
91
92 std::unique_ptr<DomTreeNodeBase> addChild(
93 std::unique_ptr<DomTreeNodeBase> C) {
94 Children.push_back(C.get());
95 return C;
96 }
97
98 bool isLeaf() const { return Children.empty(); }
99 size_t getNumChildren() const { return Children.size(); }
100
101 void clearAllChildren() { Children.clear(); }
102
103 bool compare(const DomTreeNodeBase *Other) const {
104 if (getNumChildren() != Other->getNumChildren())
105 return true;
106
107 if (Level != Other->Level) return true;
108
109 SmallPtrSet<const NodeT *, 4> OtherChildren;
110 for (const DomTreeNodeBase *I : *Other) {
111 const NodeT *Nd = I->getBlock();
112 OtherChildren.insert(Nd);
113 }
114
115 for (const DomTreeNodeBase *I : *this) {
116 const NodeT *N = I->getBlock();
117 if (OtherChildren.count(N) == 0)
118 return true;
119 }
120 return false;
121 }
122
123 void setIDom(DomTreeNodeBase *NewIDom) {
124 assert(IDom && "No immediate dominator?");
125 if (IDom == NewIDom) return;
126
127 auto I = find(IDom->Children, this);
128 assert(I != IDom->Children.end() &&
129 "Not in immediate dominator children set!");
130 // I am no longer your child...
131 IDom->Children.erase(I);
132
133 // Switch to new dominator
134 IDom = NewIDom;
135 IDom->Children.push_back(this);
136
137 UpdateLevel();
138 }
139
140 /// getDFSNumIn/getDFSNumOut - These return the DFS visitation order for nodes
141 /// in the dominator tree. They are only guaranteed valid if
142 /// updateDFSNumbers() has been called.
143 unsigned getDFSNumIn() const { return DFSNumIn; }
144 unsigned getDFSNumOut() const { return DFSNumOut; }
145
146private:
147 // Return true if this node is dominated by other. Use this only if DFS info
148 // is valid.
149 bool DominatedBy(const DomTreeNodeBase *other) const {
150 return this->DFSNumIn >= other->DFSNumIn &&
151 this->DFSNumOut <= other->DFSNumOut;
152 }
153
154 void UpdateLevel() {
155 assert(IDom);
156 if (Level == IDom->Level + 1) return;
157
158 SmallVector<DomTreeNodeBase *, 64> WorkStack = {this};
159
160 while (!WorkStack.empty()) {
161 DomTreeNodeBase *Current = WorkStack.pop_back_val();
162 Current->Level = Current->IDom->Level + 1;
163
164 for (DomTreeNodeBase *C : *Current) {
165 assert(C->IDom);
166 if (C->Level != C->IDom->Level + 1) WorkStack.push_back(C);
167 }
168 }
169 }
170};
171
172template <class NodeT>
173raw_ostream &operator<<(raw_ostream &O, const DomTreeNodeBase<NodeT> *Node) {
174 if (Node->getBlock())
175 Node->getBlock()->printAsOperand(O, false);
176 else
177 O << " <<exit node>>";
178
179 O << " {" << Node->getDFSNumIn() << "," << Node->getDFSNumOut() << "} ["
180 << Node->getLevel() << "]\n";
181
182 return O;
183}
184
185template <class NodeT>
186void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &O,
187 unsigned Lev) {
188 O.indent(2 * Lev) << "[" << Lev << "] " << N;
189 for (typename DomTreeNodeBase<NodeT>::const_iterator I = N->begin(),
190 E = N->end();
191 I != E; ++I)
192 PrintDomTree<NodeT>(*I, O, Lev + 1);
193}
194
195namespace DomTreeBuilder {
196// The routines below are provided in a separate header but referenced here.
197template <typename DomTreeT>
198void Calculate(DomTreeT &DT);
199
200template <typename DomTreeT>
201void CalculateWithUpdates(DomTreeT &DT,
202 ArrayRef<typename DomTreeT::UpdateType> Updates);
203
204template <typename DomTreeT>
205void InsertEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
206 typename DomTreeT::NodePtr To);
207
208template <typename DomTreeT>
209void DeleteEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
210 typename DomTreeT::NodePtr To);
211
212template <typename DomTreeT>
213void ApplyUpdates(DomTreeT &DT,
214 GraphDiff<typename DomTreeT::NodePtr,
215 DomTreeT::IsPostDominator> &PreViewCFG,
216 GraphDiff<typename DomTreeT::NodePtr,
217 DomTreeT::IsPostDominator> *PostViewCFG);
218
219template <typename DomTreeT>
220bool Verify(const DomTreeT &DT, typename DomTreeT::VerificationLevel VL);
221} // namespace DomTreeBuilder
222
223/// Core dominator tree base class.
224///
225/// This class is a generic template over graph nodes. It is instantiated for
226/// various graphs in the LLVM IR or in the code generator.
227template <typename NodeT, bool IsPostDom>
228class DominatorTreeBase {
229 public:
230 static_assert(std::is_pointer<typename GraphTraits<NodeT *>::NodeRef>::value,
231 "Currently DominatorTreeBase supports only pointer nodes");
232 using NodeType = NodeT;
233 using NodePtr = NodeT *;
234 using ParentPtr = decltype(std::declval<NodeT *>()->getParent());
235 static_assert(std::is_pointer<ParentPtr>::value,
236 "Currently NodeT's parent must be a pointer type");
237 using ParentType = std::remove_pointer_t<ParentPtr>;
238 static constexpr bool IsPostDominator = IsPostDom;
239
240 using UpdateType = cfg::Update<NodePtr>;
241 using UpdateKind = cfg::UpdateKind;
242 static constexpr UpdateKind Insert = UpdateKind::Insert;
243 static constexpr UpdateKind Delete = UpdateKind::Delete;
244
245 enum class VerificationLevel { Fast, Basic, Full };
246
247protected:
248 // Dominators always have a single root, postdominators can have more.
249 SmallVector<NodeT *, IsPostDom ? 4 : 1> Roots;
250
251 using DomTreeNodeMapType =
252 DenseMap<NodeT *, std::unique_ptr<DomTreeNodeBase<NodeT>>>;
253 DomTreeNodeMapType DomTreeNodes;
254 DomTreeNodeBase<NodeT> *RootNode = nullptr;
255 ParentPtr Parent = nullptr;
256
257 mutable bool DFSInfoValid = false;
258 mutable unsigned int SlowQueries = 0;
259
260 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase>;
261
262 public:
263 DominatorTreeBase() {}
264
265 DominatorTreeBase(DominatorTreeBase &&Arg)
266 : Roots(std::move(Arg.Roots)),
267 DomTreeNodes(std::move(Arg.DomTreeNodes)),
268 RootNode(Arg.RootNode),
269 Parent(Arg.Parent),
270 DFSInfoValid(Arg.DFSInfoValid),
271 SlowQueries(Arg.SlowQueries) {
272 Arg.wipe();
273 }
274
275 DominatorTreeBase &operator=(DominatorTreeBase &&RHS) {
276 Roots = std::move(RHS.Roots);
277 DomTreeNodes = std::move(RHS.DomTreeNodes);
278 RootNode = RHS.RootNode;
279 Parent = RHS.Parent;
280 DFSInfoValid = RHS.DFSInfoValid;
281 SlowQueries = RHS.SlowQueries;
282 RHS.wipe();
283 return *this;
284 }
285
286 DominatorTreeBase(const DominatorTreeBase &) = delete;
287 DominatorTreeBase &operator=(const DominatorTreeBase &) = delete;
288
289 /// Iteration over roots.
290 ///
291 /// This may include multiple blocks if we are computing post dominators.
292 /// For forward dominators, this will always be a single block (the entry
293 /// block).
294 using root_iterator = typename SmallVectorImpl<NodeT *>::iterator;
295 using const_root_iterator = typename SmallVectorImpl<NodeT *>::const_iterator;
296
297 root_iterator root_begin() { return Roots.begin(); }
298 const_root_iterator root_begin() const { return Roots.begin(); }
299 root_iterator root_end() { return Roots.end(); }
300 const_root_iterator root_end() const { return Roots.end(); }
301
302 size_t root_size() const { return Roots.size(); }
303
304 iterator_range<root_iterator> roots() {
305 return make_range(root_begin(), root_end());
306 }
307 iterator_range<const_root_iterator> roots() const {
308 return make_range(root_begin(), root_end());
309 }
310
 311 /// isPostDominator - Returns true if this analysis is based on postdoms
312 ///
313 bool isPostDominator() const { return IsPostDominator; }
[14] Returning zero (loaded from 'IsPostDominator'), which participates in a condition later
314
315 /// compare - Return false if the other dominator tree base matches this
316 /// dominator tree base. Otherwise return true.
317 bool compare(const DominatorTreeBase &Other) const {
318 if (Parent != Other.Parent) return true;
319
320 if (Roots.size() != Other.Roots.size())
321 return true;
322
323 if (!std::is_permutation(Roots.begin(), Roots.end(), Other.Roots.begin()))
324 return true;
325
326 const DomTreeNodeMapType &OtherDomTreeNodes = Other.DomTreeNodes;
327 if (DomTreeNodes.size() != OtherDomTreeNodes.size())
328 return true;
329
330 for (const auto &DomTreeNode : DomTreeNodes) {
331 NodeT *BB = DomTreeNode.first;
332 typename DomTreeNodeMapType::const_iterator OI =
333 OtherDomTreeNodes.find(BB);
334 if (OI == OtherDomTreeNodes.end())
335 return true;
336
337 DomTreeNodeBase<NodeT> &MyNd = *DomTreeNode.second;
338 DomTreeNodeBase<NodeT> &OtherNd = *OI->second;
339
340 if (MyNd.compare(&OtherNd))
341 return true;
342 }
343
344 return false;
345 }
346
347 /// getNode - return the (Post)DominatorTree node for the specified basic
348 /// block. This is the same as using operator[] on this class. The result
349 /// may (but is not required to) be null for a forward (backwards)
350 /// statically unreachable block.
351 DomTreeNodeBase<NodeT> *getNode(const NodeT *BB) const {
352 auto I = DomTreeNodes.find(BB);
353 if (I != DomTreeNodes.end())
[20] Calling 'operator!='
[26] Returning from 'operator!='
[27] Taking true branch
354 return I->second.get();
[28] Returning pointer
355 return nullptr;
356 }
357
358 /// See getNode.
359 DomTreeNodeBase<NodeT> *operator[](const NodeT *BB) const {
360 return getNode(BB);
361 }
362
363 /// getRootNode - This returns the entry node for the CFG of the function. If
364 /// this tree represents the post-dominance relations for a function, however,
365 /// this root may be a node with the block == NULL. This is the case when
366 /// there are multiple exit nodes from a particular function. Consumers of
367 /// post-dominance information must be capable of dealing with this
368 /// possibility.
369 ///
370 DomTreeNodeBase<NodeT> *getRootNode() { return RootNode; }
371 const DomTreeNodeBase<NodeT> *getRootNode() const { return RootNode; }
372
373 /// Get all nodes dominated by R, including R itself.
374 void getDescendants(NodeT *R, SmallVectorImpl<NodeT *> &Result) const {
375 Result.clear();
376 const DomTreeNodeBase<NodeT> *RN = getNode(R);
377 if (!RN)
378 return; // If R is unreachable, it will not be present in the DOM tree.
379 SmallVector<const DomTreeNodeBase<NodeT> *, 8> WL;
380 WL.push_back(RN);
381
382 while (!WL.empty()) {
383 const DomTreeNodeBase<NodeT> *N = WL.pop_back_val();
384 Result.push_back(N->getBlock());
385 WL.append(N->begin(), N->end());
386 }
387 }
388
389 /// properlyDominates - Returns true iff A dominates B and A != B.
390 /// Note that this is not a constant time operation!
391 ///
392 bool properlyDominates(const DomTreeNodeBase<NodeT> *A,
393 const DomTreeNodeBase<NodeT> *B) const {
394 if (!A || !B)
395 return false;
396 if (A == B)
397 return false;
398 return dominates(A, B);
399 }
400
401 bool properlyDominates(const NodeT *A, const NodeT *B) const;
402
403 /// isReachableFromEntry - Return true if A is dominated by the entry
404 /// block of the function containing it.
405 bool isReachableFromEntry(const NodeT *A) const {
 406 assert(!this->isPostDominator() &&
 407 "This is not implemented for post dominators");
408 return isReachableFromEntry(getNode(const_cast<NodeT *>(A)));
409 }
410
411 bool isReachableFromEntry(const DomTreeNodeBase<NodeT> *A) const { return A; }
412
413 /// dominates - Returns true iff A dominates B. Note that this is not a
414 /// constant time operation!
415 ///
416 bool dominates(const DomTreeNodeBase<NodeT> *A,
417 const DomTreeNodeBase<NodeT> *B) const {
418 // A node trivially dominates itself.
419 if (B == A)
420 return true;
421
422 // An unreachable node is dominated by anything.
423 if (!isReachableFromEntry(B))
424 return true;
425
426 // And dominates nothing.
427 if (!isReachableFromEntry(A))
428 return false;
429
430 if (B->getIDom() == A) return true;
431
432 if (A->getIDom() == B) return false;
433
434 // A can only dominate B if it is higher in the tree.
435 if (A->getLevel() >= B->getLevel()) return false;
436
437 // Compare the result of the tree walk and the dfs numbers, if expensive
438 // checks are enabled.
439#ifdef EXPENSIVE_CHECKS
 440 assert((!DFSInfoValid ||
 441 (dominatedBySlowTreeWalk(A, B) == B->DominatedBy(A))) &&
 442 "Tree walk disagrees with dfs numbers!");
443#endif
444
445 if (DFSInfoValid)
446 return B->DominatedBy(A);
447
448 // If we end up with too many slow queries, just update the
449 // DFS numbers on the theory that we are going to keep querying.
450 SlowQueries++;
451 if (SlowQueries > 32) {
452 updateDFSNumbers();
453 return B->DominatedBy(A);
454 }
455
456 return dominatedBySlowTreeWalk(A, B);
457 }
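A note on the fast path above: once updateDFSNumbers() has run, dominance reduces to DFS-interval containment, which is exactly what the private DominatedBy() checks. A minimal sketch, assuming hypothetical nodes with intervals Entry {0,5}, Mid {1,4}, Leaf {2,3}:

// Sketch only: Leaf's interval nests inside Mid's (2 >= 1 && 3 <= 4), so Mid
// dominates Leaf; both nest inside Entry's {0,5}, so Entry dominates both.
bool MidDominatesLeaf = Leaf->getDFSNumIn() >= Mid->getDFSNumIn() &&
                        Leaf->getDFSNumOut() <= Mid->getDFSNumOut();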
458
459 bool dominates(const NodeT *A, const NodeT *B) const;
460
461 NodeT *getRoot() const {
 462 assert(this->Roots.size() == 1 && "Should always have entry node!");
463 return this->Roots[0];
464 }
465
466 /// Find nearest common dominator basic block for basic block A and B. A and B
467 /// must have tree nodes.
468 NodeT *findNearestCommonDominator(NodeT *A, NodeT *B) const {
 469 assert(A && B && "Pointers are not valid");
 470 assert(A->getParent() == B->getParent() &&
 471 "Two blocks are not in same function");
472
 473 // If either A or B is an entry block, then it is the nearest common
 474 // dominator (for forward-dominators).
475 if (!isPostDominator()) {
[13] Calling 'DominatorTreeBase::isPostDominator'
[15] Returning from 'DominatorTreeBase::isPostDominator'
[16] Taking true branch
476 NodeT &Entry = A->getParent()->front();
477 if (A == &Entry || B == &Entry)
[17] Assuming the condition is false
[18] Taking false branch
478 return &Entry;
479 }
480
481 DomTreeNodeBase<NodeT> *NodeA = getNode(A);
[19] Calling 'DominatorTreeBase::getNode'
[29] Returning from 'DominatorTreeBase::getNode'
[30] 'NodeA' initialized here
482 DomTreeNodeBase<NodeT> *NodeB = getNode(B);
 483 assert(NodeA && "A must be in the tree");
 484 assert(NodeB && "B must be in the tree");
485
486 // Use level information to go up the tree until the levels match. Then
487 // continue going up til we arrive at the same node.
488 while (NodeA != NodeB) {
[31] Assuming 'NodeA' is equal to 'NodeB'
[32] Assuming pointer value is null
[33] Loop condition is false. Execution continues on line 494
489 if (NodeA->getLevel() < NodeB->getLevel()) std::swap(NodeA, NodeB);
490
491 NodeA = NodeA->IDom;
492 }
493
494 return NodeA->getBlock();
[34] Called C++ object pointer is null
495 }
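This is the function the analyzer flags: with asserts compiled out (note the ((void)0) expansions throughout this build), getNode() can return null for a block that is not in the tree, and step 34 then dereferences that null pointer at line 494. A hedged caller-side sketch, with DT, A, and B hypothetical:

// Sketch only: validate that both blocks have tree nodes before asking
// for their nearest common dominator.
if (DT.getNode(A) && DT.getNode(B)) {
  auto *NCD = DT.findNearestCommonDominator(A, B);
  // ... use NCD; it is non-null for reachable blocks in a forward dom tree.
}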
496
497 const NodeT *findNearestCommonDominator(const NodeT *A,
498 const NodeT *B) const {
499 // Cast away the const qualifiers here. This is ok since
500 // const is re-introduced on the return type.
501 return findNearestCommonDominator(const_cast<NodeT *>(A),
502 const_cast<NodeT *>(B));
503 }
504
505 bool isVirtualRoot(const DomTreeNodeBase<NodeT> *A) const {
506 return isPostDominator() && !A->getBlock();
507 }
508
509 //===--------------------------------------------------------------------===//
510 // API to update (Post)DominatorTree information based on modifications to
511 // the CFG...
512
513 /// Inform the dominator tree about a sequence of CFG edge insertions and
514 /// deletions and perform a batch update on the tree.
515 ///
516 /// This function should be used when there were multiple CFG updates after
517 /// the last dominator tree update. It takes care of performing the updates
518 /// in sync with the CFG and optimizes away the redundant operations that
519 /// cancel each other.
 520 /// The function expects the sequence of updates to be balanced, e.g.:
 521 /// - {{Insert, A, B}, {Delete, A, B}, {Insert, A, B}} is fine, because
 522 /// logically it results in a single insertion.
 523 /// - {{Insert, A, B}, {Insert, A, B}} is invalid, because it doesn't make
 524 /// sense to insert the same edge twice.
525 ///
 526 /// What's more, the function assumes that it's safe to ask every node in the
527 /// CFG about its children and inverse children. This implies that deletions
528 /// of CFG edges must not delete the CFG nodes before calling this function.
529 ///
530 /// The applyUpdates function can reorder the updates and remove redundant
531 /// ones internally. The batch updater is also able to detect sequences of
532 /// zero and exactly one update -- it's optimized to do less work in these
533 /// cases.
534 ///
535 /// Note that for postdominators it automatically takes care of applying
536 /// updates on reverse edges internally (so there's no need to swap the
537 /// From and To pointers when constructing DominatorTree::UpdateType).
538 /// The type of updates is the same for DomTreeBase<T> and PostDomTreeBase<T>
539 /// with the same template parameter T.
540 ///
541 /// \param Updates An unordered sequence of updates to perform. The current
542 /// CFG and the reverse of these updates provides the pre-view of the CFG.
543 ///
544 void applyUpdates(ArrayRef<UpdateType> Updates) {
545 GraphDiff<NodePtr, IsPostDominator> PreViewCFG(
546 Updates, /*ReverseApplyUpdates=*/true);
547 DomTreeBuilder::ApplyUpdates(*this, PreViewCFG, nullptr);
548 }
549
550 /// \param Updates An unordered sequence of updates to perform. The current
551 /// CFG and the reverse of these updates provides the pre-view of the CFG.
552 /// \param PostViewUpdates An unordered sequence of update to perform in order
553 /// to obtain a post-view of the CFG. The DT will be updated assuming the
554 /// obtained PostViewCFG is the desired end state.
555 void applyUpdates(ArrayRef<UpdateType> Updates,
556 ArrayRef<UpdateType> PostViewUpdates) {
557 if (Updates.empty()) {
558 GraphDiff<NodePtr, IsPostDom> PostViewCFG(PostViewUpdates);
559 DomTreeBuilder::ApplyUpdates(*this, PostViewCFG, &PostViewCFG);
560 } else {
561 // PreViewCFG needs to merge Updates and PostViewCFG. The updates in
562 // Updates need to be reversed, and match the direction in PostViewCFG.
563 // The PostViewCFG is created with updates reversed (equivalent to changes
564 // made to the CFG), so the PreViewCFG needs all the updates reverse
565 // applied.
566 SmallVector<UpdateType> AllUpdates(Updates.begin(), Updates.end());
567 append_range(AllUpdates, PostViewUpdates);
568 GraphDiff<NodePtr, IsPostDom> PreViewCFG(AllUpdates,
569 /*ReverseApplyUpdates=*/true);
570 GraphDiff<NodePtr, IsPostDom> PostViewCFG(PostViewUpdates);
571 DomTreeBuilder::ApplyUpdates(*this, PreViewCFG, &PostViewCFG);
572 }
573 }
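A minimal usage sketch for the batch API above, assuming DT is a DomTreeBase<BasicBlock> and A, B, C are hypothetical blocks whose edges were just edited in the CFG:

SmallVector<DomTreeBase<BasicBlock>::UpdateType, 2> Updates;
Updates.push_back({DomTreeBase<BasicBlock>::Insert, A, C}); // edge A->C added
Updates.push_back({DomTreeBase<BasicBlock>::Delete, A, B}); // edge A->B removed
DT.applyUpdates(Updates); // one balanced batch, applied in sync with the CFG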
574
575 /// Inform the dominator tree about a CFG edge insertion and update the tree.
576 ///
577 /// This function has to be called just before or just after making the update
578 /// on the actual CFG. There cannot be any other updates that the dominator
579 /// tree doesn't know about.
580 ///
581 /// Note that for postdominators it automatically takes care of inserting
582 /// a reverse edge internally (so there's no need to swap the parameters).
583 ///
584 void insertEdge(NodeT *From, NodeT *To) {
 585 assert(From);
 586 assert(To);
 587 assert(From->getParent() == Parent);
 588 assert(To->getParent() == Parent);
589 DomTreeBuilder::InsertEdge(*this, From, To);
590 }
591
592 /// Inform the dominator tree about a CFG edge deletion and update the tree.
593 ///
 594 /// This function has to be called just after making the update on the actual
 595 /// CFG. An internal function checks, in DEBUG mode, that the edge no longer
 596 /// exists in the CFG. There cannot be any other updates that the
 597 /// dominator tree doesn't know about.
598 ///
599 /// Note that for postdominators it automatically takes care of deleting
600 /// a reverse edge internally (so there's no need to swap the parameters).
601 ///
602 void deleteEdge(NodeT *From, NodeT *To) {
 603 assert(From);
 604 assert(To);
 605 assert(From->getParent() == Parent);
 606 assert(To->getParent() == Parent);
607 DomTreeBuilder::DeleteEdge(*this, From, To);
608 }
609
610 /// Add a new node to the dominator tree information.
611 ///
612 /// This creates a new node as a child of DomBB dominator node, linking it
613 /// into the children list of the immediate dominator.
614 ///
615 /// \param BB New node in CFG.
616 /// \param DomBB CFG node that is dominator for BB.
617 /// \returns New dominator tree node that represents new CFG node.
618 ///
619 DomTreeNodeBase<NodeT> *addNewBlock(NodeT *BB, NodeT *DomBB) {
 620 assert(getNode(BB) == nullptr && "Block already in dominator tree!");
621 DomTreeNodeBase<NodeT> *IDomNode = getNode(DomBB);
 622 assert(IDomNode && "Not immediate dominator specified for block!");
623 DFSInfoValid = false;
624 return createChild(BB, IDomNode);
625 }
626
627 /// Add a new node to the forward dominator tree and make it a new root.
628 ///
629 /// \param BB New node in CFG.
630 /// \returns New dominator tree node that represents new CFG node.
631 ///
632 DomTreeNodeBase<NodeT> *setNewRoot(NodeT *BB) {
 633 assert(getNode(BB) == nullptr && "Block already in dominator tree!");
 634 assert(!this->isPostDominator() &&
 635 "Cannot change root of post-dominator tree");
636 DFSInfoValid = false;
637 DomTreeNodeBase<NodeT> *NewNode = createNode(BB);
638 if (Roots.empty()) {
639 addRoot(BB);
640 } else {
 641 assert(Roots.size() == 1);
642 NodeT *OldRoot = Roots.front();
643 auto &OldNode = DomTreeNodes[OldRoot];
644 OldNode = NewNode->addChild(std::move(DomTreeNodes[OldRoot]));
645 OldNode->IDom = NewNode;
646 OldNode->UpdateLevel();
647 Roots[0] = BB;
648 }
649 return RootNode = NewNode;
650 }
651
652 /// changeImmediateDominator - This method is used to update the dominator
653 /// tree information when a node's immediate dominator changes.
654 ///
655 void changeImmediateDominator(DomTreeNodeBase<NodeT> *N,
656 DomTreeNodeBase<NodeT> *NewIDom) {
 657 assert(N && NewIDom && "Cannot change null node pointers!");
658 DFSInfoValid = false;
659 N->setIDom(NewIDom);
660 }
661
662 void changeImmediateDominator(NodeT *BB, NodeT *NewBB) {
663 changeImmediateDominator(getNode(BB), getNode(NewBB));
664 }
665
666 /// eraseNode - Removes a node from the dominator tree. Block must not
667 /// dominate any other blocks. Removes node from its immediate dominator's
668 /// children list. Deletes dominator node associated with basic block BB.
669 void eraseNode(NodeT *BB) {
670 DomTreeNodeBase<NodeT> *Node = getNode(BB);
 671 assert(Node && "Removing node that isn't in dominator tree.");
 672 assert(Node->isLeaf() && "Node is not a leaf node.");
673
674 DFSInfoValid = false;
675
676 // Remove node from immediate dominator's children list.
677 DomTreeNodeBase<NodeT> *IDom = Node->getIDom();
678 if (IDom) {
679 const auto I = find(IDom->Children, Node);
 680 assert(I != IDom->Children.end() &&
 681 "Not in immediate dominator children set!");
682 // I am no longer your child...
683 IDom->Children.erase(I);
684 }
685
686 DomTreeNodes.erase(BB);
687
688 if (!IsPostDom) return;
689
690 // Remember to update PostDominatorTree roots.
691 auto RIt = llvm::find(Roots, BB);
692 if (RIt != Roots.end()) {
693 std::swap(*RIt, Roots.back());
694 Roots.pop_back();
695 }
696 }
697
698 /// splitBlock - BB is split and now it has one successor. Update dominator
699 /// tree to reflect this change.
700 void splitBlock(NodeT *NewBB) {
701 if (IsPostDominator)
702 Split<Inverse<NodeT *>>(NewBB);
703 else
704 Split<NodeT *>(NewBB);
705 }
706
707 /// print - Convert to human readable form
708 ///
709 void print(raw_ostream &O) const {
710 O << "=============================--------------------------------\n";
711 if (IsPostDominator)
712 O << "Inorder PostDominator Tree: ";
713 else
714 O << "Inorder Dominator Tree: ";
715 if (!DFSInfoValid)
716 O << "DFSNumbers invalid: " << SlowQueries << " slow queries.";
717 O << "\n";
718
719 // The postdom tree can have a null root if there are no returns.
720 if (getRootNode()) PrintDomTree<NodeT>(getRootNode(), O, 1);
721 O << "Roots: ";
722 for (const NodePtr Block : Roots) {
723 Block->printAsOperand(O, false);
724 O << " ";
725 }
726 O << "\n";
727 }
728
729public:
730 /// updateDFSNumbers - Assign In and Out numbers to the nodes while walking
731 /// dominator tree in dfs order.
732 void updateDFSNumbers() const {
733 if (DFSInfoValid) {
734 SlowQueries = 0;
735 return;
736 }
737
738 SmallVector<std::pair<const DomTreeNodeBase<NodeT> *,
739 typename DomTreeNodeBase<NodeT>::const_iterator>,
740 32> WorkStack;
741
742 const DomTreeNodeBase<NodeT> *ThisRoot = getRootNode();
 743 assert((!Parent || ThisRoot) && "Empty constructed DomTree");
744 if (!ThisRoot)
745 return;
746
 747 // Both dominators and postdominators have a single root node. In the
 748 // case of PostDominatorTree, this node is a virtual root.
749 WorkStack.push_back({ThisRoot, ThisRoot->begin()});
750
751 unsigned DFSNum = 0;
752 ThisRoot->DFSNumIn = DFSNum++;
753
754 while (!WorkStack.empty()) {
755 const DomTreeNodeBase<NodeT> *Node = WorkStack.back().first;
756 const auto ChildIt = WorkStack.back().second;
757
 758 // If we visited all of the children of this node, "recurse" back up the
 759 // stack setting the DFSNumOut.
760 if (ChildIt == Node->end()) {
761 Node->DFSNumOut = DFSNum++;
762 WorkStack.pop_back();
763 } else {
764 // Otherwise, recursively visit this child.
765 const DomTreeNodeBase<NodeT> *Child = *ChildIt;
766 ++WorkStack.back().second;
767
768 WorkStack.push_back({Child, Child->begin()});
769 Child->DFSNumIn = DFSNum++;
770 }
771 }
772
773 SlowQueries = 0;
774 DFSInfoValid = true;
775 }
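A small worked example of the numbering above, assuming a tree Root -> {X, Y} with X -> {Z} and children visited in order:

// Root.In = 0; X.In = 1; Z.In = 2; Z.Out = 3; X.Out = 4; Y.In = 5; Y.Out = 6;
// Root.Out = 7. Each node's [In, Out] interval contains the intervals of
// exactly its descendants, which is the invariant DominatedBy() relies on.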
776
777 /// recalculate - compute a dominator tree for the given function
778 void recalculate(ParentType &Func) {
779 Parent = &Func;
780 DomTreeBuilder::Calculate(*this);
781 }
782
783 void recalculate(ParentType &Func, ArrayRef<UpdateType> Updates) {
784 Parent = &Func;
785 DomTreeBuilder::CalculateWithUpdates(*this, Updates);
786 }
787
788 /// verify - checks if the tree is correct. There are 3 level of verification:
789 /// - Full -- verifies if the tree is correct by making sure all the
790 /// properties (including the parent and the sibling property)
791 /// hold.
792 /// Takes O(N^3) time.
793 ///
794 /// - Basic -- checks if the tree is correct, but compares it to a freshly
795 /// constructed tree instead of checking the sibling property.
796 /// Takes O(N^2) time.
797 ///
798 /// - Fast -- checks basic tree structure and compares it with a freshly
799 /// constructed tree.
 800 /// Takes O(N^2) time worst case, but is faster in practice (same
801 /// as tree construction).
802 bool verify(VerificationLevel VL = VerificationLevel::Full) const {
803 return DomTreeBuilder::Verify(*this, VL);
804 }
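A hypothetical sanity-check sketch using the levels above; Fast is cheap enough for routine use, while Full is the tool of last resort when hunting tree corruption:

assert(DT.verify(DomTreeBase<BasicBlock>::VerificationLevel::Fast) &&
       "dominator tree out of sync with the CFG");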
805
806 void reset() {
807 DomTreeNodes.clear();
808 Roots.clear();
809 RootNode = nullptr;
810 Parent = nullptr;
811 DFSInfoValid = false;
812 SlowQueries = 0;
813 }
814
815protected:
816 void addRoot(NodeT *BB) { this->Roots.push_back(BB); }
817
818 DomTreeNodeBase<NodeT> *createChild(NodeT *BB, DomTreeNodeBase<NodeT> *IDom) {
819 return (DomTreeNodes[BB] = IDom->addChild(
820 std::make_unique<DomTreeNodeBase<NodeT>>(BB, IDom)))
821 .get();
822 }
823
824 DomTreeNodeBase<NodeT> *createNode(NodeT *BB) {
825 return (DomTreeNodes[BB] =
826 std::make_unique<DomTreeNodeBase<NodeT>>(BB, nullptr))
827 .get();
828 }
829
830 // NewBB is split and now it has one successor. Update dominator tree to
831 // reflect this change.
832 template <class N>
833 void Split(typename GraphTraits<N>::NodeRef NewBB) {
834 using GraphT = GraphTraits<N>;
835 using NodeRef = typename GraphT::NodeRef;
 836 assert(std::distance(GraphT::child_begin(NewBB),
 837 GraphT::child_end(NewBB)) == 1 &&
 838 "NewBB should have a single successor!");
839 NodeRef NewBBSucc = *GraphT::child_begin(NewBB);
840
841 SmallVector<NodeRef, 4> PredBlocks(children<Inverse<N>>(NewBB));
842
 843 assert(!PredBlocks.empty() && "No predblocks?");
844
845 bool NewBBDominatesNewBBSucc = true;
846 for (auto Pred : children<Inverse<N>>(NewBBSucc)) {
847 if (Pred != NewBB && !dominates(NewBBSucc, Pred) &&
848 isReachableFromEntry(Pred)) {
849 NewBBDominatesNewBBSucc = false;
850 break;
851 }
852 }
853
854 // Find NewBB's immediate dominator and create new dominator tree node for
855 // NewBB.
856 NodeT *NewBBIDom = nullptr;
857 unsigned i = 0;
858 for (i = 0; i < PredBlocks.size(); ++i)
859 if (isReachableFromEntry(PredBlocks[i])) {
860 NewBBIDom = PredBlocks[i];
861 break;
862 }
863
864 // It's possible that none of the predecessors of NewBB are reachable;
865 // in that case, NewBB itself is unreachable, so nothing needs to be
866 // changed.
867 if (!NewBBIDom) return;
868
869 for (i = i + 1; i < PredBlocks.size(); ++i) {
870 if (isReachableFromEntry(PredBlocks[i]))
871 NewBBIDom = findNearestCommonDominator(NewBBIDom, PredBlocks[i]);
872 }
873
874 // Create the new dominator tree node... and set the idom of NewBB.
875 DomTreeNodeBase<NodeT> *NewBBNode = addNewBlock(NewBB, NewBBIDom);
876
877 // If NewBB strictly dominates other blocks, then it is now the immediate
878 // dominator of NewBBSucc. Update the dominator tree as appropriate.
879 if (NewBBDominatesNewBBSucc) {
880 DomTreeNodeBase<NodeT> *NewBBSuccNode = getNode(NewBBSucc);
881 changeImmediateDominator(NewBBSuccNode, NewBBNode);
882 }
883 }
884
885 private:
886 bool dominatedBySlowTreeWalk(const DomTreeNodeBase<NodeT> *A,
887 const DomTreeNodeBase<NodeT> *B) const {
 888 assert(A != B);
 889 assert(isReachableFromEntry(B));
 890 assert(isReachableFromEntry(A));
891
892 const unsigned ALevel = A->getLevel();
893 const DomTreeNodeBase<NodeT> *IDom;
894
895 // Don't walk nodes above A's subtree. When we reach A's level, we must
896 // either find A or be in some other subtree not dominated by A.
897 while ((IDom = B->getIDom()) != nullptr && IDom->getLevel() >= ALevel)
898 B = IDom; // Walk up the tree
899
900 return B == A;
901 }
902
903 /// Wipe this tree's state without releasing any resources.
904 ///
905 /// This is essentially a post-move helper only. It leaves the object in an
906 /// assignable and destroyable state, but otherwise invalid.
907 void wipe() {
908 DomTreeNodes.clear();
909 RootNode = nullptr;
910 Parent = nullptr;
911 }
912};
913
914template <typename T>
915using DomTreeBase = DominatorTreeBase<T, false>;
916
917template <typename T>
918using PostDomTreeBase = DominatorTreeBase<T, true>;
919
920// These two functions are declared out of line as a workaround for building
921// with old (< r147295) versions of clang because of pr11642.
922template <typename NodeT, bool IsPostDom>
923bool DominatorTreeBase<NodeT, IsPostDom>::dominates(const NodeT *A,
924 const NodeT *B) const {
925 if (A == B)
926 return true;
927
928 // Cast away the const qualifiers here. This is ok since
929 // this function doesn't actually return the values returned
930 // from getNode.
931 return dominates(getNode(const_cast<NodeT *>(A)),
932 getNode(const_cast<NodeT *>(B)));
933}
934template <typename NodeT, bool IsPostDom>
935bool DominatorTreeBase<NodeT, IsPostDom>::properlyDominates(
936 const NodeT *A, const NodeT *B) const {
937 if (A == B)
938 return false;
939
940 // Cast away the const qualifiers here. This is ok since
941 // this function doesn't actually return the values returned
942 // from getNode.
943 return dominates(getNode(const_cast<NodeT *>(A)),
944 getNode(const_cast<NodeT *>(B)));
945}
946
947} // end namespace llvm
948
949#endif // LLVM_SUPPORT_GENERICDOMTREE_H

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT/DenseMap.h

1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the DenseMap class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_DENSEMAP_H
14#define LLVM_ADT_DENSEMAP_H
15
16#include "llvm/ADT/DenseMapInfo.h"
17#include "llvm/ADT/EpochTracker.h"
18#include "llvm/Support/AlignOf.h"
19#include "llvm/Support/Compiler.h"
20#include "llvm/Support/MathExtras.h"
21#include "llvm/Support/MemAlloc.h"
22#include "llvm/Support/ReverseIteration.h"
23#include "llvm/Support/type_traits.h"
24#include <algorithm>
25#include <cassert>
26#include <cstddef>
27#include <cstring>
28#include <initializer_list>
29#include <iterator>
30#include <new>
31#include <type_traits>
32#include <utility>
33
34namespace llvm {
35
36namespace detail {
37
38// We extend a pair to allow users to override the bucket type with their own
39// implementation without requiring two members.
40template <typename KeyT, typename ValueT>
41struct DenseMapPair : public std::pair<KeyT, ValueT> {
42 using std::pair<KeyT, ValueT>::pair;
43
44 KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
45 const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
46 ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
47 const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
48};
49
50} // end namespace detail
51
52template <typename KeyT, typename ValueT,
53 typename KeyInfoT = DenseMapInfo<KeyT>,
54 typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
55 bool IsConst = false>
56class DenseMapIterator;
57
58template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
59 typename BucketT>
60class DenseMapBase : public DebugEpochBase {
61 template <typename T>
62 using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
63
64public:
65 using size_type = unsigned;
66 using key_type = KeyT;
67 using mapped_type = ValueT;
68 using value_type = BucketT;
69
70 using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
71 using const_iterator =
72 DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
73
74 inline iterator begin() {
75 // When the map is empty, avoid the overhead of advancing/retreating past
76 // empty buckets.
77 if (empty())
78 return end();
79 if (shouldReverseIterate<KeyT>())
80 return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
81 return makeIterator(getBuckets(), getBucketsEnd(), *this);
82 }
83 inline iterator end() {
84 return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
85 }
86 inline const_iterator begin() const {
87 if (empty())
88 return end();
89 if (shouldReverseIterate<KeyT>())
90 return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
91 return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
92 }
93 inline const_iterator end() const {
94 return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
95 }
96
 97 LLVM_NODISCARD bool empty() const {
98 return getNumEntries() == 0;
99 }
100 unsigned size() const { return getNumEntries(); }
101
102 /// Grow the densemap so that it can contain at least \p NumEntries items
103 /// before resizing again.
104 void reserve(size_type NumEntries) {
105 auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
106 incrementEpoch();
107 if (NumBuckets > getNumBuckets())
108 grow(NumBuckets);
109 }
110
111 void clear() {
112 incrementEpoch();
113 if (getNumEntries() == 0 && getNumTombstones() == 0) return;
114
115 // If the capacity of the array is huge, and the # elements used is small,
116 // shrink the array.
117 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
118 shrink_and_clear();
119 return;
120 }
121
122 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
123 if (std::is_trivially_destructible<ValueT>::value) {
124 // Use a simpler loop when values don't need destruction.
125 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
126 P->getFirst() = EmptyKey;
127 } else {
128 unsigned NumEntries = getNumEntries();
129 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
130 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
131 if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
132 P->getSecond().~ValueT();
133 --NumEntries;
134 }
135 P->getFirst() = EmptyKey;
136 }
137 }
 138 assert(NumEntries == 0 && "Node count imbalance!");
139 }
140 setNumEntries(0);
141 setNumTombstones(0);
142 }
143
144 /// Return 1 if the specified key is in the map, 0 otherwise.
145 size_type count(const_arg_type_t<KeyT> Val) const {
146 const BucketT *TheBucket;
147 return LookupBucketFor(Val, TheBucket) ? 1 : 0;
148 }
149
150 iterator find(const_arg_type_t<KeyT> Val) {
151 BucketT *TheBucket;
152 if (LookupBucketFor(Val, TheBucket))
153 return makeIterator(TheBucket,
154 shouldReverseIterate<KeyT>() ? getBuckets()
155 : getBucketsEnd(),
156 *this, true);
157 return end();
158 }
159 const_iterator find(const_arg_type_t<KeyT> Val) const {
160 const BucketT *TheBucket;
161 if (LookupBucketFor(Val, TheBucket))
162 return makeConstIterator(TheBucket,
163 shouldReverseIterate<KeyT>() ? getBuckets()
164 : getBucketsEnd(),
165 *this, true);
166 return end();
167 }
168
169 /// Alternate version of find() which allows a different, and possibly
170 /// less expensive, key type.
171 /// The DenseMapInfo is responsible for supplying methods
172 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
173 /// type used.
174 template<class LookupKeyT>
175 iterator find_as(const LookupKeyT &Val) {
176 BucketT *TheBucket;
177 if (LookupBucketFor(Val, TheBucket))
178 return makeIterator(TheBucket,
179 shouldReverseIterate<KeyT>() ? getBuckets()
180 : getBucketsEnd(),
181 *this, true);
182 return end();
183 }
184 template<class LookupKeyT>
185 const_iterator find_as(const LookupKeyT &Val) const {
186 const BucketT *TheBucket;
187 if (LookupBucketFor(Val, TheBucket))
188 return makeConstIterator(TheBucket,
189 shouldReverseIterate<KeyT>() ? getBuckets()
190 : getBucketsEnd(),
191 *this, true);
192 return end();
193 }
194
195 /// lookup - Return the entry for the specified key, or a default
196 /// constructed value if no such entry exists.
197 ValueT lookup(const_arg_type_t<KeyT> Val) const {
198 const BucketT *TheBucket;
199 if (LookupBucketFor(Val, TheBucket))
200 return TheBucket->getSecond();
201 return ValueT();
202 }
203
204 // Inserts key,value pair into the map if the key isn't already in the map.
205 // If the key is already in the map, it returns false and doesn't update the
206 // value.
207 std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
208 return try_emplace(KV.first, KV.second);
209 }
210
211 // Inserts key,value pair into the map if the key isn't already in the map.
212 // If the key is already in the map, it returns false and doesn't update the
213 // value.
214 std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
215 return try_emplace(std::move(KV.first), std::move(KV.second));
216 }
217
218 // Inserts key,value pair into the map if the key isn't already in the map.
219 // The value is constructed in-place if the key is not in the map, otherwise
220 // it is not moved.
221 template <typename... Ts>
222 std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
223 BucketT *TheBucket;
224 if (LookupBucketFor(Key, TheBucket))
225 return std::make_pair(makeIterator(TheBucket,
226 shouldReverseIterate<KeyT>()
227 ? getBuckets()
228 : getBucketsEnd(),
229 *this, true),
230 false); // Already in map.
231
232 // Otherwise, insert the new element.
233 TheBucket =
234 InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
235 return std::make_pair(makeIterator(TheBucket,
236 shouldReverseIterate<KeyT>()
237 ? getBuckets()
238 : getBucketsEnd(),
239 *this, true),
240 true);
241 }
242
243 // Inserts key,value pair into the map if the key isn't already in the map.
244 // The value is constructed in-place if the key is not in the map, otherwise
245 // it is not moved.
246 template <typename... Ts>
247 std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
248 BucketT *TheBucket;
249 if (LookupBucketFor(Key, TheBucket))
250 return std::make_pair(makeIterator(TheBucket,
251 shouldReverseIterate<KeyT>()
252 ? getBuckets()
253 : getBucketsEnd(),
254 *this, true),
255 false); // Already in map.
256
257 // Otherwise, insert the new element.
258 TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
259 return std::make_pair(makeIterator(TheBucket,
260 shouldReverseIterate<KeyT>()
261 ? getBuckets()
262 : getBucketsEnd(),
263 *this, true),
264 true);
265 }
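A minimal try_emplace sketch (the map, key, and values are hypothetical; note the value arguments are not consumed when the key already exists):

llvm::DenseMap<unsigned, std::string> M;
auto Res = M.try_emplace(1u, "one"); // constructs "one" in place
if (!Res.second)
  Res.first->second = "updated";     // key was already present; update by hand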
266
267 /// Alternate version of insert() which allows a different, and possibly
268 /// less expensive, key type.
269 /// The DenseMapInfo is responsible for supplying methods
270 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
271 /// type used.
272 template <typename LookupKeyT>
273 std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
274 const LookupKeyT &Val) {
275 BucketT *TheBucket;
276 if (LookupBucketFor(Val, TheBucket))
277 return std::make_pair(makeIterator(TheBucket,
278 shouldReverseIterate<KeyT>()
279 ? getBuckets()
280 : getBucketsEnd(),
281 *this, true),
282 false); // Already in map.
283
284 // Otherwise, insert the new element.
285 TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
286 std::move(KV.second), Val);
287 return std::make_pair(makeIterator(TheBucket,
288 shouldReverseIterate<KeyT>()
289 ? getBuckets()
290 : getBucketsEnd(),
291 *this, true),
292 true);
293 }
294
295 /// insert - Range insertion of pairs.
296 template<typename InputIt>
297 void insert(InputIt I, InputIt E) {
298 for (; I != E; ++I)
299 insert(*I);
300 }
301
302 bool erase(const KeyT &Val) {
303 BucketT *TheBucket;
304 if (!LookupBucketFor(Val, TheBucket))
305 return false; // not in map.
306
307 TheBucket->getSecond().~ValueT();
308 TheBucket->getFirst() = getTombstoneKey();
309 decrementNumEntries();
310 incrementNumTombstones();
311 return true;
312 }
313 void erase(iterator I) {
314 BucketT *TheBucket = &*I;
315 TheBucket->getSecond().~ValueT();
316 TheBucket->getFirst() = getTombstoneKey();
317 decrementNumEntries();
318 incrementNumTombstones();
319 }
320
321 value_type& FindAndConstruct(const KeyT &Key) {
322 BucketT *TheBucket;
323 if (LookupBucketFor(Key, TheBucket))
324 return *TheBucket;
325
326 return *InsertIntoBucket(TheBucket, Key);
327 }
328
329 ValueT &operator[](const KeyT &Key) {
330 return FindAndConstruct(Key).second;
331 }
332
333 value_type& FindAndConstruct(KeyT &&Key) {
334 BucketT *TheBucket;
335 if (LookupBucketFor(Key, TheBucket))
336 return *TheBucket;
337
338 return *InsertIntoBucket(TheBucket, std::move(Key));
339 }
340
341 ValueT &operator[](KeyT &&Key) {
342 return FindAndConstruct(std::move(Key)).second;
343 }
344
345 /// isPointerIntoBucketsArray - Return true if the specified pointer points
346 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
347 /// value in the DenseMap).
348 bool isPointerIntoBucketsArray(const void *Ptr) const {
349 return Ptr >= getBuckets() && Ptr < getBucketsEnd();
350 }
351
352 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
353 /// array. In conjunction with the previous method, this can be used to
354 /// determine whether an insertion caused the DenseMap to reallocate.
355 const void *getPointerIntoBucketsArray() const { return getBuckets(); }
356
357protected:
358 DenseMapBase() = default;
359
360 void destroyAll() {
361 if (getNumBuckets() == 0) // Nothing to do.
362 return;
363
364 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
365 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
366 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
367 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
368 P->getSecond().~ValueT();
369 P->getFirst().~KeyT();
370 }
371 }
372
373 void initEmpty() {
374 setNumEntries(0);
375 setNumTombstones(0);
376
 377 assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
 378 "# initial buckets must be a power of two!");
379 const KeyT EmptyKey = getEmptyKey();
380 for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
381 ::new (&B->getFirst()) KeyT(EmptyKey);
382 }
383
 384 /// Returns the number of buckets to allocate to ensure that the DenseMap can
 385 /// accommodate \p NumEntries without needing to grow().
386 unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
387 // Ensure that "NumEntries * 4 < NumBuckets * 3"
388 if (NumEntries == 0)
389 return 0;
 390 // +1 is required because of the strict inequality.
 391 // For example if NumEntries is 48, this returns NextPowerOf2(65) = 128.
392 return NextPowerOf2(NumEntries * 4 / 3 + 1);
393 }
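Worked numbers for the formula above:

// getMinBucketToReserveForEntries(48): 48 * 4 / 3 + 1 = 65, since with only
// 64 buckets 48 * 4 == 64 * 3 violates the strict "<". NextPowerOf2(65) = 128,
// so reserve(48) allocates 128 buckets and keeps the load factor below 3/4.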
394
395 void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
396 initEmpty();
397
398 // Insert all the old elements.
399 const KeyT EmptyKey = getEmptyKey();
400 const KeyT TombstoneKey = getTombstoneKey();
401 for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
402 if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
403 !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
404 // Insert the key/value into the new table.
405 BucketT *DestBucket;
406 bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
407 (void)FoundVal; // silence warning.
 408 assert(!FoundVal && "Key already in new map?");
409 DestBucket->getFirst() = std::move(B->getFirst());
410 ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
411 incrementNumEntries();
412
413 // Free the value.
414 B->getSecond().~ValueT();
415 }
416 B->getFirst().~KeyT();
417 }
418 }
419
420 template <typename OtherBaseT>
421 void copyFrom(
422 const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
 423 assert(&other != this);
 424 assert(getNumBuckets() == other.getNumBuckets());
425
426 setNumEntries(other.getNumEntries());
427 setNumTombstones(other.getNumTombstones());
428
429 if (std::is_trivially_copyable<KeyT>::value &&
430 std::is_trivially_copyable<ValueT>::value)
431 memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
432 getNumBuckets() * sizeof(BucketT));
433 else
434 for (size_t i = 0; i < getNumBuckets(); ++i) {
435 ::new (&getBuckets()[i].getFirst())
436 KeyT(other.getBuckets()[i].getFirst());
437 if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
438 !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
439 ::new (&getBuckets()[i].getSecond())
440 ValueT(other.getBuckets()[i].getSecond());
441 }
442 }
443
444 static unsigned getHashValue(const KeyT &Val) {
445 return KeyInfoT::getHashValue(Val);
446 }
447
448 template<typename LookupKeyT>
449 static unsigned getHashValue(const LookupKeyT &Val) {
450 return KeyInfoT::getHashValue(Val);
451 }
452
453 static const KeyT getEmptyKey() {
454 static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
455 "Must pass the derived type to this template!");
456 return KeyInfoT::getEmptyKey();
457 }
458
459 static const KeyT getTombstoneKey() {
460 return KeyInfoT::getTombstoneKey();
461 }
462
463private:
464 iterator makeIterator(BucketT *P, BucketT *E,
465 DebugEpochBase &Epoch,
466 bool NoAdvance=false) {
467 if (shouldReverseIterate<KeyT>()) {
468 BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
469 return iterator(B, E, Epoch, NoAdvance);
470 }
471 return iterator(P, E, Epoch, NoAdvance);
472 }
473
474 const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
475 const DebugEpochBase &Epoch,
476 const bool NoAdvance=false) const {
477 if (shouldReverseIterate<KeyT>()) {
478 const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
479 return const_iterator(B, E, Epoch, NoAdvance);
480 }
481 return const_iterator(P, E, Epoch, NoAdvance);
482 }
483
484 unsigned getNumEntries() const {
485 return static_cast<const DerivedT *>(this)->getNumEntries();
486 }
487
488 void setNumEntries(unsigned Num) {
489 static_cast<DerivedT *>(this)->setNumEntries(Num);
490 }
491
492 void incrementNumEntries() {
493 setNumEntries(getNumEntries() + 1);
494 }
495
496 void decrementNumEntries() {
497 setNumEntries(getNumEntries() - 1);
498 }
499
500 unsigned getNumTombstones() const {
501 return static_cast<const DerivedT *>(this)->getNumTombstones();
502 }
503
504 void setNumTombstones(unsigned Num) {
505 static_cast<DerivedT *>(this)->setNumTombstones(Num);
506 }
507
508 void incrementNumTombstones() {
509 setNumTombstones(getNumTombstones() + 1);
510 }
511
512 void decrementNumTombstones() {
513 setNumTombstones(getNumTombstones() - 1);
514 }
515
516 const BucketT *getBuckets() const {
517 return static_cast<const DerivedT *>(this)->getBuckets();
518 }
519
520 BucketT *getBuckets() {
521 return static_cast<DerivedT *>(this)->getBuckets();
522 }
523
524 unsigned getNumBuckets() const {
525 return static_cast<const DerivedT *>(this)->getNumBuckets();
526 }
527
528 BucketT *getBucketsEnd() {
529 return getBuckets() + getNumBuckets();
530 }
531
532 const BucketT *getBucketsEnd() const {
533 return getBuckets() + getNumBuckets();
534 }
535
536 void grow(unsigned AtLeast) {
537 static_cast<DerivedT *>(this)->grow(AtLeast);
538 }
539
540 void shrink_and_clear() {
541 static_cast<DerivedT *>(this)->shrink_and_clear();
542 }
543
544 template <typename KeyArg, typename... ValueArgs>
545 BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
546 ValueArgs &&... Values) {
547 TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
548
549 TheBucket->getFirst() = std::forward<KeyArg>(Key);
550 ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
551 return TheBucket;
552 }
553
554 template <typename LookupKeyT>
555 BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
556 ValueT &&Value, LookupKeyT &Lookup) {
557 TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
558
559 TheBucket->getFirst() = std::move(Key);
560 ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
561 return TheBucket;
562 }
563
564 template <typename LookupKeyT>
565 BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
566 BucketT *TheBucket) {
567 incrementEpoch();
568
569 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
570 // the buckets are empty (meaning that many are filled with tombstones),
571 // grow the table.
572 //
 573 // The latter case is tricky. For example, if we had one empty bucket with
 574 // tons of tombstones, failing lookups (e.g. for insertion) would have to
 575 // probe almost the entire table until it found the empty bucket. If the
 576 // table were completely filled with tombstones, no lookup would ever
 577 // succeed, causing infinite loops in lookup.
578 unsigned NewNumEntries = getNumEntries() + 1;
579 unsigned NumBuckets = getNumBuckets();
 580 if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
581 this->grow(NumBuckets * 2);
582 LookupBucketFor(Lookup, TheBucket);
583 NumBuckets = getNumBuckets();
 584 } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
 585 NumBuckets/8)) {
586 this->grow(NumBuckets);
587 LookupBucketFor(Lookup, TheBucket);
588 }
 589 assert(TheBucket);
590
591 // Only update the state after we've grown our bucket space appropriately
592 // so that when growing buckets we have self-consistent entry count.
593 incrementNumEntries();
594
595 // If we are writing over a tombstone, remember this.
596 const KeyT EmptyKey = getEmptyKey();
597 if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
598 decrementNumTombstones();
599
600 return TheBucket;
601 }
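Concrete numbers for the two growth triggers above, assuming a 64-bucket table:

// - Load trigger: NewNumEntries * 4 >= 64 * 3 fires on the insertion that
//   would make the map hold 48 entries (3/4 of 64), doubling to 128 buckets.
// - Tombstone trigger: 64 - (NewNumEntries + NumTombstones) <= 8 fires when
//   at most 1/8 of the buckets remain empty; grow(64) rebuilds the table at
//   the same size to reclaim tombstones without doubling.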
602
603 /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
604 /// FoundBucket. If the bucket contains the key and a value, this returns
605 /// true, otherwise it returns a bucket with an empty marker or tombstone and
606 /// returns false.
607 template<typename LookupKeyT>
608 bool LookupBucketFor(const LookupKeyT &Val,
609 const BucketT *&FoundBucket) const {
610 const BucketT *BucketsPtr = getBuckets();
611 const unsigned NumBuckets = getNumBuckets();
612
613 if (NumBuckets == 0) {
614 FoundBucket = nullptr;
615 return false;
616 }
617
618 // FoundTombstone - Keep track of whether we find a tombstone while probing.
619 const BucketT *FoundTombstone = nullptr;
620 const KeyT EmptyKey = getEmptyKey();
621 const KeyT TombstoneKey = getTombstoneKey();
 622 assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
 623 !KeyInfoT::isEqual(Val, TombstoneKey) &&
 624 "Empty/Tombstone value shouldn't be inserted into map!");
625
626 unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
627 unsigned ProbeAmt = 1;
628 while (true) {
629 const BucketT *ThisBucket = BucketsPtr + BucketNo;
630 // Found Val's bucket? If so, return it.
 631 if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
632 FoundBucket = ThisBucket;
633 return true;
634 }
635
636 // If we found an empty bucket, the key doesn't exist in the set.
637 // Insert it and return the default value.
 638 if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
639 // If we've already seen a tombstone while probing, fill it in instead
640 // of the empty bucket we eventually probed to.
641 FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
642 return false;
643 }
644
 645 // If this is a tombstone, remember it. If Val ends up not in the map, we
 646 // prefer to return it rather than something that needs more probing.
647 if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
648 !FoundTombstone)
649 FoundTombstone = ThisBucket; // Remember the first tombstone found.
650
651 // Otherwise, it's a hash collision or a tombstone, continue quadratic
652 // probing.
653 BucketNo += ProbeAmt++;
654 BucketNo &= (NumBuckets-1);
655 }
656 }
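The probe sequence above is triangular: BucketNo advances by ProbeAmt = 1, 2, 3, ..., so starting from H = getHashValue(Val) & (NumBuckets - 1) the buckets examined are:

// H, H+1, H+3, H+6, H+10, ... (all mod NumBuckets). With a power-of-two
// table size, triangular-number probing visits every bucket, so the loop
// terminates as long as one empty bucket exists -- which the grow-on-insert
// policy above guarantees.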
657
658 template <typename LookupKeyT>
659 bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
660 const BucketT *ConstFoundBucket;
661 bool Result = const_cast<const DenseMapBase *>(this)
662 ->LookupBucketFor(Val, ConstFoundBucket);
663 FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
664 return Result;
665 }
666
667public:
668 /// Return the approximate size (in bytes) of the actual map.
669 /// This is just the raw memory used by DenseMap.
670 /// If entries are pointers to objects, the size of the referenced objects
671 /// are not included.
672 size_t getMemorySize() const {
673 return getNumBuckets() * sizeof(BucketT);
674 }
675};
676
677/// Equality comparison for DenseMap.
678///
679/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
680/// is also in RHS, and that no additional pairs are in RHS.
681/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
682/// complexity is linear, worst case is O(N^2) (if every hash collides).
683template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
684 typename BucketT>
685bool operator==(
686 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
687 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
688 if (LHS.size() != RHS.size())
689 return false;
690
691 for (auto &KV : LHS) {
692 auto I = RHS.find(KV.first);
693 if (I == RHS.end() || I->second != KV.second)
694 return false;
695 }
696
697 return true;
698}
699
700/// Inequality comparison for DenseMap.
701///
702/// Equivalent to !(LHS == RHS). See operator== for performance notes.
703template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
704 typename BucketT>
705bool operator!=(
706 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
707 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
708 return !(LHS == RHS);
709}
710
711template <typename KeyT, typename ValueT,
712 typename KeyInfoT = DenseMapInfo<KeyT>,
713 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
714class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
715 KeyT, ValueT, KeyInfoT, BucketT> {
716 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
717
718 // Lift some types from the dependent base class into this class for
719 // simplicity of referring to them.
720 using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
721
722 BucketT *Buckets;
723 unsigned NumEntries;
724 unsigned NumTombstones;
725 unsigned NumBuckets;
726
727public:
 728 /// Create a DenseMap with an optional \p InitialReserve that guarantees that
 729 /// this number of elements can be inserted in the map without a grow().
730 explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
731
732 DenseMap(const DenseMap &other) : BaseT() {
733 init(0);
734 copyFrom(other);
735 }
736
737 DenseMap(DenseMap &&other) : BaseT() {
738 init(0);
739 swap(other);
740 }
741
742 template<typename InputIt>
743 DenseMap(const InputIt &I, const InputIt &E) {
744 init(std::distance(I, E));
745 this->insert(I, E);
746 }
747
748 DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
749 init(Vals.size());
750 this->insert(Vals.begin(), Vals.end());
751 }
752
753 ~DenseMap() {
754 this->destroyAll();
755 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
756 }
757
758 void swap(DenseMap& RHS) {
759 this->incrementEpoch();
760 RHS.incrementEpoch();
761 std::swap(Buckets, RHS.Buckets);
762 std::swap(NumEntries, RHS.NumEntries);
763 std::swap(NumTombstones, RHS.NumTombstones);
764 std::swap(NumBuckets, RHS.NumBuckets);
765 }
766
767 DenseMap& operator=(const DenseMap& other) {
768 if (&other != this)
769 copyFrom(other);
770 return *this;
771 }
772
773 DenseMap& operator=(DenseMap &&other) {
774 this->destroyAll();
775 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
776 init(0);
777 swap(other);
778 return *this;
779 }
780
781 void copyFrom(const DenseMap& other) {
782 this->destroyAll();
783 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
784 if (allocateBuckets(other.NumBuckets)) {
785 this->BaseT::copyFrom(other);
786 } else {
787 NumEntries = 0;
788 NumTombstones = 0;
789 }
790 }
791
792 void init(unsigned InitNumEntries) {
793 auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
794 if (allocateBuckets(InitBuckets)) {
795 this->BaseT::initEmpty();
796 } else {
797 NumEntries = 0;
798 NumTombstones = 0;
799 }
800 }
801
802 void grow(unsigned AtLeast) {
803 unsigned OldNumBuckets = NumBuckets;
804 BucketT *OldBuckets = Buckets;
805
806 allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
 807 assert(Buckets);
808 if (!OldBuckets) {
809 this->BaseT::initEmpty();
810 return;
811 }
812
813 this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
814
815 // Free the old table.
816 deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
817 alignof(BucketT));
818 }
819
820 void shrink_and_clear() {
821 unsigned OldNumBuckets = NumBuckets;
822 unsigned OldNumEntries = NumEntries;
823 this->destroyAll();
824
825 // Reduce the number of buckets.
826 unsigned NewNumBuckets = 0;
827 if (OldNumEntries)
828 NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
829 if (NewNumBuckets == NumBuckets) {
830 this->BaseT::initEmpty();
831 return;
832 }
833
834 deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
835 alignof(BucketT));
836 init(NewNumBuckets);
837 }
838
839private:
840 unsigned getNumEntries() const {
841 return NumEntries;
842 }
843
844 void setNumEntries(unsigned Num) {
845 NumEntries = Num;
846 }
847
848 unsigned getNumTombstones() const {
849 return NumTombstones;
850 }
851
852 void setNumTombstones(unsigned Num) {
853 NumTombstones = Num;
854 }
855
856 BucketT *getBuckets() const {
857 return Buckets;
858 }
859
860 unsigned getNumBuckets() const {
861 return NumBuckets;
862 }
863
864 bool allocateBuckets(unsigned Num) {
865 NumBuckets = Num;
866 if (NumBuckets == 0) {
867 Buckets = nullptr;
868 return false;
869 }
870
871 Buckets = static_cast<BucketT *>(
872 allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
873 return true;
874 }
875};
876
877template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
878 typename KeyInfoT = DenseMapInfo<KeyT>,
879 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
880class SmallDenseMap
881 : public DenseMapBase<
882 SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
883 ValueT, KeyInfoT, BucketT> {
884 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
885
886 // Lift some types from the dependent base class into this class for
887 // simplicity of referring to them.
888 using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
889
890 static_assert(isPowerOf2_64(InlineBuckets),
891 "InlineBuckets must be a power of 2.");
892
893 unsigned Small : 1;
894 unsigned NumEntries : 31;
895 unsigned NumTombstones;
896
897 struct LargeRep {
898 BucketT *Buckets;
899 unsigned NumBuckets;
900 };
901
902 /// A "union" of an inline bucket array and the struct representing
903 /// a large bucket. This union will be discriminated by the 'Small' bit.
904 AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
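// Conceptually (an editorial analogue, not the class's own code) this is:
//
//   union {
//     BucketT Inline[InlineBuckets]; // used while Small == 1
//     LargeRep Large;                // used while Small == 0
//   };
//
// spelled as a raw, suitably aligned char buffer so that neither alternative
// is constructed until the 'Small' bit says which one is live.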
905
906public:
907 explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
908 init(NumInitBuckets);
909 }
910
911 SmallDenseMap(const SmallDenseMap &other) : BaseT() {
912 init(0);
913 copyFrom(other);
914 }
915
916 SmallDenseMap(SmallDenseMap &&other) : BaseT() {
917 init(0);
918 swap(other);
919 }
920
921 template<typename InputIt>
922 SmallDenseMap(const InputIt &I, const InputIt &E) {
923 init(NextPowerOf2(std::distance(I, E)));
924 this->insert(I, E);
925 }
926
927 SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
928 : SmallDenseMap(Vals.begin(), Vals.end()) {}
929
930 ~SmallDenseMap() {
931 this->destroyAll();
932 deallocateBuckets();
933 }
934
935 void swap(SmallDenseMap& RHS) {
936 unsigned TmpNumEntries = RHS.NumEntries;
937 RHS.NumEntries = NumEntries;
938 NumEntries = TmpNumEntries;
939 std::swap(NumTombstones, RHS.NumTombstones);
940
941 const KeyT EmptyKey = this->getEmptyKey();
942 const KeyT TombstoneKey = this->getTombstoneKey();
943 if (Small && RHS.Small) {
944 // If we're swapping inline bucket arrays, we have to cope with some of
945 // the tricky bits of DenseMap's storage system: the buckets are not
946 // fully initialized. Thus we swap every key, but we may have
947 // a one-directional move of the value.
948 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
949 BucketT *LHSB = &getInlineBuckets()[i],
950 *RHSB = &RHS.getInlineBuckets()[i];
951 bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
952 !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
953 bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
954 !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
955 if (hasLHSValue && hasRHSValue) {
956 // Swap together if we can...
957 std::swap(*LHSB, *RHSB);
958 continue;
959 }
960 // Swap separately and handle any asymmetry.
961 std::swap(LHSB->getFirst(), RHSB->getFirst());
962 if (hasLHSValue) {
963 ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
964 LHSB->getSecond().~ValueT();
965 } else if (hasRHSValue) {
966 ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
967 RHSB->getSecond().~ValueT();
968 }
969 }
970 return;
971 }
972 if (!Small && !RHS.Small) {
973 std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
974 std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
975 return;
976 }
977
978 SmallDenseMap &SmallSide = Small ? *this : RHS;
979 SmallDenseMap &LargeSide = Small ? RHS : *this;
980
981 // First stash the large side's rep and move the small side across.
982 LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
983 LargeSide.getLargeRep()->~LargeRep();
984 LargeSide.Small = true;
985 // This is similar to the standard move-from-old-buckets, but the bucket
986 // count hasn't actually rotated in this case. So we have to carefully
987 // move construct the keys and values into their new locations, but there
988 // is no need to re-hash things.
989 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
990 BucketT *NewB = &LargeSide.getInlineBuckets()[i],
991 *OldB = &SmallSide.getInlineBuckets()[i];
992 ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
993 OldB->getFirst().~KeyT();
994 if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
995 !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
996 ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
997 OldB->getSecond().~ValueT();
998 }
999 }
1000
1001 // The hard part of moving the small buckets across is done, just move
1002 // the TmpRep into its new home.
1003 SmallSide.Small = false;
1004 new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
1005 }
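// A minimal sketch of the mixed small/large case above (hypothetical values;
// default InlineBuckets = 4): A is small, B has grown to the heap rep.
//
//   llvm::SmallDenseMap<int, int, 4> A, B;
//   A[42] = 1;                      // A: entries live in the inline buckets
//   for (int i = 0; i < 8; ++i)
//     B[i] = i;                     // B: LargeRep of heap-allocated buckets
//   A.swap(B);                      // B's LargeRep is stashed, A's inline
//                                   // entries are move-constructed into B,
//                                   // then the stashed rep is installed in A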
1006
1007 SmallDenseMap& operator=(const SmallDenseMap& other) {
1008 if (&other != this)
1009 copyFrom(other);
1010 return *this;
1011 }
1012
1013 SmallDenseMap& operator=(SmallDenseMap &&other) {
1014 this->destroyAll();
1015 deallocateBuckets();
1016 init(0);
1017 swap(other);
1018 return *this;
1019 }
1020
1021 void copyFrom(const SmallDenseMap& other) {
1022 this->destroyAll();
1023 deallocateBuckets();
1024 Small = true;
1025 if (other.getNumBuckets() > InlineBuckets) {
1026 Small = false;
1027 new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
1028 }
1029 this->BaseT::copyFrom(other);
1030 }
1031
1032 void init(unsigned InitBuckets) {
1033 Small = true;
1034 if (InitBuckets > InlineBuckets) {
1035 Small = false;
1036 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
1037 }
1038 this->BaseT::initEmpty();
1039 }
1040
1041 void grow(unsigned AtLeast) {
1042 if (AtLeast > InlineBuckets)
1043 AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
1044
1045 if (Small) {
1046 // First move the inline buckets into a temporary storage.
1047 AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1048 BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1049 BucketT *TmpEnd = TmpBegin;
1050
1051 // Loop over the buckets, moving non-empty, non-tombstones into the
1052 // temporary storage. Have the loop move the TmpEnd forward as it goes.
1053 const KeyT EmptyKey = this->getEmptyKey();
1054 const KeyT TombstoneKey = this->getTombstoneKey();
1055 for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
1056 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
1057 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
1058 assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1059 "Too many inline buckets!");
1060 ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
1061 ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
1062 ++TmpEnd;
1063 P->getSecond().~ValueT();
1064 }
1065 P->getFirst().~KeyT();
1066 }
1067
1068 // AtLeast == InlineBuckets can happen if there are many tombstones,
1069 // and grow() is used to remove them. Apart from that case, growing
1070 // from the small rep always switches to the large rep here.
1071 if (AtLeast > InlineBuckets) {
1072 Small = false;
1073 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1074 }
1075 this->moveFromOldBuckets(TmpBegin, TmpEnd);
1076 return;
1077 }
1078
1079 LargeRep OldRep = std::move(*getLargeRep());
1080 getLargeRep()->~LargeRep();
1081 if (AtLeast <= InlineBuckets) {
1082 Small = true;
1083 } else {
1084 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1085 }
1086
1087 this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
1088
1089 // Free the old table.
1090 deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1091 alignof(BucketT));
1092 }
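// A sketch of the tombstone-purge case mentioned above: after enough
// erase/insert churn, DenseMapBase may call grow(InlineBuckets) purely to
// rewrite the inline array without tombstones, so the map stays small.
//
//   llvm::SmallDenseMap<int, int, 4> M;
//   M[1] = 1;
//   M.erase(1);   // key 1's inline bucket becomes a tombstone
//   // a later grow(4) re-places the live entries inline; no heap allocation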
1093
1094 void shrink_and_clear() {
1095 unsigned OldSize = this->size();
1096 this->destroyAll();
1097
1098 // Reduce the number of buckets.
1099 unsigned NewNumBuckets = 0;
1100 if (OldSize) {
1101 NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1102 if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1103 NewNumBuckets = 64;
1104 }
1105 if ((Small && NewNumBuckets <= InlineBuckets) ||
1106 (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1107 this->BaseT::initEmpty();
1108 return;
1109 }
1110
1111 deallocateBuckets();
1112 init(NewNumBuckets);
1113 }
1114
1115private:
1116 unsigned getNumEntries() const {
1117 return NumEntries;
1118 }
1119
1120 void setNumEntries(unsigned Num) {
1121 // NumEntries is hardcoded to be 31 bits wide.
1122 assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1123 NumEntries = Num;
1124 }
1125
1126 unsigned getNumTombstones() const {
1127 return NumTombstones;
1128 }
1129
1130 void setNumTombstones(unsigned Num) {
1131 NumTombstones = Num;
1132 }
1133
1134 const BucketT *getInlineBuckets() const {
1135 assert(Small);
1136 // Note that this cast does not violate aliasing rules as we assert that
1137 // the memory's dynamic type is the small, inline bucket buffer, and the
1138 // 'storage' is a POD containing a char buffer.
1139 return reinterpret_cast<const BucketT *>(&storage);
1140 }
1141
1142 BucketT *getInlineBuckets() {
1143 return const_cast<BucketT *>(
1144 const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1145 }
1146
1147 const LargeRep *getLargeRep() const {
1148 assert(!Small);
1149 // Note, same rule about aliasing as with getInlineBuckets.
1150 return reinterpret_cast<const LargeRep *>(&storage);
1151 }
1152
1153 LargeRep *getLargeRep() {
1154 return const_cast<LargeRep *>(
1155 const_cast<const SmallDenseMap *>(this)->getLargeRep());
1156 }
1157
1158 const BucketT *getBuckets() const {
1159 return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1160 }
1161
1162 BucketT *getBuckets() {
1163 return const_cast<BucketT *>(
1164 const_cast<const SmallDenseMap *>(this)->getBuckets());
1165 }
1166
1167 unsigned getNumBuckets() const {
1168 return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1169 }
1170
1171 void deallocateBuckets() {
1172 if (Small)
1173 return;
1174
1175 deallocate_buffer(getLargeRep()->Buckets,
1176 sizeof(BucketT) * getLargeRep()->NumBuckets,
1177 alignof(BucketT));
1178 getLargeRep()->~LargeRep();
1179 }
1180
1181 LargeRep allocateBuckets(unsigned Num) {
1182 assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1183 LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
1184 sizeof(BucketT) * Num, alignof(BucketT))),
1185 Num};
1186 return Rep;
1187 }
1188};
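// A minimal usage sketch of the small-to-large transition (default template
// arguments as declared above; the trigger point assumes the usual 3/4
// load-factor grow rule in DenseMapBase):
//
//   llvm::SmallDenseMap<unsigned, unsigned> M; // InlineBuckets = 4, Small = 1
//   M[0] = 0;
//   M[1] = 1;
//   M[2] = 2;   // third insert exceeds 3/4 of 4 buckets: grow() switches to
//               // a heap LargeRep of 64 buckets, and Small becomes 0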
1189
1190template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1191 bool IsConst>
1192class DenseMapIterator : DebugEpochBase::HandleBase {
1193 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1194 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1195
1196public:
1197 using difference_type = ptrdiff_t;
1198 using value_type =
1199 typename std::conditional<IsConst, const Bucket, Bucket>::type;
1200 using pointer = value_type *;
1201 using reference = value_type &;
1202 using iterator_category = std::forward_iterator_tag;
1203
1204private:
1205 pointer Ptr = nullptr;
1206 pointer End = nullptr;
1207
1208public:
1209 DenseMapIterator() = default;
1210
1211 DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
1212 bool NoAdvance = false)
1213 : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1214 assert(isHandleInSync() && "invalid construction!");
1215
1216 if (NoAdvance) return;
1217 if (shouldReverseIterate<KeyT>()) {
1218 RetreatPastEmptyBuckets();
1219 return;
1220 }
1221 AdvancePastEmptyBuckets();
1222 }
1223
1224 // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1225 // for const iterator destinations so it doesn't end up as a user defined copy
1226 // constructor.
1227 template <bool IsConstSrc,
1228 typename = std::enable_if_t<!IsConstSrc && IsConst>>
1229 DenseMapIterator(
1230 const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1231 : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1232
1233 reference operator*() const {
1234 assert(isHandleInSync() && "invalid iterator access!");
1235 assert(Ptr != End && "dereferencing end() iterator");
1236 if (shouldReverseIterate<KeyT>())
1237 return Ptr[-1];
1238 return *Ptr;
1239 }
1240 pointer operator->() const {
1241 assert(isHandleInSync() && "invalid iterator access!");
1242 assert(Ptr != End && "dereferencing end() iterator");
1243 if (shouldReverseIterate<KeyT>())
1244 return &(Ptr[-1]);
1245 return Ptr;
1246 }
1247
1248 friend bool operator==(const DenseMapIterator &LHS,
1249 const DenseMapIterator &RHS) {
1250 assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
1251 assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1252 assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
1253 "comparing incomparable iterators!");
1254 return LHS.Ptr == RHS.Ptr;
Step 22: Assuming 'LHS.Ptr' is not equal to 'RHS.Ptr'
Step 23: Returning zero, which participates in a condition later
1255 }
1256
1257 friend bool operator!=(const DenseMapIterator &LHS,
1258 const DenseMapIterator &RHS) {
1259 return !(LHS == RHS);
Step 21: Calling 'operator=='
Step 24: Returning from 'operator=='
Step 25: Returning the value 1, which participates in a condition later
1260 }
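Reading the path: step 21 enters operator!= from the caller, steps 22-23 show operator== assuming the two iterators differ and returning zero, and steps 24-25 show operator!= therefore returning 1, the value that feeds the condition noted later on the reported bug path.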
1261
1262 inline DenseMapIterator& operator++() { // Preincrement
1263 assert(isHandleInSync() && "invalid iterator access!");
1264 assert(Ptr != End && "incrementing end() iterator");
1265 if (shouldReverseIterate<KeyT>()) {
1266 --Ptr;
1267 RetreatPastEmptyBuckets();
1268 return *this;
1269 }
1270 ++Ptr;
1271 AdvancePastEmptyBuckets();
1272 return *this;
1273 }
1274 DenseMapIterator operator++(int) { // Postincrement
1275 assert(isHandleInSync() && "invalid iterator access!");
1276 DenseMapIterator tmp = *this; ++*this; return tmp;
1277 }
1278
1279private:
1280 void AdvancePastEmptyBuckets() {
1281 assert(Ptr <= End);
1282 const KeyT Empty = KeyInfoT::getEmptyKey();
1283 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1284
1285 while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1286 KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1287 ++Ptr;
1288 }
1289
1290 void RetreatPastEmptyBuckets() {
1291 assert(Ptr >= End);
1292 const KeyT Empty = KeyInfoT::getEmptyKey();
1293 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1294
1295 while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1296 KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1297 --Ptr;
1298 }
1299};
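// A minimal sketch of the invariant kept by the two helpers above: the
// iterator always rests on a live bucket (or on End), skipping empty and
// tombstone slots in whichever direction shouldReverseIterate<KeyT>() picks.
//
//   llvm::DenseMap<int, int> M;
//   M[1] = 10;
//   M[2] = 20;
//   M.erase(1);             // key 1's bucket becomes a tombstone
//   for (auto &KV : M)      // operator++ runs AdvancePastEmptyBuckets()
//     (void)KV.second;      // visits only the live {2, 20} entry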
1300
1301template <typename KeyT, typename ValueT, typename KeyInfoT>
1302inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1303 return X.getMemorySize();
1304}
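// Usage sketch for the helper above (getMemorySize() reports the bucket
// array's footprint, sizeof(BucketT) * NumBuckets, not the size of any
// allocations owned by the keys or values themselves):
//
//   llvm::DenseMap<int, int> M;
//   M[1] = 1;
//   size_t Bytes = llvm::capacity_in_bytes(M);  // 64 buckets' worth here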
1305
1306} // end namespace llvm
1307
1308#endif // LLVM_ADT_DENSEMAP_H