| File: | src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/RegisterCoalescer.cpp |
| Warning: | line 2586, column 11 Called C++ object pointer is null |
| 1 | //===- RegisterCoalescer.cpp - Generic Register Coalescing Interface ------===// | |||
| 2 | // | |||
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
| 4 | // See https://llvm.org/LICENSE.txt for license information. | |||
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
| 6 | // | |||
| 7 | //===----------------------------------------------------------------------===// | |||
| 8 | // | |||
| 9 | // This file implements the generic RegisterCoalescer interface which | |||
| 10 | // is used as the common interface used by all clients and | |||
| 11 | // implementations of register coalescing. | |||
| 12 | // | |||
| 13 | //===----------------------------------------------------------------------===// | |||
| 14 | ||||
| 15 | #include "RegisterCoalescer.h" | |||
| 16 | #include "llvm/ADT/ArrayRef.h" | |||
| 17 | #include "llvm/ADT/BitVector.h" | |||
| 18 | #include "llvm/ADT/DenseSet.h" | |||
| 19 | #include "llvm/ADT/STLExtras.h" | |||
| 20 | #include "llvm/ADT/SmallPtrSet.h" | |||
| 21 | #include "llvm/ADT/SmallVector.h" | |||
| 22 | #include "llvm/ADT/Statistic.h" | |||
| 23 | #include "llvm/Analysis/AliasAnalysis.h" | |||
| 24 | #include "llvm/CodeGen/LiveInterval.h" | |||
| 25 | #include "llvm/CodeGen/LiveIntervals.h" | |||
| 26 | #include "llvm/CodeGen/LiveRangeEdit.h" | |||
| 27 | #include "llvm/CodeGen/MachineBasicBlock.h" | |||
| 28 | #include "llvm/CodeGen/MachineFunction.h" | |||
| 29 | #include "llvm/CodeGen/MachineFunctionPass.h" | |||
| 30 | #include "llvm/CodeGen/MachineInstr.h" | |||
| 31 | #include "llvm/CodeGen/MachineInstrBuilder.h" | |||
| 32 | #include "llvm/CodeGen/MachineLoopInfo.h" | |||
| 33 | #include "llvm/CodeGen/MachineOperand.h" | |||
| 34 | #include "llvm/CodeGen/MachineRegisterInfo.h" | |||
| 35 | #include "llvm/CodeGen/Passes.h" | |||
| 36 | #include "llvm/CodeGen/RegisterClassInfo.h" | |||
| 37 | #include "llvm/CodeGen/SlotIndexes.h" | |||
| 38 | #include "llvm/CodeGen/TargetInstrInfo.h" | |||
| 39 | #include "llvm/CodeGen/TargetOpcodes.h" | |||
| 40 | #include "llvm/CodeGen/TargetRegisterInfo.h" | |||
| 41 | #include "llvm/CodeGen/TargetSubtargetInfo.h" | |||
| 42 | #include "llvm/IR/DebugLoc.h" | |||
| 43 | #include "llvm/InitializePasses.h" | |||
| 44 | #include "llvm/MC/LaneBitmask.h" | |||
| 45 | #include "llvm/MC/MCInstrDesc.h" | |||
| 46 | #include "llvm/MC/MCRegisterInfo.h" | |||
| 47 | #include "llvm/Pass.h" | |||
| 48 | #include "llvm/Support/CommandLine.h" | |||
| 49 | #include "llvm/Support/Compiler.h" | |||
| 50 | #include "llvm/Support/Debug.h" | |||
| 51 | #include "llvm/Support/ErrorHandling.h" | |||
| 52 | #include "llvm/Support/raw_ostream.h" | |||
| 53 | #include <algorithm> | |||
| 54 | #include <cassert> | |||
| 55 | #include <iterator> | |||
| 56 | #include <limits> | |||
| 57 | #include <tuple> | |||
| 58 | #include <utility> | |||
| 59 | #include <vector> | |||
| 60 | ||||
| 61 | using namespace llvm; | |||
| 62 | ||||
| 63 | #define DEBUG_TYPE"regalloc" "regalloc" | |||
| 64 | ||||
| 65 | STATISTIC(numJoins , "Number of interval joins performed"); | |||
| 66 | STATISTIC(numCrossRCs , "Number of cross class joins performed"); | |||
| 67 | STATISTIC(numCommutes , "Number of instruction commuting performed"); | |||
| 68 | STATISTIC(numExtends , "Number of copies extended"); | |||
| 69 | STATISTIC(NumReMats , "Number of instructions re-materialized"); | |||
| 70 | STATISTIC(NumInflated , "Number of register classes inflated"); | |||
| 71 | STATISTIC(NumLaneConflicts, "Number of dead lane conflicts tested"); | |||
| 72 | STATISTIC(NumLaneResolves, "Number of dead lane conflicts resolved"); | |||
| 73 | STATISTIC(NumShrinkToUses, "Number of shrinkToUses called"); | |||
| 74 | ||||
| 75 | static cl::opt<bool> EnableJoining("join-liveintervals", | |||
| 76 | cl::desc("Coalesce copies (default=true)"), | |||
| 77 | cl::init(true), cl::Hidden); | |||
| 78 | ||||
| 79 | static cl::opt<bool> UseTerminalRule("terminal-rule", | |||
| 80 | cl::desc("Apply the terminal rule"), | |||
| 81 | cl::init(false), cl::Hidden); | |||
| 82 | ||||
| 83 | /// Temporary flag to test critical edge unsplitting. | |||
| 84 | static cl::opt<bool> | |||
| 85 | EnableJoinSplits("join-splitedges", | |||
| 86 | cl::desc("Coalesce copies on split edges (default=subtarget)"), cl::Hidden); | |||
| 87 | ||||
| 88 | /// Temporary flag to test global copy optimization. | |||
| 89 | static cl::opt<cl::boolOrDefault> | |||
| 90 | EnableGlobalCopies("join-globalcopies", | |||
| 91 | cl::desc("Coalesce copies that span blocks (default=subtarget)"), | |||
| 92 | cl::init(cl::BOU_UNSET), cl::Hidden); | |||
| 93 | ||||
| 94 | static cl::opt<bool> | |||
| 95 | VerifyCoalescing("verify-coalescing", | |||
| 96 | cl::desc("Verify machine instrs before and after register coalescing"), | |||
| 97 | cl::Hidden); | |||
| 98 | ||||
| 99 | static cl::opt<unsigned> LateRematUpdateThreshold( | |||
| 100 | "late-remat-update-threshold", cl::Hidden, | |||
| 101 | cl::desc("During rematerialization for a copy, if the def instruction has " | |||
| 102 | "many other copy uses to be rematerialized, delay the multiple " | |||
| 103 | "separate live interval update work and do them all at once after " | |||
| 104 | "all those rematerialization are done. It will save a lot of " | |||
| 105 | "repeated work. "), | |||
| 106 | cl::init(100)); | |||
| 107 | ||||
| 108 | static cl::opt<unsigned> LargeIntervalSizeThreshold( | |||
| 109 | "large-interval-size-threshold", cl::Hidden, | |||
| 110 | cl::desc("If the valnos size of an interval is larger than the threshold, " | |||
| 111 | "it is regarded as a large interval. "), | |||
| 112 | cl::init(100)); | |||
| 113 | ||||
| 114 | static cl::opt<unsigned> LargeIntervalFreqThreshold( | |||
| 115 | "large-interval-freq-threshold", cl::Hidden, | |||
| 116 | cl::desc("For a large interval, if it is coalesed with other live " | |||
| 117 | "intervals many times more than the threshold, stop its " | |||
| 118 | "coalescing to control the compile time. "), | |||
| 119 | cl::init(100)); | |||
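| | // Illustrative usage note (not part of the original source): these are | |||
| | // hidden llvm::cl options, so they do not show up in -help but can still | |||
| | // be set when running llc directly, e.g. | |||
| | //   llc -join-liveintervals=false -large-interval-size-threshold=50 ... | |||
| | // or through the clang driver with -mllvm, e.g. "clang -mllvm -terminal-rule". | |||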
| 120 | ||||
| 121 | namespace { | |||
| 122 | ||||
| 123 | class JoinVals; | |||
| 124 | ||||
| 125 | class RegisterCoalescer : public MachineFunctionPass, | |||
| 126 | private LiveRangeEdit::Delegate { | |||
| 127 | MachineFunction* MF = nullptr; | |||
| 128 | MachineRegisterInfo* MRI = nullptr; | |||
| 129 | const TargetRegisterInfo* TRI = nullptr; | |||
| 130 | const TargetInstrInfo* TII = nullptr; | |||
| 131 | LiveIntervals *LIS = nullptr; | |||
| 132 | const MachineLoopInfo* Loops = nullptr; | |||
| 133 | AliasAnalysis *AA = nullptr; | |||
| 134 | RegisterClassInfo RegClassInfo; | |||
| 135 | ||||
| 136 | /// Position and VReg of a PHI instruction during coalescing. | |||
| 137 | struct PHIValPos { | |||
| 138 | SlotIndex SI; ///< Slot where this PHI occurs. | |||
| 139 | Register Reg; ///< VReg the PHI occurs in. | |||
| 140 | unsigned SubReg; ///< Qualifying subregister for Reg. | |||
| 141 | }; | |||
| 142 | ||||
| 143 | /// Map from debug instruction number to PHI position during coalescing. | |||
| 144 | DenseMap<unsigned, PHIValPos> PHIValToPos; | |||
| 145 | /// Index of, for each VReg, which debug instruction numbers and | |||
| 146 | /// corresponding PHIs are sensitive to coalescing. Each VReg may have | |||
| 147 | /// multiple PHI defs, at different positions. | |||
| 148 | DenseMap<Register, SmallVector<unsigned, 2>> RegToPHIIdx; | |||
| 149 | ||||
| 150 | /// Debug variable location tracking -- for each VReg, maintain an | |||
| 151 | /// ordered-by-slot-index set of DBG_VALUEs, to help quick | |||
| 152 | /// identification of whether coalescing may change location validity. | |||
| 153 | using DbgValueLoc = std::pair<SlotIndex, MachineInstr*>; | |||
| 154 | DenseMap<Register, std::vector<DbgValueLoc>> DbgVRegToValues; | |||
| 155 | ||||
| 156 | /// VRegs may be repeatedly coalesced, and have many DBG_VALUEs attached. | |||
| 157 | /// To avoid repeatedly merging sets of DbgValueLocs, instead record | |||
| 158 | /// which vregs have been coalesced, and where to. This map is from | |||
| 159 | /// vreg => {set of vregs merged in}. | |||
| 160 | DenseMap<Register, SmallVector<Register, 4>> DbgMergedVRegNums; | |||
| 161 | ||||
| 162 | /// A LaneMask to remember on which subregister live ranges we need to call | |||
| 163 | /// shrinkToUses() later. | |||
| 164 | LaneBitmask ShrinkMask; | |||
| 165 | ||||
| 166 | /// True if the main range of the currently coalesced intervals should be | |||
| 167 | /// checked for smaller live intervals. | |||
| 168 | bool ShrinkMainRange = false; | |||
| 169 | ||||
| 170 | /// True if the coalescer should aggressively coalesce global copies | |||
| 171 | /// in favor of keeping local copies. | |||
| 172 | bool JoinGlobalCopies = false; | |||
| 173 | ||||
| 174 | /// True if the coalescer should aggressively coalesce fall-thru | |||
| 175 | /// blocks exclusively containing copies. | |||
| 176 | bool JoinSplitEdges = false; | |||
| 177 | ||||
| 178 | /// Copy instructions yet to be coalesced. | |||
| 179 | SmallVector<MachineInstr*, 8> WorkList; | |||
| 180 | SmallVector<MachineInstr*, 8> LocalWorkList; | |||
| 181 | ||||
| 182 | /// Set of instruction pointers that have been erased, and | |||
| 183 | /// that may be present in WorkList. | |||
| 184 | SmallPtrSet<MachineInstr*, 8> ErasedInstrs; | |||
| 185 | ||||
| 186 | /// Dead instructions that are about to be deleted. | |||
| 187 | SmallVector<MachineInstr*, 8> DeadDefs; | |||
| 188 | ||||
| 189 | /// Virtual registers to be considered for register class inflation. | |||
| 190 | SmallVector<Register, 8> InflateRegs; | |||
| 191 | ||||
| 192 | /// The collection of live intervals which should have been updated | |||
| 193 | /// immediately after rematerialization but delayed until | |||
| 194 | /// lateLiveIntervalUpdate is called. | |||
| 195 | DenseSet<Register> ToBeUpdated; | |||
| 196 | ||||
| 197 | /// Record how many times a large live interval with many valnos | |||
| 198 | /// has been tried to join with other live intervals. | |||
| 199 | DenseMap<Register, unsigned long> LargeLIVisitCounter; | |||
| 200 | ||||
| 201 | /// Recursively eliminate dead defs in DeadDefs. | |||
| 202 | void eliminateDeadDefs(); | |||
| 203 | ||||
| 204 | /// allUsesAvailableAt - Return true if all registers used by OrigMI at | |||
| 205 | /// OrigIdx are also available with the same value at UseIdx. | |||
| 206 | bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx, | |||
| 207 | SlotIndex UseIdx); | |||
| 208 | ||||
| 209 | /// LiveRangeEdit callback for eliminateDeadDefs(). | |||
| 210 | void LRE_WillEraseInstruction(MachineInstr *MI) override; | |||
| 211 | ||||
| 212 | /// Coalesce the LocalWorkList. | |||
| 213 | void coalesceLocals(); | |||
| 214 | ||||
| 215 | /// Join compatible live intervals | |||
| 216 | void joinAllIntervals(); | |||
| 217 | ||||
| 218 | /// Coalesce copies in the specified MBB, putting | |||
| 219 | /// copies that cannot yet be coalesced into WorkList. | |||
| 220 | void copyCoalesceInMBB(MachineBasicBlock *MBB); | |||
| 221 | ||||
| 222 | /// Tries to coalesce all copies in CurrList. Returns true if any progress | |||
| 223 | /// was made. | |||
| 224 | bool copyCoalesceWorkList(MutableArrayRef<MachineInstr*> CurrList); | |||
| 225 | ||||
| 226 | /// If one def has many copy like uses, and those copy uses are all | |||
| 227 | /// rematerialized, the live interval update needed for those | |||
| 228 | /// rematerializations will be delayed and done all at once instead | |||
| 229 | /// of being done multiple times. This is to save compile cost because | |||
| 230 | /// live interval update is costly. | |||
| 231 | void lateLiveIntervalUpdate(); | |||
| 232 | ||||
| 233 | /// Check if the incoming value defined by a COPY at \p SLRQ in the subrange | |||
| 234 | /// has no value defined in the predecessors. If the incoming value is the | |||
| 235 | /// same as defined by the copy itself, the value is considered undefined. | |||
| 236 | bool copyValueUndefInPredecessors(LiveRange &S, | |||
| 237 | const MachineBasicBlock *MBB, | |||
| 238 | LiveQueryResult SLRQ); | |||
| 239 | ||||
| 240 | /// Set necessary undef flags on subregister uses after pruning out undef | |||
| 241 | /// lane segments from the subrange. | |||
| 242 | void setUndefOnPrunedSubRegUses(LiveInterval &LI, Register Reg, | |||
| 243 | LaneBitmask PrunedLanes); | |||
| 244 | ||||
| 245 | /// Attempt to join intervals corresponding to SrcReg/DstReg, which are the | |||
| 246 | /// src/dst of the copy instruction CopyMI. This returns true if the copy | |||
| 247 | /// was successfully coalesced away. If it is not currently possible to | |||
| 248 | /// coalesce this interval, but it may be possible if other things get | |||
| 249 | /// coalesced, then it returns true by reference in 'Again'. | |||
| 250 | bool joinCopy(MachineInstr *CopyMI, bool &Again); | |||
| 251 | ||||
| 252 | /// Attempt to join these two intervals. On failure, this | |||
| 253 | /// returns false. The output "SrcInt" will not have been modified, so we | |||
| 254 | /// can use this information below to update aliases. | |||
| 255 | bool joinIntervals(CoalescerPair &CP); | |||
| 256 | ||||
| 257 | /// Attempt joining two virtual registers. Return true on success. | |||
| 258 | bool joinVirtRegs(CoalescerPair &CP); | |||
| 259 | ||||
| 260 | /// If a live interval has many valnos and is coalesced with other | |||
| 261 | /// live intervals many times, we regard such live interval as having | |||
| 262 | /// high compile time cost. | |||
| 263 | bool isHighCostLiveInterval(LiveInterval &LI); | |||
| 264 | ||||
| 265 | /// Attempt joining with a reserved physreg. | |||
| 266 | bool joinReservedPhysReg(CoalescerPair &CP); | |||
| 267 | ||||
| 268 | /// Add the LiveRange @p ToMerge as a subregister liverange of @p LI. | |||
| 269 | /// Subranges in @p LI which only partially interfere with the desired | |||
| 270 | /// LaneMask are split as necessary. @p LaneMask are the lanes that | |||
| 271 | /// @p ToMerge will occupy in the coalescer register. @p LI has its subrange | |||
| 272 | /// lanemasks already adjusted to the coalesced register. | |||
| 273 | void mergeSubRangeInto(LiveInterval &LI, const LiveRange &ToMerge, | |||
| 274 | LaneBitmask LaneMask, CoalescerPair &CP, | |||
| 275 | unsigned DstIdx); | |||
| 276 | ||||
| 277 | /// Join the liveranges of two subregisters. Joins @p RRange into | |||
| 278 | /// @p LRange, @p RRange may be invalid afterwards. | |||
| 279 | void joinSubRegRanges(LiveRange &LRange, LiveRange &RRange, | |||
| 280 | LaneBitmask LaneMask, const CoalescerPair &CP); | |||
| 281 | ||||
| 282 | /// We found a non-trivially-coalescable copy. If the source value number is | |||
| 283 | /// defined by a copy from the destination reg see if we can merge these two | |||
| 284 | /// destination reg valno# into a single value number, eliminating a copy. | |||
| 285 | /// This returns true if an interval was modified. | |||
| 286 | bool adjustCopiesBackFrom(const CoalescerPair &CP, MachineInstr *CopyMI); | |||
| 287 | ||||
| 288 | /// Return true if there are definitions of IntB | |||
| 289 | /// other than BValNo val# that can reach uses of AValno val# of IntA. | |||
| 290 | bool hasOtherReachingDefs(LiveInterval &IntA, LiveInterval &IntB, | |||
| 291 | VNInfo *AValNo, VNInfo *BValNo); | |||
| 292 | ||||
| 293 | /// We found a non-trivially-coalescable copy. | |||
| 294 | /// If the source value number is defined by a commutable instruction and | |||
| 295 | /// its other operand is coalesced to the copy dest register, see if we | |||
| 296 | /// can transform the copy into a noop by commuting the definition. | |||
| 297 | /// This returns a pair of two flags: | |||
| 298 | /// - the first element is true if an interval was modified, | |||
| 299 | /// - the second element is true if the destination interval needs | |||
| 300 | /// to be shrunk after deleting the copy. | |||
| 301 | std::pair<bool,bool> removeCopyByCommutingDef(const CoalescerPair &CP, | |||
| 302 | MachineInstr *CopyMI); | |||
| 303 | ||||
| 304 | /// We found a copy which can be moved to its less frequent predecessor. | |||
| 305 | bool removePartialRedundancy(const CoalescerPair &CP, MachineInstr &CopyMI); | |||
| 306 | ||||
| 307 | /// If the source of a copy is defined by a | |||
| 308 | /// trivial computation, replace the copy by rematerialize the definition. | |||
| 309 | bool reMaterializeTrivialDef(const CoalescerPair &CP, MachineInstr *CopyMI, | |||
| 310 | bool &IsDefCopy); | |||
| 311 | ||||
| 312 | /// Return true if a copy involving a physreg should be joined. | |||
| 313 | bool canJoinPhys(const CoalescerPair &CP); | |||
| 314 | ||||
| 315 | /// Replace all defs and uses of SrcReg to DstReg and update the subregister | |||
| 316 | /// number if it is not zero. If DstReg is a physical register and the | |||
| 317 | /// existing subregister number of the def / use being updated is not zero, | |||
| 318 | /// make sure to set it to the correct physical subregister. | |||
| 319 | void updateRegDefsUses(Register SrcReg, Register DstReg, unsigned SubIdx); | |||
| 320 | ||||
| 321 | /// If the given machine operand reads only undefined lanes add an undef | |||
| 322 | /// flag. | |||
| 323 | /// This can happen when undef uses were previously concealed by a copy | |||
| 324 | /// which we coalesced. Example: | |||
| 325 | /// %0:sub0<def,read-undef> = ... | |||
| 326 | /// %1 = COPY %0 <-- Coalescing COPY reveals undef | |||
| 327 | /// = use %1:sub1 <-- hidden undef use | |||
| 328 | void addUndefFlag(const LiveInterval &Int, SlotIndex UseIdx, | |||
| 329 | MachineOperand &MO, unsigned SubRegIdx); | |||
| 330 | ||||
| 331 | /// Handle copies of undef values. If the undef value is an incoming | |||
| 332 | /// PHI value, it will convert @p CopyMI to an IMPLICIT_DEF. | |||
| 333 | /// Returns nullptr if @p CopyMI was not in any way eliminable. Otherwise, | |||
| 334 | /// it returns @p CopyMI (which could be an IMPLICIT_DEF at this point). | |||
| 335 | MachineInstr *eliminateUndefCopy(MachineInstr *CopyMI); | |||
| 336 | ||||
| 337 | /// Check whether or not we should apply the terminal rule on the | |||
| 338 | /// destination (Dst) of \p Copy. | |||
| 339 | /// When the terminal rule applies, Copy is not profitable to | |||
| 340 | /// coalesce. | |||
| 341 | /// Dst is terminal if it has exactly one affinity (Dst, Src) and | |||
| 342 | /// at least one interference (Dst, Dst2). If Dst is terminal, the | |||
| 343 | /// terminal rule consists in checking that at least one of | |||
| 344 | /// interfering node, say Dst2, has an affinity of equal or greater | |||
| 345 | /// weight with Src. | |||
| 346 | /// In that case, Dst2 and Dst will not be able to be both coalesced | |||
| 347 | /// with Src. Since Dst2 exposes more coalescing opportunities than | |||
| 348 | /// Dst, we can drop \p Copy. | |||
| 349 | bool applyTerminalRule(const MachineInstr &Copy) const; | |||
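| | // Illustrative example (not part of the original source): if %dst has its | |||
| | // only copy affinity with %src but interferes with %dst2, and %dst2 also | |||
| | // has an affinity with %src of equal or greater weight, then coalescing | |||
| | // %dst with %src would block the at-least-as-profitable %dst2/%src join, | |||
| | // so the terminal rule drops the "%dst = COPY %src" candidate. | |||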
| 350 | ||||
| 351 | /// Wrapper method for \see LiveIntervals::shrinkToUses. | |||
| 352 | /// This method does the proper fixing of the live-ranges when the | |||
| 353 | /// aforementioned method returns true. | |||
| 354 | void shrinkToUses(LiveInterval *LI, | |||
| 355 | SmallVectorImpl<MachineInstr * > *Dead = nullptr) { | |||
| 356 | NumShrinkToUses++; | |||
| 357 | if (LIS->shrinkToUses(LI, Dead)) { | |||
| 358 | /// Check whether or not \p LI is composed by multiple connected | |||
| 359 | /// components and if that is the case, fix that. | |||
| 360 | SmallVector<LiveInterval*, 8> SplitLIs; | |||
| 361 | LIS->splitSeparateComponents(*LI, SplitLIs); | |||
| 362 | } | |||
| 363 | } | |||
| 364 | ||||
| 365 | /// Wrapper Method to do all the necessary work when an Instruction is | |||
| 366 | /// deleted. | |||
| 367 | /// Optimizations should use this to make sure that deleted instructions | |||
| 368 | /// are always accounted for. | |||
| 369 | void deleteInstr(MachineInstr* MI) { | |||
| 370 | ErasedInstrs.insert(MI); | |||
| 371 | LIS->RemoveMachineInstrFromMaps(*MI); | |||
| 372 | MI->eraseFromParent(); | |||
| 373 | } | |||
| 374 | ||||
| 375 | /// Walk over function and initialize the DbgVRegToValues map. | |||
| 376 | void buildVRegToDbgValueMap(MachineFunction &MF); | |||
| 377 | ||||
| 378 | /// Test whether, after merging, any DBG_VALUEs would refer to a | |||
| 379 | /// different value number than before merging, and whether this can | |||
| 380 | /// be resolved. If not, mark the DBG_VALUE as being undef. | |||
| 381 | void checkMergingChangesDbgValues(CoalescerPair &CP, LiveRange &LHS, | |||
| 382 | JoinVals &LHSVals, LiveRange &RHS, | |||
| 383 | JoinVals &RHSVals); | |||
| 384 | ||||
| 385 | void checkMergingChangesDbgValuesImpl(Register Reg, LiveRange &OtherRange, | |||
| 386 | LiveRange &RegRange, JoinVals &Vals2); | |||
| 387 | ||||
| 388 | public: | |||
| 389 | static char ID; ///< Class identification, replacement for typeinfo | |||
| 390 | ||||
| 391 | RegisterCoalescer() : MachineFunctionPass(ID) { | |||
| 392 | initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry()); | |||
| 393 | } | |||
| 394 | ||||
| 395 | void getAnalysisUsage(AnalysisUsage &AU) const override; | |||
| 396 | ||||
| 397 | void releaseMemory() override; | |||
| 398 | ||||
| 399 | /// This is the pass entry point. | |||
| 400 | bool runOnMachineFunction(MachineFunction&) override; | |||
| 401 | ||||
| 402 | /// Implement the dump method. | |||
| 403 | void print(raw_ostream &O, const Module* = nullptr) const override; | |||
| 404 | }; | |||
| 405 | ||||
| 406 | } // end anonymous namespace | |||
| 407 | ||||
| 408 | char RegisterCoalescer::ID = 0; | |||
| 409 | ||||
| 410 | char &llvm::RegisterCoalescerID = RegisterCoalescer::ID; | |||
| 411 | ||||
| 412 | INITIALIZE_PASS_BEGIN(RegisterCoalescer, "simple-register-coalescing", | |||
| 413 | "Simple Register Coalescing", false, false) | |||
| 414 | INITIALIZE_PASS_DEPENDENCY(LiveIntervals) | |||
| 415 | INITIALIZE_PASS_DEPENDENCY(SlotIndexes) | |||
| 416 | INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo) | |||
| 417 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) | |||
| 418 | INITIALIZE_PASS_END(RegisterCoalescer, "simple-register-coalescing", | |||
| 419 | "Simple Register Coalescing", false, false) | |||
| 420 | ||||
| 421 | LLVM_NODISCARD static bool isMoveInstr(const TargetRegisterInfo &tri, | |||
| 422 | const MachineInstr *MI, Register &Src, | |||
| 423 | Register &Dst, unsigned &SrcSub, | |||
| 424 | unsigned &DstSub) { | |||
| 425 | if (MI->isCopy()) { | |||
| 426 | Dst = MI->getOperand(0).getReg(); | |||
| 427 | DstSub = MI->getOperand(0).getSubReg(); | |||
| 428 | Src = MI->getOperand(1).getReg(); | |||
| 429 | SrcSub = MI->getOperand(1).getSubReg(); | |||
| 430 | } else if (MI->isSubregToReg()) { | |||
| 431 | Dst = MI->getOperand(0).getReg(); | |||
| 432 | DstSub = tri.composeSubRegIndices(MI->getOperand(0).getSubReg(), | |||
| 433 | MI->getOperand(3).getImm()); | |||
| 434 | Src = MI->getOperand(2).getReg(); | |||
| 435 | SrcSub = MI->getOperand(2).getSubReg(); | |||
| 436 | } else | |||
| 437 | return false; | |||
| 438 | return true; | |||
| 439 | } | |||
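| | // Illustrative note (not part of the original source): for a | |||
| | // SUBREG_TO_REG such as | |||
| | //   %1:gr64 = SUBREG_TO_REG 0, %0:gr32, %subreg.sub_32 | |||
| | // the instruction is decomposed as the copy "%1.sub_32 = COPY %0", i.e. | |||
| | // DstSub becomes the composition of operand 0's subreg index with the | |||
| | // immediate in operand 3, and Src/SrcSub are taken from operand 2. | |||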
| 440 | ||||
| 441 | /// Return true if this block should be vacated by the coalescer to eliminate | |||
| 442 | /// branches. The important cases to handle in the coalescer are critical edges | |||
| 443 | /// split during phi elimination which contain only copies. Simple blocks that | |||
| 444 | /// contain non-branches should also be vacated, but this can be handled by an | |||
| 445 | /// earlier pass similar to early if-conversion. | |||
| 446 | static bool isSplitEdge(const MachineBasicBlock *MBB) { | |||
| 447 | if (MBB->pred_size() != 1 || MBB->succ_size() != 1) | |||
| 448 | return false; | |||
| 449 | ||||
| 450 | for (const auto &MI : *MBB) { | |||
| 451 | if (!MI.isCopyLike() && !MI.isUnconditionalBranch()) | |||
| 452 | return false; | |||
| 453 | } | |||
| 454 | return true; | |||
| 455 | } | |||
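| | // Illustrative example (not part of the original source): a block like | |||
| | //   bb.1 (preds: bb.0; succs: bb.2): | |||
| | //     %2:gr32 = COPY %1 | |||
| | //     JMP_1 %bb.2 | |||
| | // contains only a copy and an unconditional branch and has exactly one | |||
| | // predecessor and one successor, so isSplitEdge() returns true for it. | |||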
| 456 | ||||
| 457 | bool CoalescerPair::setRegisters(const MachineInstr *MI) { | |||
| 458 | SrcReg = DstReg = Register(); | |||
| 459 | SrcIdx = DstIdx = 0; | |||
| 460 | NewRC = nullptr; | |||
| 461 | Flipped = CrossClass = false; | |||
| 462 | ||||
| 463 | Register Src, Dst; | |||
| 464 | unsigned SrcSub = 0, DstSub = 0; | |||
| 465 | if (!isMoveInstr(TRI, MI, Src, Dst, SrcSub, DstSub)) | |||
| 466 | return false; | |||
| 467 | Partial = SrcSub || DstSub; | |||
| 468 | ||||
| 469 | // If one register is a physreg, it must be Dst. | |||
| 470 | if (Register::isPhysicalRegister(Src)) { | |||
| 471 | if (Register::isPhysicalRegister(Dst)) | |||
| 472 | return false; | |||
| 473 | std::swap(Src, Dst); | |||
| 474 | std::swap(SrcSub, DstSub); | |||
| 475 | Flipped = true; | |||
| 476 | } | |||
| 477 | ||||
| 478 | const MachineRegisterInfo &MRI = MI->getMF()->getRegInfo(); | |||
| 479 | ||||
| 480 | if (Register::isPhysicalRegister(Dst)) { | |||
| 481 | // Eliminate DstSub on a physreg. | |||
| 482 | if (DstSub) { | |||
| 483 | Dst = TRI.getSubReg(Dst, DstSub); | |||
| 484 | if (!Dst) return false; | |||
| 485 | DstSub = 0; | |||
| 486 | } | |||
| 487 | ||||
| 488 | // Eliminate SrcSub by picking a corresponding Dst superregister. | |||
| 489 | if (SrcSub) { | |||
| 490 | Dst = TRI.getMatchingSuperReg(Dst, SrcSub, MRI.getRegClass(Src)); | |||
| 491 | if (!Dst) return false; | |||
| 492 | } else if (!MRI.getRegClass(Src)->contains(Dst)) { | |||
| 493 | return false; | |||
| 494 | } | |||
| 495 | } else { | |||
| 496 | // Both registers are virtual. | |||
| 497 | const TargetRegisterClass *SrcRC = MRI.getRegClass(Src); | |||
| 498 | const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); | |||
| 499 | ||||
| 500 | // Both registers have subreg indices. | |||
| 501 | if (SrcSub && DstSub) { | |||
| 502 | // Copies between different sub-registers are never coalescable. | |||
| 503 | if (Src == Dst && SrcSub != DstSub) | |||
| 504 | return false; | |||
| 505 | ||||
| 506 | NewRC = TRI.getCommonSuperRegClass(SrcRC, SrcSub, DstRC, DstSub, | |||
| 507 | SrcIdx, DstIdx); | |||
| 508 | if (!NewRC) | |||
| 509 | return false; | |||
| 510 | } else if (DstSub) { | |||
| 511 | // SrcReg will be merged with a sub-register of DstReg. | |||
| 512 | SrcIdx = DstSub; | |||
| 513 | NewRC = TRI.getMatchingSuperRegClass(DstRC, SrcRC, DstSub); | |||
| 514 | } else if (SrcSub) { | |||
| 515 | // DstReg will be merged with a sub-register of SrcReg. | |||
| 516 | DstIdx = SrcSub; | |||
| 517 | NewRC = TRI.getMatchingSuperRegClass(SrcRC, DstRC, SrcSub); | |||
| 518 | } else { | |||
| 519 | // This is a straight copy without sub-registers. | |||
| 520 | NewRC = TRI.getCommonSubClass(DstRC, SrcRC); | |||
| 521 | } | |||
| 522 | ||||
| 523 | // The combined constraint may be impossible to satisfy. | |||
| 524 | if (!NewRC) | |||
| 525 | return false; | |||
| 526 | ||||
| 527 | // Prefer SrcReg to be a sub-register of DstReg. | |||
| 528 | // FIXME: Coalescer should support subregs symmetrically. | |||
| 529 | if (DstIdx && !SrcIdx) { | |||
| 530 | std::swap(Src, Dst); | |||
| 531 | std::swap(SrcIdx, DstIdx); | |||
| 532 | Flipped = !Flipped; | |||
| 533 | } | |||
| 534 | ||||
| 535 | CrossClass = NewRC != DstRC || NewRC != SrcRC; | |||
| 536 | } | |||
| 537 | // Check our invariants | |||
| 538 | assert(Register::isVirtualRegister(Src) && "Src must be virtual"); | |||
| 539 | assert(!(Register::isPhysicalRegister(Dst) && DstSub) && | |||
| 540 | "Cannot have a physical SubIdx"); | |||
| 541 | SrcReg = Src; | |||
| 542 | DstReg = Dst; | |||
| 543 | return true; | |||
| 544 | } | |||
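| | // Illustrative walk-through (not part of the original source): for | |||
| | //   %1:gr32 = COPY %0.sub_32   ; %0 is a 64-bit virtual register | |||
| | // the SrcSub-only branch sets DstIdx = sub_32 and picks NewRC with | |||
| | // getMatchingSuperRegClass(SrcRC, DstRC, sub_32); the pair is then | |||
| | // flipped so that SrcReg ends up as the sub-register side of DstReg. | |||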
| 545 | ||||
| 546 | bool CoalescerPair::flip() { | |||
| 547 | if (Register::isPhysicalRegister(DstReg)) | |||
| 548 | return false; | |||
| 549 | std::swap(SrcReg, DstReg); | |||
| 550 | std::swap(SrcIdx, DstIdx); | |||
| 551 | Flipped = !Flipped; | |||
| 552 | return true; | |||
| 553 | } | |||
| 554 | ||||
| 555 | bool CoalescerPair::isCoalescable(const MachineInstr *MI) const { | |||
| 556 | if (!MI) | |||
| 557 | return false; | |||
| 558 | Register Src, Dst; | |||
| 559 | unsigned SrcSub = 0, DstSub = 0; | |||
| 560 | if (!isMoveInstr(TRI, MI, Src, Dst, SrcSub, DstSub)) | |||
| 561 | return false; | |||
| 562 | ||||
| 563 | // Find the virtual register that is SrcReg. | |||
| 564 | if (Dst == SrcReg) { | |||
| 565 | std::swap(Src, Dst); | |||
| 566 | std::swap(SrcSub, DstSub); | |||
| 567 | } else if (Src != SrcReg) { | |||
| 568 | return false; | |||
| 569 | } | |||
| 570 | ||||
| 571 | // Now check that Dst matches DstReg. | |||
| 572 | if (DstReg.isPhysical()) { | |||
| 573 | if (!Dst.isPhysical()) | |||
| 574 | return false; | |||
| 575 | assert(!DstIdx && !SrcIdx && "Inconsistent CoalescerPair state."); | |||
| 576 | // DstSub could be set for a physreg from INSERT_SUBREG. | |||
| 577 | if (DstSub) | |||
| 578 | Dst = TRI.getSubReg(Dst, DstSub); | |||
| 579 | // Full copy of Src. | |||
| 580 | if (!SrcSub) | |||
| 581 | return DstReg == Dst; | |||
| 582 | // This is a partial register copy. Check that the parts match. | |||
| 583 | return Register(TRI.getSubReg(DstReg, SrcSub)) == Dst; | |||
| 584 | } else { | |||
| 585 | // DstReg is virtual. | |||
| 586 | if (DstReg != Dst) | |||
| 587 | return false; | |||
| 588 | // Registers match, do the subregisters line up? | |||
| 589 | return TRI.composeSubRegIndices(SrcIdx, SrcSub) == | |||
| 590 | TRI.composeSubRegIndices(DstIdx, DstSub); | |||
| 591 | } | |||
| 592 | } | |||
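| | // Illustrative example (not part of the original source): with a virtual | |||
| | // pair SrcReg = %0, DstReg = %1 and SrcIdx = DstIdx = 0, the instruction | |||
| | //   %1 = COPY %0 | |||
| | // is coalescable, while "%1.sub_32 = COPY %0" is not, because the composed | |||
| | // subregister indices (0 vs. sub_32) no longer line up. | |||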
| 593 | ||||
| 594 | void RegisterCoalescer::getAnalysisUsage(AnalysisUsage &AU) const { | |||
| 595 | AU.setPreservesCFG(); | |||
| 596 | AU.addRequired<AAResultsWrapperPass>(); | |||
| 597 | AU.addRequired<LiveIntervals>(); | |||
| 598 | AU.addPreserved<LiveIntervals>(); | |||
| 599 | AU.addPreserved<SlotIndexes>(); | |||
| 600 | AU.addRequired<MachineLoopInfo>(); | |||
| 601 | AU.addPreserved<MachineLoopInfo>(); | |||
| 602 | AU.addPreservedID(MachineDominatorsID); | |||
| 603 | MachineFunctionPass::getAnalysisUsage(AU); | |||
| 604 | } | |||
| 605 | ||||
| 606 | void RegisterCoalescer::eliminateDeadDefs() { | |||
| 607 | SmallVector<Register, 8> NewRegs; | |||
| 608 | LiveRangeEdit(nullptr, NewRegs, *MF, *LIS, | |||
| 609 | nullptr, this).eliminateDeadDefs(DeadDefs); | |||
| 610 | } | |||
| 611 | ||||
| 612 | bool RegisterCoalescer::allUsesAvailableAt(const MachineInstr *OrigMI, | |||
| 613 | SlotIndex OrigIdx, | |||
| 614 | SlotIndex UseIdx) { | |||
| 615 | SmallVector<Register, 8> NewRegs; | |||
| 616 | return LiveRangeEdit(nullptr, NewRegs, *MF, *LIS, nullptr, this) | |||
| 617 | .allUsesAvailableAt(OrigMI, OrigIdx, UseIdx); | |||
| 618 | } | |||
| 619 | ||||
| 620 | void RegisterCoalescer::LRE_WillEraseInstruction(MachineInstr *MI) { | |||
| 621 | // MI may be in WorkList. Make sure we don't visit it. | |||
| 622 | ErasedInstrs.insert(MI); | |||
| 623 | } | |||
| 624 | ||||
| 625 | bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP, | |||
| 626 | MachineInstr *CopyMI) { | |||
| 627 | assert(!CP.isPartial() && "This doesn't work for partial copies."); | |||
| 628 | assert(!CP.isPhys() && "This doesn't work for physreg copies."); | |||
| 629 | ||||
| 630 | LiveInterval &IntA = | |||
| 631 | LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg()); | |||
| 632 | LiveInterval &IntB = | |||
| 633 | LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg()); | |||
| 634 | SlotIndex CopyIdx = LIS->getInstructionIndex(*CopyMI).getRegSlot(); | |||
| 635 | ||||
| 636 | // We have a non-trivially-coalescable copy with IntA being the source and | |||
| 637 | // IntB being the dest, thus this defines a value number in IntB. If the | |||
| 638 | // source value number (in IntA) is defined by a copy from B, see if we can | |||
| 639 | // merge these two pieces of B into a single value number, eliminating a copy. | |||
| 640 | // For example: | |||
| 641 | // | |||
| 642 | // A3 = B0 | |||
| 643 | // ... | |||
| 644 | // B1 = A3 <- this copy | |||
| 645 | // | |||
| 646 | // In this case, B0 can be extended to where the B1 copy lives, allowing the | |||
| 647 | // B1 value number to be replaced with B0 (which simplifies the B | |||
| 648 | // liveinterval). | |||
| 649 | ||||
| 650 | // BValNo is a value number in B that is defined by a copy from A. 'B1' in | |||
| 651 | // the example above. | |||
| 652 | LiveInterval::iterator BS = IntB.FindSegmentContaining(CopyIdx); | |||
| 653 | if (BS == IntB.end()) return false; | |||
| 654 | VNInfo *BValNo = BS->valno; | |||
| 655 | ||||
| 656 | // Get the location that B is defined at. Two options: either this value has | |||
| 657 | // an unknown definition point or it is defined at CopyIdx. If unknown, we | |||
| 658 | // can't process it. | |||
| 659 | if (BValNo->def != CopyIdx) return false; | |||
| 660 | ||||
| 661 | // AValNo is the value number in A that defines the copy, A3 in the example. | |||
| 662 | SlotIndex CopyUseIdx = CopyIdx.getRegSlot(true); | |||
| 663 | LiveInterval::iterator AS = IntA.FindSegmentContaining(CopyUseIdx); | |||
| 664 | // The live segment might not exist after fun with physreg coalescing. | |||
| 665 | if (AS == IntA.end()) return false; | |||
| 666 | VNInfo *AValNo = AS->valno; | |||
| 667 | ||||
| 668 | // If AValNo is defined as a copy from IntB, we can potentially process this. | |||
| 669 | // Get the instruction that defines this value number. | |||
| 670 | MachineInstr *ACopyMI = LIS->getInstructionFromIndex(AValNo->def); | |||
| 671 | // Don't allow any partial copies, even if isCoalescable() allows them. | |||
| 672 | if (!CP.isCoalescable(ACopyMI) || !ACopyMI->isFullCopy()) | |||
| 673 | return false; | |||
| 674 | ||||
| 675 | // Get the Segment in IntB that this value number starts with. | |||
| 676 | LiveInterval::iterator ValS = | |||
| 677 | IntB.FindSegmentContaining(AValNo->def.getPrevSlot()); | |||
| 678 | if (ValS == IntB.end()) | |||
| 679 | return false; | |||
| 680 | ||||
| 681 | // Make sure that the end of the live segment is inside the same block as | |||
| 682 | // CopyMI. | |||
| 683 | MachineInstr *ValSEndInst = | |||
| 684 | LIS->getInstructionFromIndex(ValS->end.getPrevSlot()); | |||
| 685 | if (!ValSEndInst || ValSEndInst->getParent() != CopyMI->getParent()) | |||
| 686 | return false; | |||
| 687 | ||||
| 688 | // Okay, we now know that ValS ends in the same block that the CopyMI | |||
| 689 | // live-range starts. If there are no intervening live segments between them | |||
| 690 | // in IntB, we can merge them. | |||
| 691 | if (ValS+1 != BS) return false; | |||
| 692 | ||||
| 693 | LLVM_DEBUG(dbgs() << "Extending: " << printReg(IntB.reg(), TRI))do { } while (false); | |||
| 694 | ||||
| 695 | SlotIndex FillerStart = ValS->end, FillerEnd = BS->start; | |||
| 696 | // We are about to delete CopyMI, so need to remove it as the 'instruction | |||
| 697 | // that defines this value #'. Update the valnum with the new defining | |||
| 698 | // instruction #. | |||
| 699 | BValNo->def = FillerStart; | |||
| 700 | ||||
| 701 | // Okay, we can merge them. We need to insert a new liverange: | |||
| 702 | // [ValS.end, BS.begin) of either value number, then we merge the | |||
| 703 | // two value numbers. | |||
| 704 | IntB.addSegment(LiveInterval::Segment(FillerStart, FillerEnd, BValNo)); | |||
| 705 | ||||
| 706 | // Okay, merge "B1" into the same value number as "B0". | |||
| 707 | if (BValNo != ValS->valno) | |||
| 708 | IntB.MergeValueNumberInto(BValNo, ValS->valno); | |||
| 709 | ||||
| 710 | // Do the same for the subregister segments. | |||
| 711 | for (LiveInterval::SubRange &S : IntB.subranges()) { | |||
| 712 | // Check for SubRange Segments of the form [1234r,1234d:0) which can be | |||
| 713 | // removed to prevent creating bogus SubRange Segments. | |||
| 714 | LiveInterval::iterator SS = S.FindSegmentContaining(CopyIdx); | |||
| 715 | if (SS != S.end() && SlotIndex::isSameInstr(SS->start, SS->end)) { | |||
| 716 | S.removeSegment(*SS, true); | |||
| 717 | continue; | |||
| 718 | } | |||
| 719 | // The subrange may have ended before FillerStart. If so, extend it. | |||
| 720 | if (!S.getVNInfoAt(FillerStart)) { | |||
| 721 | SlotIndex BBStart = | |||
| 722 | LIS->getMBBStartIdx(LIS->getMBBFromIndex(FillerStart)); | |||
| 723 | S.extendInBlock(BBStart, FillerStart); | |||
| 724 | } | |||
| 725 | VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx); | |||
| 726 | S.addSegment(LiveInterval::Segment(FillerStart, FillerEnd, SubBValNo)); | |||
| 727 | VNInfo *SubValSNo = S.getVNInfoAt(AValNo->def.getPrevSlot()); | |||
| 728 | if (SubBValNo != SubValSNo) | |||
| 729 | S.MergeValueNumberInto(SubBValNo, SubValSNo); | |||
| 730 | } | |||
| 731 | ||||
| 732 | LLVM_DEBUG(dbgs() << " result = " << IntB << '\n')do { } while (false); | |||
| 733 | ||||
| 734 | // If the source instruction was killing the source register before the | |||
| 735 | // merge, unset the isKill marker given the live range has been extended. | |||
| 736 | int UIdx = ValSEndInst->findRegisterUseOperandIdx(IntB.reg(), true); | |||
| 737 | if (UIdx != -1) { | |||
| 738 | ValSEndInst->getOperand(UIdx).setIsKill(false); | |||
| 739 | } | |||
| 740 | ||||
| 741 | // Rewrite the copy. | |||
| 742 | CopyMI->substituteRegister(IntA.reg(), IntB.reg(), 0, *TRI); | |||
| 743 | // If the copy instruction was killing the destination register or any | |||
| 744 | // subrange before the merge trim the live range. | |||
| 745 | bool RecomputeLiveRange = AS->end == CopyIdx; | |||
| 746 | if (!RecomputeLiveRange) { | |||
| 747 | for (LiveInterval::SubRange &S : IntA.subranges()) { | |||
| 748 | LiveInterval::iterator SS = S.FindSegmentContaining(CopyUseIdx); | |||
| 749 | if (SS != S.end() && SS->end == CopyIdx) { | |||
| 750 | RecomputeLiveRange = true; | |||
| 751 | break; | |||
| 752 | } | |||
| 753 | } | |||
| 754 | } | |||
| 755 | if (RecomputeLiveRange) | |||
| 756 | shrinkToUses(&IntA); | |||
| 757 | ||||
| 758 | ++numExtends; | |||
| 759 | return true; | |||
| 760 | } | |||
| 761 | ||||
| 762 | bool RegisterCoalescer::hasOtherReachingDefs(LiveInterval &IntA, | |||
| 763 | LiveInterval &IntB, | |||
| 764 | VNInfo *AValNo, | |||
| 765 | VNInfo *BValNo) { | |||
| 766 | // If AValNo has PHI kills, conservatively assume that IntB defs can reach | |||
| 767 | // the PHI values. | |||
| 768 | if (LIS->hasPHIKill(IntA, AValNo)) | |||
| 769 | return true; | |||
| 770 | ||||
| 771 | for (LiveRange::Segment &ASeg : IntA.segments) { | |||
| 772 | if (ASeg.valno != AValNo) continue; | |||
| 773 | LiveInterval::iterator BI = llvm::upper_bound(IntB, ASeg.start); | |||
| 774 | if (BI != IntB.begin()) | |||
| 775 | --BI; | |||
| 776 | for (; BI != IntB.end() && ASeg.end >= BI->start; ++BI) { | |||
| 777 | if (BI->valno == BValNo) | |||
| 778 | continue; | |||
| 779 | if (BI->start <= ASeg.start && BI->end > ASeg.start) | |||
| 780 | return true; | |||
| 781 | if (BI->start > ASeg.start && BI->start < ASeg.end) | |||
| 782 | return true; | |||
| 783 | } | |||
| 784 | } | |||
| 785 | return false; | |||
| 786 | } | |||
| 787 | ||||
| 788 | /// Copy segments with value number @p SrcValNo from liverange @p Src to live | |||
| 789 | /// range @Dst and use value number @p DstValNo there. | |||
| 790 | static std::pair<bool,bool> | |||
| 791 | addSegmentsWithValNo(LiveRange &Dst, VNInfo *DstValNo, const LiveRange &Src, | |||
| 792 | const VNInfo *SrcValNo) { | |||
| 793 | bool Changed = false; | |||
| 794 | bool MergedWithDead = false; | |||
| 795 | for (const LiveRange::Segment &S : Src.segments) { | |||
| 796 | if (S.valno != SrcValNo) | |||
| 797 | continue; | |||
| 798 | // This is adding a segment from Src that ends in a copy that is about | |||
| 799 | // to be removed. This segment is going to be merged with a pre-existing | |||
| 800 | // segment in Dst. This works, except in cases when the corresponding | |||
| 801 | // segment in Dst is dead. For example: adding [192r,208r:1) from Src | |||
| 802 | // to [208r,208d:1) in Dst would create [192r,208d:1) in Dst. | |||
| 803 | // Recognize such cases, so that the segments can be shrunk. | |||
| 804 | LiveRange::Segment Added = LiveRange::Segment(S.start, S.end, DstValNo); | |||
| 805 | LiveRange::Segment &Merged = *Dst.addSegment(Added); | |||
| 806 | if (Merged.end.isDead()) | |||
| 807 | MergedWithDead = true; | |||
| 808 | Changed = true; | |||
| 809 | } | |||
| 810 | return std::make_pair(Changed, MergedWithDead); | |||
| 811 | } | |||
| 812 | ||||
| 813 | std::pair<bool,bool> | |||
| 814 | RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP, | |||
| 815 | MachineInstr *CopyMI) { | |||
| 816 | assert(!CP.isPhys()); | |||
| 817 | ||||
| 818 | LiveInterval &IntA = | |||
| 819 | LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg()); | |||
| 820 | LiveInterval &IntB = | |||
| 821 | LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg()); | |||
| 822 | ||||
| 823 | // We found a non-trivially-coalescable copy with IntA being the source and | |||
| 824 | // IntB being the dest, thus this defines a value number in IntB. If the | |||
| 825 | // source value number (in IntA) is defined by a commutable instruction and | |||
| 826 | // its other operand is coalesced to the copy dest register, see if we can | |||
| 827 | // transform the copy into a noop by commuting the definition. For example, | |||
| 828 | // | |||
| 829 | // A3 = op A2 killed B0 | |||
| 830 | // ... | |||
| 831 | // B1 = A3 <- this copy | |||
| 832 | // ... | |||
| 833 | // = op A3 <- more uses | |||
| 834 | // | |||
| 835 | // ==> | |||
| 836 | // | |||
| 837 | // B2 = op B0 killed A2 | |||
| 838 | // ... | |||
| 839 | // B1 = B2 <- now an identity copy | |||
| 840 | // ... | |||
| 841 | // = op B2 <- more uses | |||
| 842 | ||||
| 843 | // BValNo is a value number in B that is defined by a copy from A. 'B1' in | |||
| 844 | // the example above. | |||
| 845 | SlotIndex CopyIdx = LIS->getInstructionIndex(*CopyMI).getRegSlot(); | |||
| 846 | VNInfo *BValNo = IntB.getVNInfoAt(CopyIdx); | |||
| 847 | assert(BValNo != nullptr && BValNo->def == CopyIdx); | |||
| 848 | ||||
| 849 | // AValNo is the value number in A that defines the copy, A3 in the example. | |||
| 850 | VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getRegSlot(true)); | |||
| 851 | assert(AValNo && !AValNo->isUnused() && "COPY source not live"); | |||
| 852 | if (AValNo->isPHIDef()) | |||
| 853 | return { false, false }; | |||
| 854 | MachineInstr *DefMI = LIS->getInstructionFromIndex(AValNo->def); | |||
| 855 | if (!DefMI) | |||
| 856 | return { false, false }; | |||
| 857 | if (!DefMI->isCommutable()) | |||
| 858 | return { false, false }; | |||
| 859 | // If DefMI is a two-address instruction then commuting it will change the | |||
| 860 | // destination register. | |||
| 861 | int DefIdx = DefMI->findRegisterDefOperandIdx(IntA.reg()); | |||
| 862 | assert(DefIdx != -1); | |||
| 863 | unsigned UseOpIdx; | |||
| 864 | if (!DefMI->isRegTiedToUseOperand(DefIdx, &UseOpIdx)) | |||
| 865 | return { false, false }; | |||
| 866 | ||||
| 867 | // FIXME: The code below tries to commute 'UseOpIdx' operand with some other | |||
| 868 | // commutable operand which is expressed by 'CommuteAnyOperandIndex'value | |||
| 869 | // passed to the method. That _other_ operand is chosen by | |||
| 870 | // the findCommutedOpIndices() method. | |||
| 871 | // | |||
| 872 | // That is obviously an area for improvement in case of instructions having | |||
| 873 | // more than 2 operands. For example, if some instruction has 3 commutable | |||
| 874 | // operands then all possible variants (i.e. op#1<->op#2, op#1<->op#3, | |||
| 875 | // op#2<->op#3) of commute transformation should be considered/tried here. | |||
| 876 | unsigned NewDstIdx = TargetInstrInfo::CommuteAnyOperandIndex; | |||
| 877 | if (!TII->findCommutedOpIndices(*DefMI, UseOpIdx, NewDstIdx)) | |||
| 878 | return { false, false }; | |||
| 879 | ||||
| 880 | MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx); | |||
| 881 | Register NewReg = NewDstMO.getReg(); | |||
| 882 | if (NewReg != IntB.reg() || !IntB.Query(AValNo->def).isKill()) | |||
| 883 | return { false, false }; | |||
| 884 | ||||
| 885 | // Make sure there are no other definitions of IntB that would reach the | |||
| 886 | // uses which the new definition can reach. | |||
| 887 | if (hasOtherReachingDefs(IntA, IntB, AValNo, BValNo)) | |||
| 888 | return { false, false }; | |||
| 889 | ||||
| 890 | // If some of the uses of IntA.reg are already coalesced away, return false. | |||
| 891 | // It's not possible to determine whether it's safe to perform the coalescing. | |||
| 892 | for (MachineOperand &MO : MRI->use_nodbg_operands(IntA.reg())) { | |||
| 893 | MachineInstr *UseMI = MO.getParent(); | |||
| 894 | unsigned OpNo = &MO - &UseMI->getOperand(0); | |||
| 895 | SlotIndex UseIdx = LIS->getInstructionIndex(*UseMI); | |||
| 896 | LiveInterval::iterator US = IntA.FindSegmentContaining(UseIdx); | |||
| 897 | if (US == IntA.end() || US->valno != AValNo) | |||
| 898 | continue; | |||
| 899 | // If this use is tied to a def, we can't rewrite the register. | |||
| 900 | if (UseMI->isRegTiedToDefOperand(OpNo)) | |||
| 901 | return { false, false }; | |||
| 902 | } | |||
| 903 | ||||
| 904 | LLVM_DEBUG(dbgs() << "\tremoveCopyByCommutingDef: " << AValNo->def << '\t'do { } while (false) | |||
| 905 | << *DefMI)do { } while (false); | |||
| 906 | ||||
| 907 | // At this point we have decided that it is legal to do this | |||
| 908 | // transformation. Start by commuting the instruction. | |||
| 909 | MachineBasicBlock *MBB = DefMI->getParent(); | |||
| 910 | MachineInstr *NewMI = | |||
| 911 | TII->commuteInstruction(*DefMI, false, UseOpIdx, NewDstIdx); | |||
| 912 | if (!NewMI) | |||
| 913 | return { false, false }; | |||
| 914 | if (Register::isVirtualRegister(IntA.reg()) && | |||
| 915 | Register::isVirtualRegister(IntB.reg()) && | |||
| 916 | !MRI->constrainRegClass(IntB.reg(), MRI->getRegClass(IntA.reg()))) | |||
| 917 | return { false, false }; | |||
| 918 | if (NewMI != DefMI) { | |||
| 919 | LIS->ReplaceMachineInstrInMaps(*DefMI, *NewMI); | |||
| 920 | MachineBasicBlock::iterator Pos = DefMI; | |||
| 921 | MBB->insert(Pos, NewMI); | |||
| 922 | MBB->erase(DefMI); | |||
| 923 | } | |||
| 924 | ||||
| 925 | // If ALR and BLR overlaps and end of BLR extends beyond end of ALR, e.g. | |||
| 926 | // A = or A, B | |||
| 927 | // ... | |||
| 928 | // B = A | |||
| 929 | // ... | |||
| 930 | // C = killed A | |||
| 931 | // ... | |||
| 932 | // = B | |||
| 933 | ||||
| 934 | // Update uses of IntA of the specific Val# with IntB. | |||
| 935 | for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(IntA.reg()), | |||
| 936 | UE = MRI->use_end(); | |||
| 937 | UI != UE; | |||
| 938 | /* ++UI is below because of possible MI removal */) { | |||
| 939 | MachineOperand &UseMO = *UI; | |||
| 940 | ++UI; | |||
| 941 | if (UseMO.isUndef()) | |||
| 942 | continue; | |||
| 943 | MachineInstr *UseMI = UseMO.getParent(); | |||
| 944 | if (UseMI->isDebugInstr()) { | |||
| 945 | // FIXME These don't have an instruction index. Not clear we have enough | |||
| 946 | // info to decide whether to do this replacement or not. For now do it. | |||
| 947 | UseMO.setReg(NewReg); | |||
| 948 | continue; | |||
| 949 | } | |||
| 950 | SlotIndex UseIdx = LIS->getInstructionIndex(*UseMI).getRegSlot(true); | |||
| 951 | LiveInterval::iterator US = IntA.FindSegmentContaining(UseIdx); | |||
| 952 | assert(US != IntA.end() && "Use must be live"); | |||
| 953 | if (US->valno != AValNo) | |||
| 954 | continue; | |||
| 955 | // Kill flags are no longer accurate. They are recomputed after RA. | |||
| 956 | UseMO.setIsKill(false); | |||
| 957 | if (Register::isPhysicalRegister(NewReg)) | |||
| 958 | UseMO.substPhysReg(NewReg, *TRI); | |||
| 959 | else | |||
| 960 | UseMO.setReg(NewReg); | |||
| 961 | if (UseMI == CopyMI) | |||
| 962 | continue; | |||
| 963 | if (!UseMI->isCopy()) | |||
| 964 | continue; | |||
| 965 | if (UseMI->getOperand(0).getReg() != IntB.reg() || | |||
| 966 | UseMI->getOperand(0).getSubReg()) | |||
| 967 | continue; | |||
| 968 | ||||
| 969 | // This copy will become a noop. If it's defining a new val#, merge it into | |||
| 970 | // BValNo. | |||
| 971 | SlotIndex DefIdx = UseIdx.getRegSlot(); | |||
| 972 | VNInfo *DVNI = IntB.getVNInfoAt(DefIdx); | |||
| 973 | if (!DVNI) | |||
| 974 | continue; | |||
| 975 | LLVM_DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI)do { } while (false); | |||
| 976 | assert(DVNI->def == DefIdx)((void)0); | |||
| 977 | BValNo = IntB.MergeValueNumberInto(DVNI, BValNo); | |||
| 978 | for (LiveInterval::SubRange &S : IntB.subranges()) { | |||
| 979 | VNInfo *SubDVNI = S.getVNInfoAt(DefIdx); | |||
| 980 | if (!SubDVNI) | |||
| 981 | continue; | |||
| 982 | VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx); | |||
| 983 | assert(SubBValNo->def == CopyIdx); | |||
| 984 | S.MergeValueNumberInto(SubDVNI, SubBValNo); | |||
| 985 | } | |||
| 986 | ||||
| 987 | deleteInstr(UseMI); | |||
| 988 | } | |||
| 989 | ||||
| 990 | // Extend BValNo by merging in IntA live segments of AValNo. Val# definition | |||
| 991 | // is updated. | |||
| 992 | bool ShrinkB = false; | |||
| 993 | BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator(); | |||
| 994 | if (IntA.hasSubRanges() || IntB.hasSubRanges()) { | |||
| 995 | if (!IntA.hasSubRanges()) { | |||
| 996 | LaneBitmask Mask = MRI->getMaxLaneMaskForVReg(IntA.reg()); | |||
| 997 | IntA.createSubRangeFrom(Allocator, Mask, IntA); | |||
| 998 | } else if (!IntB.hasSubRanges()) { | |||
| 999 | LaneBitmask Mask = MRI->getMaxLaneMaskForVReg(IntB.reg()); | |||
| 1000 | IntB.createSubRangeFrom(Allocator, Mask, IntB); | |||
| 1001 | } | |||
| 1002 | SlotIndex AIdx = CopyIdx.getRegSlot(true); | |||
| 1003 | LaneBitmask MaskA; | |||
| 1004 | const SlotIndexes &Indexes = *LIS->getSlotIndexes(); | |||
| 1005 | for (LiveInterval::SubRange &SA : IntA.subranges()) { | |||
| 1006 | VNInfo *ASubValNo = SA.getVNInfoAt(AIdx); | |||
| 1007 | // Even if we are dealing with a full copy, some lanes can | |||
| 1008 | // still be undefined. | |||
| 1009 | // E.g., | |||
| 1010 | // undef A.subLow = ... | |||
| 1011 | // B = COPY A <== A.subHigh is undefined here and does | |||
| 1012 | // not have a value number. | |||
| 1013 | if (!ASubValNo) | |||
| 1014 | continue; | |||
| 1015 | MaskA |= SA.LaneMask; | |||
| 1016 | ||||
| 1017 | IntB.refineSubRanges( | |||
| 1018 | Allocator, SA.LaneMask, | |||
| 1019 | [&Allocator, &SA, CopyIdx, ASubValNo, | |||
| 1020 | &ShrinkB](LiveInterval::SubRange &SR) { | |||
| 1021 | VNInfo *BSubValNo = SR.empty() ? SR.getNextValue(CopyIdx, Allocator) | |||
| 1022 | : SR.getVNInfoAt(CopyIdx); | |||
| 1023 | assert(BSubValNo != nullptr); | |||
| 1024 | auto P = addSegmentsWithValNo(SR, BSubValNo, SA, ASubValNo); | |||
| 1025 | ShrinkB |= P.second; | |||
| 1026 | if (P.first) | |||
| 1027 | BSubValNo->def = ASubValNo->def; | |||
| 1028 | }, | |||
| 1029 | Indexes, *TRI); | |||
| 1030 | } | |||
| 1031 | // Go over all subranges of IntB that have not been covered by IntA, | |||
| 1032 | // and delete the segments starting at CopyIdx. This can happen if | |||
| 1033 | // IntA has undef lanes that are defined in IntB. | |||
| 1034 | for (LiveInterval::SubRange &SB : IntB.subranges()) { | |||
| 1035 | if ((SB.LaneMask & MaskA).any()) | |||
| 1036 | continue; | |||
| 1037 | if (LiveRange::Segment *S = SB.getSegmentContaining(CopyIdx)) | |||
| 1038 | if (S->start.getBaseIndex() == CopyIdx.getBaseIndex()) | |||
| 1039 | SB.removeSegment(*S, true); | |||
| 1040 | } | |||
| 1041 | } | |||
| 1042 | ||||
| 1043 | BValNo->def = AValNo->def; | |||
| 1044 | auto P = addSegmentsWithValNo(IntB, BValNo, IntA, AValNo); | |||
| 1045 | ShrinkB |= P.second; | |||
| 1046 | LLVM_DEBUG(dbgs() << "\t\textended: " << IntB << '\n')do { } while (false); | |||
| 1047 | ||||
| 1048 | LIS->removeVRegDefAt(IntA, AValNo->def); | |||
| 1049 | ||||
| 1050 | LLVM_DEBUG(dbgs() << "\t\ttrimmed: " << IntA << '\n')do { } while (false); | |||
| 1051 | ++numCommutes; | |||
| 1052 | return { true, ShrinkB }; | |||
| 1053 | } | |||
| 1054 | ||||
| 1055 | /// For copy B = A in BB2, if A is defined by A = B in BB0 which is a | |||
| 1056 | /// predecessor of BB2, and if B is not redefined on the way from A = B | |||
| 1057 | /// in BB0 to B = A in BB2, B = A in BB2 is partially redundant if the | |||
| 1058 | /// execution goes through the path from BB0 to BB2. We may move B = A | |||
| 1059 | /// to the predecessor without such reversed copy. | |||
| 1060 | /// So we will transform the program from: | |||
| 1061 | /// BB0: | |||
| 1062 | /// A = B; BB1: | |||
| 1063 | /// ... ... | |||
| 1064 | /// / \ / | |||
| 1065 | /// BB2: | |||
| 1066 | /// ... | |||
| 1067 | /// B = A; | |||
| 1068 | /// | |||
| 1069 | /// to: | |||
| 1070 | /// | |||
| 1071 | /// BB0: BB1: | |||
| 1072 | /// A = B; ... | |||
| 1073 | /// ... B = A; | |||
| 1074 | /// / \ / | |||
| 1075 | /// BB2: | |||
| 1076 | /// ... | |||
| 1077 | /// | |||
| 1078 | /// A special case is when BB0 and BB2 are the same BB which is the only | |||
| 1079 | /// BB in a loop: | |||
| 1080 | /// BB1: | |||
| 1081 | /// ... | |||
| 1082 | /// BB0/BB2: ---- | |||
| 1083 | /// B = A; | | |||
| 1084 | /// ... | | |||
| 1085 | /// A = B; | | |||
| 1086 | /// |------- | |||
| 1087 | /// | | |||
| 1088 | /// We may hoist B = A from BB0/BB2 to BB1. | |||
| 1089 | /// | |||
| 1090 | /// The major preconditions for correctness to remove such partial | |||
| 1091 | /// redundancy include: | |||
| 1092 | /// 1. A in B = A in BB2 is defined by a PHI in BB2, and one operand of | |||
| 1093 | /// the PHI is defined by the reversed copy A = B in BB0. | |||
| 1094 | /// 2. No B is referenced from the start of BB2 to B = A. | |||
| 1095 | /// 3. No B is defined from A = B to the end of BB0. | |||
| 1096 | /// 4. BB1 has only one successor. | |||
| 1097 | /// | |||
| 1098 | /// 2 and 4 implicitly ensure B is not live at the end of BB1. | |||
| 1099 | /// 4 guarantees BB2 is hotter than BB1, so we can only move a copy to a | |||
| 1100 | /// colder place, which not only prevents endless loops but also makes sure | |||
| 1101 | /// the movement of the copy is beneficial. | |||
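| | /// | |||
| | /// For illustration, a hypothetical MIR sketch (A as %a, B as %b; the | |||
| | /// register names are invented for this example): | |||
| | ///    bb.0: %a = COPY %b    bb.1:                 ; both branch to bb.2 | |||
| | ///    bb.2: %b = COPY %a                          ; partially redundant | |||
| | /// => | |||
| | ///    bb.0: %a = COPY %b    bb.1: %b = COPY %a | |||
| | ///    bb.2:                                       ; copy removed | |||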
| 1102 | bool RegisterCoalescer::removePartialRedundancy(const CoalescerPair &CP, | |||
| 1103 | MachineInstr &CopyMI) { | |||
| 1104 | assert(!CP.isPhys())((void)0); | |||
| 1105 | if (!CopyMI.isFullCopy()) | |||
| 1106 | return false; | |||
| 1107 | ||||
| 1108 | MachineBasicBlock &MBB = *CopyMI.getParent(); | |||
| 1109 | // If this block is the target of an invoke/inlineasm_br, moving the copy into | |||
| 1110 | // the predecessor is trickier, and we don't handle it. | |||
| 1111 | if (MBB.isEHPad() || MBB.isInlineAsmBrIndirectTarget()) | |||
| 1112 | return false; | |||
| 1113 | ||||
| 1114 | if (MBB.pred_size() != 2) | |||
| 1115 | return false; | |||
| 1116 | ||||
| 1117 | LiveInterval &IntA = | |||
| 1118 | LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg()); | |||
| 1119 | LiveInterval &IntB = | |||
| 1120 | LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg()); | |||
| 1121 | ||||
| 1122 | // A is defined by a PHI at the entry of MBB. | |||
| 1123 | SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getRegSlot(true); | |||
| 1124 | VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx); | |||
| 1125 | assert(AValNo && !AValNo->isUnused() && "COPY source not live")((void)0); | |||
| 1126 | if (!AValNo->isPHIDef()) | |||
| 1127 | return false; | |||
| 1128 | ||||
| 1129 | // B is not referenced before CopyMI in MBB. | |||
| 1130 | if (IntB.overlaps(LIS->getMBBStartIdx(&MBB), CopyIdx)) | |||
| 1131 | return false; | |||
| 1132 | ||||
| 1133 | // MBB has two predecessors: one contains A = B so no copy will be inserted | |||
| 1134 | // for it. The other one will have a copy moved from MBB. | |||
| 1135 | bool FoundReverseCopy = false; | |||
| 1136 | MachineBasicBlock *CopyLeftBB = nullptr; | |||
| 1137 | for (MachineBasicBlock *Pred : MBB.predecessors()) { | |||
| 1138 | VNInfo *PVal = IntA.getVNInfoBefore(LIS->getMBBEndIdx(Pred)); | |||
| 1139 | MachineInstr *DefMI = LIS->getInstructionFromIndex(PVal->def); | |||
| 1140 | if (!DefMI || !DefMI->isFullCopy()) { | |||
| 1141 | CopyLeftBB = Pred; | |||
| 1142 | continue; | |||
| 1143 | } | |||
| 1144 | // Check that DefMI is a reverse copy and that it is in block Pred. | |||
| 1145 | if (DefMI->getOperand(0).getReg() != IntA.reg() || | |||
| 1146 | DefMI->getOperand(1).getReg() != IntB.reg() || | |||
| 1147 | DefMI->getParent() != Pred) { | |||
| 1148 | CopyLeftBB = Pred; | |||
| 1149 | continue; | |||
| 1150 | } | |||
| 1151 | // If there is any other def of B after DefMI and before the end of Pred, | |||
| 1152 | // we need to keep the copy of B = A at the end of Pred if we remove | |||
| 1153 | // B = A from MBB. | |||
| 1154 | bool ValB_Changed = false; | |||
| 1155 | for (auto VNI : IntB.valnos) { | |||
| 1156 | if (VNI->isUnused()) | |||
| 1157 | continue; | |||
| 1158 | if (PVal->def < VNI->def && VNI->def < LIS->getMBBEndIdx(Pred)) { | |||
| 1159 | ValB_Changed = true; | |||
| 1160 | break; | |||
| 1161 | } | |||
| 1162 | } | |||
| 1163 | if (ValB_Changed) { | |||
| 1164 | CopyLeftBB = Pred; | |||
| 1165 | continue; | |||
| 1166 | } | |||
| 1167 | FoundReverseCopy = true; | |||
| 1168 | } | |||
| 1169 | ||||
| 1170 | // If no reverse copy is found in predecessors, nothing to do. | |||
| 1171 | if (!FoundReverseCopy) | |||
| 1172 | return false; | |||
| 1173 | ||||
| 1174 | // If CopyLeftBB is nullptr, it means every predecessor of MBB contains a | |||
| 1175 | // reverse copy, so CopyMI can be removed trivially once IntA/IntB are | |||
| 1176 | // updated. If CopyLeftBB is not nullptr, move CopyMI from MBB to CopyLeftBB | |||
| 1177 | // and update IntA/IntB. | |||
| 1178 | // | |||
| 1179 | // If CopyLeftBB is not nullptr, ensure CopyLeftBB has a single succ so | |||
| 1180 | // MBB is hotter than CopyLeftBB. | |||
| 1181 | if (CopyLeftBB && CopyLeftBB->succ_size() > 1) | |||
| 1182 | return false; | |||
| 1183 | ||||
| 1184 | // Now it is (almost surely) OK to move the copy. | |||
| 1185 | if (CopyLeftBB) { | |||
| 1186 | // Position in CopyLeftBB where we should insert new copy. | |||
| 1187 | auto InsPos = CopyLeftBB->getFirstTerminator(); | |||
| 1188 | ||||
| 1189 | // Make sure that B isn't referenced in the terminators (if any) at the end | |||
| 1190 | // of the predecessor since we're about to insert a new definition of B | |||
| 1191 | // before them. | |||
| 1192 | if (InsPos != CopyLeftBB->end()) { | |||
| 1193 | SlotIndex InsPosIdx = LIS->getInstructionIndex(*InsPos).getRegSlot(true); | |||
| 1194 | if (IntB.overlaps(InsPosIdx, LIS->getMBBEndIdx(CopyLeftBB))) | |||
| 1195 | return false; | |||
| 1196 | } | |||
| 1197 | ||||
| 1198 | LLVM_DEBUG(dbgs() << "\tremovePartialRedundancy: Move the copy to "do { } while (false) | |||
| 1199 | << printMBBReference(*CopyLeftBB) << '\t' << CopyMI)do { } while (false); | |||
| 1200 | ||||
| 1201 | // Insert new copy to CopyLeftBB. | |||
| 1202 | MachineInstr *NewCopyMI = BuildMI(*CopyLeftBB, InsPos, CopyMI.getDebugLoc(), | |||
| 1203 | TII->get(TargetOpcode::COPY), IntB.reg()) | |||
| 1204 | .addReg(IntA.reg()); | |||
| 1205 | SlotIndex NewCopyIdx = | |||
| 1206 | LIS->InsertMachineInstrInMaps(*NewCopyMI).getRegSlot(); | |||
| 1207 | IntB.createDeadDef(NewCopyIdx, LIS->getVNInfoAllocator()); | |||
| 1208 | for (LiveInterval::SubRange &SR : IntB.subranges()) | |||
| 1209 | SR.createDeadDef(NewCopyIdx, LIS->getVNInfoAllocator()); | |||
| 1210 | ||||
| 1211 | // If the newly created instruction reuses the address of an instruction | |||
| 1212 | // that was deleted before (the object was recycled by the allocator), it | |||
| 1213 | // needs to be removed from the deleted list. | |||
| 1214 | ErasedInstrs.erase(NewCopyMI); | |||
| 1215 | } else { | |||
| 1216 | LLVM_DEBUG(dbgs() << "\tremovePartialRedundancy: Remove the copy from "do { } while (false) | |||
| 1217 | << printMBBReference(MBB) << '\t' << CopyMI)do { } while (false); | |||
| 1218 | } | |||
| 1219 | ||||
| 1220 | // Remove CopyMI. | |||
| 1221 | // Note: It is fine to remove the copy before updating the live-ranges. | |||
| 1222 | // While updating the live-ranges, we only look at slot indices and | |||
| 1223 | // never go back to the instruction. | |||
| 1224 | // Mark instructions as deleted. | |||
| 1225 | deleteInstr(&CopyMI); | |||
| 1226 | ||||
| 1227 | // Update the liveness. | |||
| 1228 | SmallVector<SlotIndex, 8> EndPoints; | |||
| 1229 | VNInfo *BValNo = IntB.Query(CopyIdx).valueOutOrDead(); | |||
| 1230 | LIS->pruneValue(*static_cast<LiveRange *>(&IntB), CopyIdx.getRegSlot(), | |||
| 1231 | &EndPoints); | |||
| 1232 | BValNo->markUnused(); | |||
| 1233 | // Extend IntB to the EndPoints of its original live interval. | |||
| 1234 | LIS->extendToIndices(IntB, EndPoints); | |||
| 1235 | ||||
| 1236 | // Now, do the same for its subranges. | |||
| 1237 | for (LiveInterval::SubRange &SR : IntB.subranges()) { | |||
| 1238 | EndPoints.clear(); | |||
| 1239 | VNInfo *BValNo = SR.Query(CopyIdx).valueOutOrDead(); | |||
| 1240 | assert(BValNo && "All sublanes should be live")((void)0); | |||
| 1241 | LIS->pruneValue(SR, CopyIdx.getRegSlot(), &EndPoints); | |||
| 1242 | BValNo->markUnused(); | |||
| 1243 | // We can have a situation where the result of the original copy is live, | |||
| 1244 | // but is immediately dead in this subrange, e.g. [336r,336d:0). That makes | |||
| 1245 | // the copy appear as an endpoint from pruneValue(), but we don't want it | |||
| 1246 | // to be one because the copy has been removed. We can go ahead and remove | |||
| 1247 | // that endpoint; there is no other situation here where there could be a | |||
| 1248 | // use at the same place, as we know that the copy is a full copy. | |||
| 1249 | for (unsigned I = 0; I != EndPoints.size(); ) { | |||
| 1250 | if (SlotIndex::isSameInstr(EndPoints[I], CopyIdx)) { | |||
| 1251 | EndPoints[I] = EndPoints.back(); | |||
| 1252 | EndPoints.pop_back(); | |||
| 1253 | continue; | |||
| 1254 | } | |||
| 1255 | ++I; | |||
| 1256 | } | |||
| 1257 | SmallVector<SlotIndex, 8> Undefs; | |||
| 1258 | IntB.computeSubRangeUndefs(Undefs, SR.LaneMask, *MRI, | |||
| 1259 | *LIS->getSlotIndexes()); | |||
| 1260 | LIS->extendToIndices(SR, EndPoints, Undefs); | |||
| 1261 | } | |||
| 1262 | // If any dead defs were extended, truncate them. | |||
| 1263 | shrinkToUses(&IntB); | |||
| 1264 | ||||
| 1265 | // Finally, update the live-range of IntA. | |||
| 1266 | shrinkToUses(&IntA); | |||
| 1267 | return true; | |||
| 1268 | } | |||
| 1269 | ||||
| 1270 | /// Returns true if @p MI defines the full vreg @p Reg, as opposed to just | |||
| 1271 | /// defining a subregister. | |||
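| | /// For illustration (hypothetical MIR; SOME_OP is an invented opcode), | |||
| | /// both of these count as defining the full register: | |||
| | ///    %0 = SOME_OP ...              ; no subregister index | |||
| | ///    undef %0.sub0 = SOME_OP ...   ; subregister def, but read-undef | |||
| | /// while a plain subregister def "%0.sub0 = SOME_OP ..." does not, since | |||
| | /// it preserves the value in the other lanes. | |||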
| 1272 | static bool definesFullReg(const MachineInstr &MI, Register Reg) { | |||
| 1273 | assert(!Reg.isPhysical() && "This code cannot handle physreg aliasing")((void)0); | |||
| 1274 | ||||
| 1275 | for (const MachineOperand &Op : MI.operands()) { | |||
| 1276 | if (!Op.isReg() || !Op.isDef() || Op.getReg() != Reg) | |||
| 1277 | continue; | |||
| 1278 | // Return true if we define the full register or don't care about the value | |||
| 1279 | // inside other subregisters. | |||
| 1280 | if (Op.getSubReg() == 0 || Op.isUndef()) | |||
| 1281 | return true; | |||
| 1282 | } | |||
| 1283 | return false; | |||
| 1284 | } | |||
| 1285 | ||||
| 1286 | bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP, | |||
| 1287 | MachineInstr *CopyMI, | |||
| 1288 | bool &IsDefCopy) { | |||
| 1289 | IsDefCopy = false; | |||
| 1290 | Register SrcReg = CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg(); | |||
| 1291 | unsigned SrcIdx = CP.isFlipped() ? CP.getDstIdx() : CP.getSrcIdx(); | |||
| 1292 | Register DstReg = CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg(); | |||
| 1293 | unsigned DstIdx = CP.isFlipped() ? CP.getSrcIdx() : CP.getDstIdx(); | |||
| 1294 | if (Register::isPhysicalRegister(SrcReg)) | |||
| 1295 | return false; | |||
| 1296 | ||||
| 1297 | LiveInterval &SrcInt = LIS->getInterval(SrcReg); | |||
| 1298 | SlotIndex CopyIdx = LIS->getInstructionIndex(*CopyMI); | |||
| 1299 | VNInfo *ValNo = SrcInt.Query(CopyIdx).valueIn(); | |||
| 1300 | if (!ValNo) | |||
| 1301 | return false; | |||
| 1302 | if (ValNo->isPHIDef() || ValNo->isUnused()) | |||
| 1303 | return false; | |||
| 1304 | MachineInstr *DefMI = LIS->getInstructionFromIndex(ValNo->def); | |||
| 1305 | if (!DefMI) | |||
| 1306 | return false; | |||
| 1307 | if (DefMI->isCopyLike()) { | |||
| 1308 | IsDefCopy = true; | |||
| 1309 | return false; | |||
| 1310 | } | |||
| 1311 | if (!TII->isAsCheapAsAMove(*DefMI)) | |||
| 1312 | return false; | |||
| 1313 | if (!TII->isTriviallyReMaterializable(*DefMI, AA)) | |||
| 1314 | return false; | |||
| 1315 | if (!definesFullReg(*DefMI, SrcReg)) | |||
| 1316 | return false; | |||
| 1317 | bool SawStore = false; | |||
| 1318 | if (!DefMI->isSafeToMove(AA, SawStore)) | |||
| 1319 | return false; | |||
| 1320 | const MCInstrDesc &MCID = DefMI->getDesc(); | |||
| 1321 | if (MCID.getNumDefs() != 1) | |||
| 1322 | return false; | |||
| 1323 | // Only support subregister destinations when the def is read-undef. | |||
| 1324 | MachineOperand &DstOperand = CopyMI->getOperand(0); | |||
| 1325 | Register CopyDstReg = DstOperand.getReg(); | |||
| 1326 | if (DstOperand.getSubReg() && !DstOperand.isUndef()) | |||
| 1327 | return false; | |||
| 1328 | ||||
| 1329 | // If both SrcIdx and DstIdx are set, correct rematerialization would widen | |||
| 1330 | // the register substantially (beyond both source and dest size). This is bad | |||
| 1331 | // for performance since it can cascade through a function, introducing many | |||
| 1332 | // extra spills and fills (e.g. ARM can easily end up copying QQQQPR registers | |||
| 1333 | // around after a few subreg copies). | |||
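| | // For illustration (hypothetical subregister indices): rematerializing | |||
| | // across a copy like "%1:dsub_0 = COPY %0:qsub_1" would need a register | |||
| | // class wide enough to cover the composed index, larger than either | |||
| | // operand's own class. | |||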
| 1334 | if (SrcIdx && DstIdx) | |||
| 1335 | return false; | |||
| 1336 | ||||
| 1337 | const TargetRegisterClass *DefRC = TII->getRegClass(MCID, 0, TRI, *MF); | |||
| 1338 | if (!DefMI->isImplicitDef()) { | |||
| 1339 | if (DstReg.isPhysical()) { | |||
| 1340 | Register NewDstReg = DstReg; | |||
| 1341 | ||||
| 1342 | unsigned NewDstIdx = TRI->composeSubRegIndices(CP.getSrcIdx(), | |||
| 1343 | DefMI->getOperand(0).getSubReg()); | |||
| 1344 | if (NewDstIdx) | |||
| 1345 | NewDstReg = TRI->getSubReg(DstReg, NewDstIdx); | |||
| 1346 | ||||
| 1347 | // Finally, make sure that the physical subregister that will be | |||
| 1348 | // constructed later is permitted for the instruction. | |||
| 1349 | if (!DefRC->contains(NewDstReg)) | |||
| 1350 | return false; | |||
| 1351 | } else { | |||
| 1352 | // Theoretically, some stack frame reference could exist. Just make sure | |||
| 1353 | // it hasn't actually happened. | |||
| 1354 | assert(Register::isVirtualRegister(DstReg) &&((void)0) | |||
| 1355 | "Only expect to deal with virtual or physical registers")((void)0); | |||
| 1356 | } | |||
| 1357 | } | |||
| 1358 | ||||
| 1359 | if (!allUsesAvailableAt(DefMI, ValNo->def, CopyIdx)) | |||
| 1360 | return false; | |||
| 1361 | ||||
| 1362 | DebugLoc DL = CopyMI->getDebugLoc(); | |||
| 1363 | MachineBasicBlock *MBB = CopyMI->getParent(); | |||
| 1364 | MachineBasicBlock::iterator MII = | |||
| 1365 | std::next(MachineBasicBlock::iterator(CopyMI)); | |||
| 1366 | TII->reMaterialize(*MBB, MII, DstReg, SrcIdx, *DefMI, *TRI); | |||
| 1367 | MachineInstr &NewMI = *std::prev(MII); | |||
| 1368 | NewMI.setDebugLoc(DL); | |||
| 1369 | ||||
| 1370 | // In a situation like the following: | |||
| 1371 | // %0:subreg = instr ; DefMI, subreg = DstIdx | |||
| 1372 | // %1 = copy %0:subreg ; CopyMI, SrcIdx = 0 | |||
| 1373 | // instead of widening %1 to the register class of %0 simply do: | |||
| 1374 | // %1 = instr | |||
| 1375 | const TargetRegisterClass *NewRC = CP.getNewRC(); | |||
| 1376 | if (DstIdx != 0) { | |||
| 1377 | MachineOperand &DefMO = NewMI.getOperand(0); | |||
| 1378 | if (DefMO.getSubReg() == DstIdx) { | |||
| 1379 | assert(SrcIdx == 0 && CP.isFlipped()((void)0) | |||
| 1380 | && "Shouldn't have SrcIdx+DstIdx at this point")((void)0); | |||
| 1381 | const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg); | |||
| 1382 | const TargetRegisterClass *CommonRC = | |||
| 1383 | TRI->getCommonSubClass(DefRC, DstRC); | |||
| 1384 | if (CommonRC != nullptr) { | |||
| 1385 | NewRC = CommonRC; | |||
| 1386 | DstIdx = 0; | |||
| 1387 | DefMO.setSubReg(0); | |||
| 1388 | DefMO.setIsUndef(false); // Only subregs can have def+undef. | |||
| 1389 | } | |||
| 1390 | } | |||
| 1391 | } | |||
| 1392 | ||||
| 1393 | // CopyMI may have implicit operands, save them so that we can transfer them | |||
| 1394 | // over to the newly materialized instruction after CopyMI is removed. | |||
| 1395 | SmallVector<MachineOperand, 4> ImplicitOps; | |||
| 1396 | ImplicitOps.reserve(CopyMI->getNumOperands() - | |||
| 1397 | CopyMI->getDesc().getNumOperands()); | |||
| 1398 | for (unsigned I = CopyMI->getDesc().getNumOperands(), | |||
| 1399 | E = CopyMI->getNumOperands(); | |||
| 1400 | I != E; ++I) { | |||
| 1401 | MachineOperand &MO = CopyMI->getOperand(I); | |||
| 1402 | if (MO.isReg()) { | |||
| 1403 | assert(MO.isImplicit() && "No explicit operands after implicit operands.")((void)0); | |||
| 1404 | // Discard VReg implicit defs. | |||
| 1405 | if (Register::isPhysicalRegister(MO.getReg())) | |||
| 1406 | ImplicitOps.push_back(MO); | |||
| 1407 | } | |||
| 1408 | } | |||
| 1409 | ||||
| 1410 | LIS->ReplaceMachineInstrInMaps(*CopyMI, NewMI); | |||
| 1411 | CopyMI->eraseFromParent(); | |||
| 1412 | ErasedInstrs.insert(CopyMI); | |||
| 1413 | ||||
| 1414 | // NewMI may have dead implicit defs (e.g. EFLAGS for MOV<bits>r0 on X86). | |||
| 1415 | // We need to remember these so we can add intervals once we insert | |||
| 1416 | // NewMI into SlotIndexes. | |||
| 1417 | SmallVector<MCRegister, 4> NewMIImplDefs; | |||
| 1418 | for (unsigned i = NewMI.getDesc().getNumOperands(), | |||
| 1419 | e = NewMI.getNumOperands(); | |||
| 1420 | i != e; ++i) { | |||
| 1421 | MachineOperand &MO = NewMI.getOperand(i); | |||
| 1422 | if (MO.isReg() && MO.isDef()) { | |||
| 1423 | assert(MO.isImplicit() && MO.isDead() &&((void)0) | |||
| 1424 | Register::isPhysicalRegister(MO.getReg()))((void)0); | |||
| 1425 | NewMIImplDefs.push_back(MO.getReg().asMCReg()); | |||
| 1426 | } | |||
| 1427 | } | |||
| 1428 | ||||
| 1429 | if (DstReg.isVirtual()) { | |||
| 1430 | unsigned NewIdx = NewMI.getOperand(0).getSubReg(); | |||
| 1431 | ||||
| 1432 | if (DefRC != nullptr) { | |||
| 1433 | if (NewIdx) | |||
| 1434 | NewRC = TRI->getMatchingSuperRegClass(NewRC, DefRC, NewIdx); | |||
| 1435 | else | |||
| 1436 | NewRC = TRI->getCommonSubClass(NewRC, DefRC); | |||
| 1437 | assert(NewRC && "subreg chosen for remat incompatible with instruction")((void)0); | |||
| 1438 | } | |||
| 1439 | // Remap subranges to new lanemask and change register class. | |||
| 1440 | LiveInterval &DstInt = LIS->getInterval(DstReg); | |||
| 1441 | for (LiveInterval::SubRange &SR : DstInt.subranges()) { | |||
| 1442 | SR.LaneMask = TRI->composeSubRegIndexLaneMask(DstIdx, SR.LaneMask); | |||
| 1443 | } | |||
| 1444 | MRI->setRegClass(DstReg, NewRC); | |||
| 1445 | ||||
| 1446 | // Update machine operands and add flags. | |||
| 1447 | updateRegDefsUses(DstReg, DstReg, DstIdx); | |||
| 1448 | NewMI.getOperand(0).setSubReg(NewIdx); | |||
| 1449 | // updateRegDefsUses can add an "undef" flag to the definition, since | |||
| 1450 | // it will replace DstReg with DstReg.DstIdx. If NewIdx is 0, make | |||
| 1451 | // sure that "undef" is not set. | |||
| 1452 | if (NewIdx == 0) | |||
| 1453 | NewMI.getOperand(0).setIsUndef(false); | |||
| 1454 | // Add dead subregister definitions if we are defining the whole register | |||
| 1455 | // but only part of it is live. | |||
| 1456 | // This can happen if the rematerialization instruction is rematerializing | |||
| 1457 | // more than is actually used in the register. | |||
| 1458 | // An example would be: | |||
| 1459 | // %1 = LOAD CONSTANTS 5, 8 ; Loading both 5 and 8 in different subregs | |||
| 1460 | // ; Copying only part of the register here, but the rest is undef. | |||
| 1461 | // %2:sub_16bit<def, read-undef> = COPY %1:sub_16bit | |||
| 1462 | // ==> | |||
| 1463 | // ; Materialize all the constants but only using one | |||
| 1464 | // %2 = LOAD_CONSTANTS 5, 8 | |||
| 1465 | // | |||
| 1466 | // at this point for the part that wasn't defined before we could have | |||
| 1467 | // subranges missing the definition. | |||
| 1468 | if (NewIdx == 0 && DstInt.hasSubRanges()) { | |||
| 1469 | SlotIndex CurrIdx = LIS->getInstructionIndex(NewMI); | |||
| 1470 | SlotIndex DefIndex = | |||
| 1471 | CurrIdx.getRegSlot(NewMI.getOperand(0).isEarlyClobber()); | |||
| 1472 | LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(DstReg); | |||
| 1473 | VNInfo::Allocator& Alloc = LIS->getVNInfoAllocator(); | |||
| 1474 | for (LiveInterval::SubRange &SR : DstInt.subranges()) { | |||
| 1475 | if (!SR.liveAt(DefIndex)) | |||
| 1476 | SR.createDeadDef(DefIndex, Alloc); | |||
| 1477 | MaxMask &= ~SR.LaneMask; | |||
| 1478 | } | |||
| 1479 | if (MaxMask.any()) { | |||
| 1480 | LiveInterval::SubRange *SR = DstInt.createSubRange(Alloc, MaxMask); | |||
| 1481 | SR->createDeadDef(DefIndex, Alloc); | |||
| 1482 | } | |||
| 1483 | } | |||
| 1484 | ||||
| 1485 | // Make sure that the subrange for resultant undef is removed | |||
| 1486 | // For example: | |||
| 1487 | // %1:sub1<def,read-undef> = LOAD CONSTANT 1 | |||
| 1488 | // %2 = COPY %1 | |||
| 1489 | // ==> | |||
| 1490 | // %2:sub1<def, read-undef> = LOAD CONSTANT 1 | |||
| 1491 | // ; Correct but need to remove the subrange for %2:sub0 | |||
| 1492 | // ; as it is now undef | |||
| 1493 | if (NewIdx != 0 && DstInt.hasSubRanges()) { | |||
| 1494 | // The affected subregister segments can be removed. | |||
| 1495 | SlotIndex CurrIdx = LIS->getInstructionIndex(NewMI); | |||
| 1496 | LaneBitmask DstMask = TRI->getSubRegIndexLaneMask(NewIdx); | |||
| 1497 | bool UpdatedSubRanges = false; | |||
| 1498 | SlotIndex DefIndex = | |||
| 1499 | CurrIdx.getRegSlot(NewMI.getOperand(0).isEarlyClobber()); | |||
| 1500 | VNInfo::Allocator &Alloc = LIS->getVNInfoAllocator(); | |||
| 1501 | for (LiveInterval::SubRange &SR : DstInt.subranges()) { | |||
| 1502 | if ((SR.LaneMask & DstMask).none()) { | |||
| 1503 | LLVM_DEBUG(dbgs()do { } while (false) | |||
| 1504 | << "Removing undefined SubRange "do { } while (false) | |||
| 1505 | << PrintLaneMask(SR.LaneMask) << " : " << SR << "\n")do { } while (false); | |||
| 1506 | // Remove any segments in this SubRange that carry the value defined here. | |||
| 1507 | if (VNInfo *RmValNo = SR.getVNInfoAt(CurrIdx.getRegSlot())) { | |||
| 1508 | SR.removeValNo(RmValNo); | |||
| 1509 | UpdatedSubRanges = true; | |||
| 1510 | } | |||
| 1511 | } else { | |||
| 1512 | // We know that this lane is defined by this instruction, | |||
| 1513 | // but at this point it may be empty because it is not used by | |||
| 1514 | // anything. This happens when updateRegDefsUses adds the missing | |||
| 1515 | // lanes. Assign that lane a dead def so that the interferences | |||
| 1516 | // are properly modeled. | |||
| 1517 | if (SR.empty()) | |||
| 1518 | SR.createDeadDef(DefIndex, Alloc); | |||
| 1519 | } | |||
| 1520 | } | |||
| 1521 | if (UpdatedSubRanges) | |||
| 1522 | DstInt.removeEmptySubRanges(); | |||
| 1523 | } | |||
| 1524 | } else if (NewMI.getOperand(0).getReg() != CopyDstReg) { | |||
| 1525 | // The new instruction may be defining a sub-register of what was actually | |||
| 1526 | // asked for. If so, it must implicitly define the whole thing. | |||
| 1527 | assert(Register::isPhysicalRegister(DstReg) &&((void)0) | |||
| 1528 | "Only expect virtual or physical registers in remat")((void)0); | |||
| 1529 | NewMI.getOperand(0).setIsDead(true); | |||
| 1530 | NewMI.addOperand(MachineOperand::CreateReg( | |||
| 1531 | CopyDstReg, true /*IsDef*/, true /*IsImp*/, false /*IsKill*/)); | |||
| 1532 | // Record small dead def live-ranges for all the subregisters | |||
| 1533 | // of the destination register. | |||
| 1534 | // Otherwise, variables that live through may miss some | |||
| 1535 | // interferences, thus creating an invalid allocation. | |||
| 1536 | // E.g., i386 code: | |||
| 1537 | // %1 = somedef ; %1 GR8 | |||
| 1538 | // %2 = remat ; %2 GR32 | |||
| 1539 | // CL = COPY %2.sub_8bit | |||
| 1540 | // = somedef %1 ; %1 GR8 | |||
| 1541 | // => | |||
| 1542 | // %1 = somedef ; %1 GR8 | |||
| 1543 | // dead ECX = remat ; implicit-def CL | |||
| 1544 | // = somedef %1 ; %1 GR8 | |||
| 1545 | // %1 will see the interferences with CL but not with CH since | |||
| 1546 | // no live-ranges would have been created for ECX. | |||
| 1547 | // Fix that! | |||
| 1548 | SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI); | |||
| 1549 | for (MCRegUnitIterator Units(NewMI.getOperand(0).getReg(), TRI); | |||
| 1550 | Units.isValid(); ++Units) | |||
| 1551 | if (LiveRange *LR = LIS->getCachedRegUnit(*Units)) | |||
| 1552 | LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator()); | |||
| 1553 | } | |||
| 1554 | ||||
| 1555 | if (NewMI.getOperand(0).getSubReg()) | |||
| 1556 | NewMI.getOperand(0).setIsUndef(); | |||
| 1557 | ||||
| 1558 | // Transfer over implicit operands to the rematerialized instruction. | |||
| 1559 | for (MachineOperand &MO : ImplicitOps) | |||
| 1560 | NewMI.addOperand(MO); | |||
| 1561 | ||||
| 1562 | SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI); | |||
| 1563 | for (unsigned i = 0, e = NewMIImplDefs.size(); i != e; ++i) { | |||
| 1564 | MCRegister Reg = NewMIImplDefs[i]; | |||
| 1565 | for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) | |||
| 1566 | if (LiveRange *LR = LIS->getCachedRegUnit(*Units)) | |||
| 1567 | LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator()); | |||
| 1568 | } | |||
| 1569 | ||||
| 1570 | LLVM_DEBUG(dbgs() << "Remat: " << NewMI)do { } while (false); | |||
| 1571 | ++NumReMats; | |||
| 1572 | ||||
| 1573 | // If the virtual SrcReg is completely eliminated, update all DBG_VALUEs | |||
| 1574 | // to describe DstReg instead. | |||
| 1575 | if (MRI->use_nodbg_empty(SrcReg)) { | |||
| 1576 | for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg); | |||
| 1577 | UI != MRI->use_end();) { | |||
| 1578 | MachineOperand &UseMO = *UI++; | |||
| 1579 | MachineInstr *UseMI = UseMO.getParent(); | |||
| 1580 | if (UseMI->isDebugInstr()) { | |||
| 1581 | if (Register::isPhysicalRegister(DstReg)) | |||
| 1582 | UseMO.substPhysReg(DstReg, *TRI); | |||
| 1583 | else | |||
| 1584 | UseMO.setReg(DstReg); | |||
| 1585 | // Move the debug value directly after the def of the rematerialized | |||
| 1586 | // value in DstReg. | |||
| 1587 | MBB->splice(std::next(NewMI.getIterator()), UseMI->getParent(), UseMI); | |||
| 1588 | LLVM_DEBUG(dbgs() << "\t\tupdated: " << *UseMI)do { } while (false); | |||
| 1589 | } | |||
| 1590 | } | |||
| 1591 | } | |||
| 1592 | ||||
| 1593 | if (ToBeUpdated.count(SrcReg)) | |||
| 1594 | return true; | |||
| 1595 | ||||
| 1596 | unsigned NumCopyUses = 0; | |||
| 1597 | for (MachineOperand &UseMO : MRI->use_nodbg_operands(SrcReg)) { | |||
| 1598 | if (UseMO.getParent()->isCopyLike()) | |||
| 1599 | NumCopyUses++; | |||
| 1600 | } | |||
| 1601 | if (NumCopyUses < LateRematUpdateThreshold) { | |||
| 1602 | // The source interval can become smaller because we removed a use. | |||
| 1603 | shrinkToUses(&SrcInt, &DeadDefs); | |||
| 1604 | if (!DeadDefs.empty()) | |||
| 1605 | eliminateDeadDefs(); | |||
| 1606 | } else { | |||
| 1607 | ToBeUpdated.insert(SrcReg); | |||
| 1608 | } | |||
| 1609 | return true; | |||
| 1610 | } | |||
| 1611 | ||||
| 1612 | MachineInstr *RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI) { | |||
| 1613 | // ProcessImplicitDefs may leave some copies of <undef> values; it only | |||
| 1614 | // removes local variables. When we have a copy like: | |||
| 1615 | // | |||
| 1616 | // %1 = COPY undef %2 | |||
| 1617 | // | |||
| 1618 | // We delete the copy and remove the corresponding value number from %1. | |||
| 1619 | // Any uses of that value number are marked as <undef>. | |||
| 1620 | ||||
| 1621 | // Note that we do not query CoalescerPair here but redo isMoveInstr as the | |||
| 1622 | // CoalescerPair may have a new register class with adjusted subreg indices | |||
| 1623 | // at this point. | |||
| 1624 | Register SrcReg, DstReg; | |||
| 1625 | unsigned SrcSubIdx = 0, DstSubIdx = 0; | |||
| 1626 | if (!isMoveInstr(*TRI, CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) | |||
| 1627 | return nullptr; | |||
| 1628 | ||||
| 1629 | SlotIndex Idx = LIS->getInstructionIndex(*CopyMI); | |||
| 1630 | const LiveInterval &SrcLI = LIS->getInterval(SrcReg); | |||
| 1631 | // CopyMI is undef iff SrcReg is not live before the instruction. | |||
| 1632 | if (SrcSubIdx != 0 && SrcLI.hasSubRanges()) { | |||
| 1633 | LaneBitmask SrcMask = TRI->getSubRegIndexLaneMask(SrcSubIdx); | |||
| 1634 | for (const LiveInterval::SubRange &SR : SrcLI.subranges()) { | |||
| 1635 | if ((SR.LaneMask & SrcMask).none()) | |||
| 1636 | continue; | |||
| 1637 | if (SR.liveAt(Idx)) | |||
| 1638 | return nullptr; | |||
| 1639 | } | |||
| 1640 | } else if (SrcLI.liveAt(Idx)) | |||
| 1641 | return nullptr; | |||
| 1642 | ||||
| 1643 | // If the undef copy defines a live-out value (i.e. an input to a PHI def), | |||
| 1644 | // then replace it with an IMPLICIT_DEF. | |||
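| | // For illustration, a hypothetical MIR sketch: | |||
| | //   bb.0: | |||
| | //     %1 = COPY undef %2   ; %1 is live out into a PHI value in bb.1 | |||
| | // => | |||
| | //   bb.0: | |||
| | //     %1 = IMPLICIT_DEF    ; PHI input stays defined without reading %2 | |||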
| 1645 | LiveInterval &DstLI = LIS->getInterval(DstReg); | |||
| 1646 | SlotIndex RegIndex = Idx.getRegSlot(); | |||
| 1647 | LiveRange::Segment *Seg = DstLI.getSegmentContaining(RegIndex); | |||
| 1648 | assert(Seg != nullptr && "No segment for defining instruction")((void)0); | |||
| 1649 | if (VNInfo *V = DstLI.getVNInfoAt(Seg->end)) { | |||
| 1650 | if (V->isPHIDef()) { | |||
| 1651 | CopyMI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF)); | |||
| 1652 | for (unsigned i = CopyMI->getNumOperands(); i != 0; --i) { | |||
| 1653 | MachineOperand &MO = CopyMI->getOperand(i-1); | |||
| 1654 | if (MO.isReg() && MO.isUse()) | |||
| 1655 | CopyMI->RemoveOperand(i-1); | |||
| 1656 | } | |||
| 1657 | LLVM_DEBUG(dbgs() << "\tReplaced copy of <undef> value with an "do { } while (false) | |||
| 1658 | "implicit def\n")do { } while (false); | |||
| 1659 | return CopyMI; | |||
| 1660 | } | |||
| 1661 | } | |||
| 1662 | ||||
| 1663 | // Remove any DstReg segments starting at the instruction. | |||
| 1664 | LLVM_DEBUG(dbgs() << "\tEliminating copy of <undef> value\n")do { } while (false); | |||
| 1665 | ||||
| 1666 | // Remove value or merge with previous one in case of a subregister def. | |||
| 1667 | if (VNInfo *PrevVNI = DstLI.getVNInfoAt(Idx)) { | |||
| 1668 | VNInfo *VNI = DstLI.getVNInfoAt(RegIndex); | |||
| 1669 | DstLI.MergeValueNumberInto(VNI, PrevVNI); | |||
| 1670 | ||||
| 1671 | // The affected subregister segments can be removed. | |||
| 1672 | LaneBitmask DstMask = TRI->getSubRegIndexLaneMask(DstSubIdx); | |||
| 1673 | for (LiveInterval::SubRange &SR : DstLI.subranges()) { | |||
| 1674 | if ((SR.LaneMask & DstMask).none()) | |||
| 1675 | continue; | |||
| 1676 | ||||
| 1677 | VNInfo *SVNI = SR.getVNInfoAt(RegIndex); | |||
| 1678 | assert(SVNI != nullptr && SlotIndex::isSameInstr(SVNI->def, RegIndex))((void)0); | |||
| 1679 | SR.removeValNo(SVNI); | |||
| 1680 | } | |||
| 1681 | DstLI.removeEmptySubRanges(); | |||
| 1682 | } else | |||
| 1683 | LIS->removeVRegDefAt(DstLI, RegIndex); | |||
| 1684 | ||||
| 1685 | // Mark uses as undef. | |||
| 1686 | for (MachineOperand &MO : MRI->reg_nodbg_operands(DstReg)) { | |||
| 1687 | if (MO.isDef() /*|| MO.isUndef()*/) | |||
| 1688 | continue; | |||
| 1689 | const MachineInstr &MI = *MO.getParent(); | |||
| 1690 | SlotIndex UseIdx = LIS->getInstructionIndex(MI); | |||
| 1691 | LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(MO.getSubReg()); | |||
| 1692 | bool isLive; | |||
| 1693 | if (!UseMask.all() && DstLI.hasSubRanges()) { | |||
| 1694 | isLive = false; | |||
| 1695 | for (const LiveInterval::SubRange &SR : DstLI.subranges()) { | |||
| 1696 | if ((SR.LaneMask & UseMask).none()) | |||
| 1697 | continue; | |||
| 1698 | if (SR.liveAt(UseIdx)) { | |||
| 1699 | isLive = true; | |||
| 1700 | break; | |||
| 1701 | } | |||
| 1702 | } | |||
| 1703 | } else | |||
| 1704 | isLive = DstLI.liveAt(UseIdx); | |||
| 1705 | if (isLive) | |||
| 1706 | continue; | |||
| 1707 | MO.setIsUndef(true); | |||
| 1708 | LLVM_DEBUG(dbgs() << "\tnew undef: " << UseIdx << '\t' << MI)do { } while (false); | |||
| 1709 | } | |||
| 1710 | ||||
| 1711 | // A def of a subregister may be a use of the other subregisters, so | |||
| 1712 | // deleting a def of a subregister may also remove uses. Since CopyMI | |||
| 1713 | // is still part of the function (but about to be erased), mark all | |||
| 1714 | // defs of DstReg in it as <undef>, so that shrinkToUses will | |||
| 1715 | // ignore them. | |||
| 1716 | for (MachineOperand &MO : CopyMI->operands()) | |||
| 1717 | if (MO.isReg() && MO.isDef() && MO.getReg() == DstReg) | |||
| 1718 | MO.setIsUndef(true); | |||
| 1719 | LIS->shrinkToUses(&DstLI); | |||
| 1720 | ||||
| 1721 | return CopyMI; | |||
| 1722 | } | |||
| 1723 | ||||
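| | /// Set the <undef> flag on \p MO if no live subrange of \p Int covers, | |||
| | /// at \p UseIdx, the lanes addressed by \p SubRegIdx (or, for a def, the | |||
| | /// lanes the def preserves). | |||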
| 1724 | void RegisterCoalescer::addUndefFlag(const LiveInterval &Int, SlotIndex UseIdx, | |||
| 1725 | MachineOperand &MO, unsigned SubRegIdx) { | |||
| 1726 | LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubRegIdx); | |||
| 1727 | if (MO.isDef()) | |||
| 1728 | Mask = ~Mask; | |||
| 1729 | bool IsUndef = true; | |||
| 1730 | for (const LiveInterval::SubRange &S : Int.subranges()) { | |||
| 1731 | if ((S.LaneMask & Mask).none()) | |||
| 1732 | continue; | |||
| 1733 | if (S.liveAt(UseIdx)) { | |||
| 1734 | IsUndef = false; | |||
| 1735 | break; | |||
| 1736 | } | |||
| 1737 | } | |||
| 1738 | if (IsUndef) { | |||
| 1739 | MO.setIsUndef(true); | |||
| 1740 | // We found that some subregister use is actually reading an undefined | |||
| 1741 | // value. In some cases the whole vreg has become undefined at this | |||
| 1742 | // point so we have to potentially shrink the main range if the | |||
| 1743 | // use was ending a live segment there. | |||
| 1744 | LiveQueryResult Q = Int.Query(UseIdx); | |||
| 1745 | if (Q.valueOut() == nullptr) | |||
| 1746 | ShrinkMainRange = true; | |||
| 1747 | } | |||
| 1748 | } | |||
| 1749 | ||||
| 1750 | void RegisterCoalescer::updateRegDefsUses(Register SrcReg, Register DstReg, | |||
| 1751 | unsigned SubIdx) { | |||
| 1752 | bool DstIsPhys = Register::isPhysicalRegister(DstReg); | |||
| 1753 | LiveInterval *DstInt = DstIsPhys ? nullptr : &LIS->getInterval(DstReg); | |||
| 1754 | ||||
| 1755 | if (DstInt && DstInt->hasSubRanges() && DstReg != SrcReg) { | |||
| 1756 | for (MachineOperand &MO : MRI->reg_operands(DstReg)) { | |||
| 1757 | unsigned SubReg = MO.getSubReg(); | |||
| 1758 | if (SubReg == 0 || MO.isUndef()) | |||
| 1759 | continue; | |||
| 1760 | MachineInstr &MI = *MO.getParent(); | |||
| 1761 | if (MI.isDebugInstr()) | |||
| 1762 | continue; | |||
| 1763 | SlotIndex UseIdx = LIS->getInstructionIndex(MI).getRegSlot(true); | |||
| 1764 | addUndefFlag(*DstInt, UseIdx, MO, SubReg); | |||
| 1765 | } | |||
| 1766 | } | |||
| 1767 | ||||
| 1768 | SmallPtrSet<MachineInstr*, 8> Visited; | |||
| 1769 | for (MachineRegisterInfo::reg_instr_iterator | |||
| 1770 | I = MRI->reg_instr_begin(SrcReg), E = MRI->reg_instr_end(); | |||
| 1771 | I != E; ) { | |||
| 1772 | MachineInstr *UseMI = &*(I++); | |||
| 1773 | ||||
| 1774 | // Each instruction can only be rewritten once because sub-register | |||
| 1775 | // composition is not always idempotent. When SrcReg != DstReg, rewriting | |||
| 1776 | // the UseMI operands removes them from the SrcReg use-def chain, but when | |||
| 1777 | // SrcReg is DstReg we could encounter UseMI twice if it has multiple | |||
| 1778 | // operands mentioning the virtual register. | |||
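| | // For illustration (hypothetical indices): substituting SrcReg with | |||
| | // %1:sub1 twice in the same operand would compose the sub-register index | |||
| | // twice and end up addressing the wrong lanes. | |||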
| 1779 | if (SrcReg == DstReg && !Visited.insert(UseMI).second) | |||
| 1780 | continue; | |||
| 1781 | ||||
| 1782 | SmallVector<unsigned,8> Ops; | |||
| 1783 | bool Reads, Writes; | |||
| 1784 | std::tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops); | |||
| 1785 | ||||
| 1786 | // If SrcReg wasn't read, it may still be the case that DstReg is live-in | |||
| 1787 | // because SrcReg is a sub-register. | |||
| 1788 | if (DstInt && !Reads && SubIdx && !UseMI->isDebugInstr()) | |||
| 1789 | Reads = DstInt->liveAt(LIS->getInstructionIndex(*UseMI)); | |||
| 1790 | ||||
| 1791 | // Replace SrcReg with DstReg in all UseMI operands. | |||
| 1792 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) { | |||
| 1793 | MachineOperand &MO = UseMI->getOperand(Ops[i]); | |||
| 1794 | ||||
| 1795 | // Adjust <undef> flags in case of sub-register joins. We don't want to | |||
| 1796 | // turn a full def into a read-modify-write sub-register def and vice | |||
| 1797 | // versa. | |||
| 1798 | if (SubIdx && MO.isDef()) | |||
| 1799 | MO.setIsUndef(!Reads); | |||
| 1800 | ||||
| 1801 | // A subreg use of a partially undef (super) register may be a complete | |||
| 1802 | // undef use now and then has to be marked that way. | |||
| 1803 | if (MO.isUse() && !DstIsPhys) { | |||
| 1804 | unsigned SubUseIdx = TRI->composeSubRegIndices(SubIdx, MO.getSubReg()); | |||
| 1805 | if (SubUseIdx != 0 && MRI->shouldTrackSubRegLiveness(DstReg)) { | |||
| 1806 | if (!DstInt->hasSubRanges()) { | |||
| 1807 | BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator(); | |||
| 1808 | LaneBitmask FullMask = MRI->getMaxLaneMaskForVReg(DstInt->reg()); | |||
| 1809 | LaneBitmask UsedLanes = TRI->getSubRegIndexLaneMask(SubIdx); | |||
| 1810 | LaneBitmask UnusedLanes = FullMask & ~UsedLanes; | |||
| 1811 | DstInt->createSubRangeFrom(Allocator, UsedLanes, *DstInt); | |||
| 1812 | // The unused lanes are just empty live-ranges at this point. | |||
| 1813 | // It is the caller's responsibility to set the proper | |||
| 1814 | // dead segments if there is an actual dead def of the | |||
| 1815 | // unused lanes. This may happen with rematerialization. | |||
| 1816 | DstInt->createSubRange(Allocator, UnusedLanes); | |||
| 1817 | } | |||
| 1818 | SlotIndex MIIdx = UseMI->isDebugInstr() | |||
| 1819 | ? LIS->getSlotIndexes()->getIndexBefore(*UseMI) | |||
| 1820 | : LIS->getInstructionIndex(*UseMI); | |||
| 1821 | SlotIndex UseIdx = MIIdx.getRegSlot(true); | |||
| 1822 | addUndefFlag(*DstInt, UseIdx, MO, SubUseIdx); | |||
| 1823 | } | |||
| 1824 | } | |||
| 1825 | ||||
| 1826 | if (DstIsPhys) | |||
| 1827 | MO.substPhysReg(DstReg, *TRI); | |||
| 1828 | else | |||
| 1829 | MO.substVirtReg(DstReg, SubIdx, *TRI); | |||
| 1830 | } | |||
| 1831 | ||||
| 1832 | LLVM_DEBUG({do { } while (false) | |||
| 1833 | dbgs() << "\t\tupdated: ";do { } while (false) | |||
| 1834 | if (!UseMI->isDebugInstr())do { } while (false) | |||
| 1835 | dbgs() << LIS->getInstructionIndex(*UseMI) << "\t";do { } while (false) | |||
| 1836 | dbgs() << *UseMI;do { } while (false) | |||
| 1837 | })do { } while (false); | |||
| 1838 | } | |||
| 1839 | } | |||
| 1840 | ||||
| 1841 | bool RegisterCoalescer::canJoinPhys(const CoalescerPair &CP) { | |||
| 1842 | // Always join simple intervals that are defined by a single copy from a | |||
| 1843 | // reserved register. This doesn't increase register pressure, so it is | |||
| 1844 | // always beneficial. | |||
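| | // For illustration (hypothetical): an interval whose only value comes | |||
| | // from "%0 = COPY $sp" can be merged into the reserved register, while | |||
| | // an interval carrying several values cannot. | |||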
| 1845 | if (!MRI->isReserved(CP.getDstReg())) { | |||
| 1846 | LLVM_DEBUG(dbgs() << "\tCan only merge into reserved registers.\n")do { } while (false); | |||
| 1847 | return false; | |||
| 1848 | } | |||
| 1849 | ||||
| 1850 | LiveInterval &JoinVInt = LIS->getInterval(CP.getSrcReg()); | |||
| 1851 | if (JoinVInt.containsOneValue()) | |||
| 1852 | return true; | |||
| 1853 | ||||
| 1854 | LLVM_DEBUG(do { } while (false) | |||
| 1855 | dbgs() << "\tCannot join complex intervals into reserved register.\n")do { } while (false); | |||
| 1856 | return false; | |||
| 1857 | } | |||
| 1858 | ||||
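| | /// Return true if, in every predecessor of \p MBB, the live-out value of | |||
| | /// \p S is either undefined or the value defined by the copy itself (a | |||
| | /// self loop), so the incoming sublane value can be treated as undef. | |||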
| 1859 | bool RegisterCoalescer::copyValueUndefInPredecessors( | |||
| 1860 | LiveRange &S, const MachineBasicBlock *MBB, LiveQueryResult SLRQ) { | |||
| 1861 | for (const MachineBasicBlock *Pred : MBB->predecessors()) { | |||
| 1862 | SlotIndex PredEnd = LIS->getMBBEndIdx(Pred); | |||
| 1863 | if (VNInfo *V = S.getVNInfoAt(PredEnd.getPrevSlot())) { | |||
| 1864 | // If this is a self loop, we may be reading the same value. | |||
| 1865 | if (V->id != SLRQ.valueOutOrDead()->id) | |||
| 1866 | return false; | |||
| 1867 | } | |||
| 1868 | } | |||
| 1869 | ||||
| 1870 | return true; | |||
| 1871 | } | |||
| 1872 | ||||
| 1873 | void RegisterCoalescer::setUndefOnPrunedSubRegUses(LiveInterval &LI, | |||
| 1874 | Register Reg, | |||
| 1875 | LaneBitmask PrunedLanes) { | |||
| 1876 | // If there are other instructions in the segment reading the undef | |||
| 1877 | // sublane value, we need to mark them with <undef>. | |||
| 1878 | for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) { | |||
| 1879 | unsigned SubRegIdx = MO.getSubReg(); | |||
| 1880 | if (SubRegIdx == 0 || MO.isUndef()) | |||
| 1881 | continue; | |||
| 1882 | ||||
| 1883 | LaneBitmask SubRegMask = TRI->getSubRegIndexLaneMask(SubRegIdx); | |||
| 1884 | SlotIndex Pos = LIS->getInstructionIndex(*MO.getParent()); | |||
| 1885 | for (LiveInterval::SubRange &S : LI.subranges()) { | |||
| 1886 | if (!S.liveAt(Pos) && (PrunedLanes & SubRegMask).any()) { | |||
| 1887 | MO.setIsUndef(); | |||
| 1888 | break; | |||
| 1889 | } | |||
| 1890 | } | |||
| 1891 | } | |||
| 1892 | ||||
| 1893 | LI.removeEmptySubRanges(); | |||
| 1894 | ||||
| 1895 | // A def of a subregister may be a use of other register lanes. Replacing | |||
| 1896 | // such a def with a def of a different register will eliminate the use, | |||
| 1897 | // and may cause the recorded live range to be larger than the actual | |||
| 1898 | // liveness in the program IR. | |||
| 1899 | LIS->shrinkToUses(&LI); | |||
| 1900 | } | |||
| 1901 | ||||
| 1902 | bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) { | |||
| 1903 | Again = false; | |||
| 1904 | LLVM_DEBUG(dbgs() << LIS->getInstructionIndex(*CopyMI) << '\t' << *CopyMI)do { } while (false); | |||
| 1905 | ||||
| 1906 | CoalescerPair CP(*TRI); | |||
| 1907 | if (!CP.setRegisters(CopyMI)) { | |||
| 1908 | LLVM_DEBUG(dbgs() << "\tNot coalescable.\n")do { } while (false); | |||
| 1909 | return false; | |||
| 1910 | } | |||
| 1911 | ||||
| 1912 | if (CP.getNewRC()) { | |||
| 1913 | auto SrcRC = MRI->getRegClass(CP.getSrcReg()); | |||
| 1914 | auto DstRC = MRI->getRegClass(CP.getDstReg()); | |||
| 1915 | unsigned SrcIdx = CP.getSrcIdx(); | |||
| 1916 | unsigned DstIdx = CP.getDstIdx(); | |||
| 1917 | if (CP.isFlipped()) { | |||
| 1918 | std::swap(SrcIdx, DstIdx); | |||
| 1919 | std::swap(SrcRC, DstRC); | |||
| 1920 | } | |||
| 1921 | if (!TRI->shouldCoalesce(CopyMI, SrcRC, SrcIdx, DstRC, DstIdx, | |||
| 1922 | CP.getNewRC(), *LIS)) { | |||
| 1923 | LLVM_DEBUG(dbgs() << "\tSubtarget bailed on coalescing.\n")do { } while (false); | |||
| 1924 | return false; | |||
| 1925 | } | |||
| 1926 | } | |||
| 1927 | ||||
| 1928 | // Dead code elimination. This really should be handled by MachineDCE, but | |||
| 1929 | // sometimes dead copies slip through, and we must not generate invalid | |||
| 1930 | // live ranges. | |||
| 1931 | if (!CP.isPhys() && CopyMI->allDefsAreDead()) { | |||
| 1932 | LLVM_DEBUG(dbgs() << "\tCopy is dead.\n")do { } while (false); | |||
| 1933 | DeadDefs.push_back(CopyMI); | |||
| 1934 | eliminateDeadDefs(); | |||
| 1935 | return true; | |||
| 1936 | } | |||
| 1937 | ||||
| 1938 | // Eliminate undefs. | |||
| 1939 | if (!CP.isPhys()) { | |||
| 1940 | // If this is an IMPLICIT_DEF, leave it alone, but don't try to coalesce. | |||
| 1941 | if (MachineInstr *UndefMI = eliminateUndefCopy(CopyMI)) { | |||
| 1942 | if (UndefMI->isImplicitDef()) | |||
| 1943 | return false; | |||
| 1944 | deleteInstr(CopyMI); | |||
| 1945 | return false; // Not coalescable. | |||
| 1946 | } | |||
| 1947 | } | |||
| 1948 | ||||
| 1949 | // Coalesced copies are normally removed immediately, but transformations | |||
| 1950 | // like removeCopyByCommutingDef() can inadvertently create identity copies. | |||
| 1951 | // When that happens, just join the values and remove the copy. | |||
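| | // For example (hypothetical): a leftover identity copy "%0 = COPY %0" is | |||
| | // handled by merging the value it defines into the value it reads and | |||
| | // then deleting the instruction. | |||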
| 1952 | if (CP.getSrcReg() == CP.getDstReg()) { | |||
| 1953 | LiveInterval &LI = LIS->getInterval(CP.getSrcReg()); | |||
| 1954 | LLVM_DEBUG(dbgs() << "\tCopy already coalesced: " << LI << '\n')do { } while (false); | |||
| 1955 | const SlotIndex CopyIdx = LIS->getInstructionIndex(*CopyMI); | |||
| 1956 | LiveQueryResult LRQ = LI.Query(CopyIdx); | |||
| 1957 | if (VNInfo *DefVNI = LRQ.valueDefined()) { | |||
| 1958 | VNInfo *ReadVNI = LRQ.valueIn(); | |||
| 1959 | assert(ReadVNI && "No value before copy and no <undef> flag.")((void)0); | |||
| 1960 | assert(ReadVNI != DefVNI && "Cannot read and define the same value.")((void)0); | |||
| 1961 | ||||
| 1962 | // Track incoming undef lanes we need to eliminate from the subrange. | |||
| 1963 | LaneBitmask PrunedLanes; | |||
| 1964 | MachineBasicBlock *MBB = CopyMI->getParent(); | |||
| 1965 | ||||
| 1966 | // Process subregister live ranges. | |||
| 1967 | for (LiveInterval::SubRange &S : LI.subranges()) { | |||
| 1968 | LiveQueryResult SLRQ = S.Query(CopyIdx); | |||
| 1969 | if (VNInfo *SDefVNI = SLRQ.valueDefined()) { | |||
| 1970 | if (VNInfo *SReadVNI = SLRQ.valueIn()) | |||
| 1971 | SDefVNI = S.MergeValueNumberInto(SDefVNI, SReadVNI); | |||
| 1972 | ||||
| 1973 | // If this copy introduced an undef subrange from an incoming value, | |||
| 1974 | // we need to eliminate the undef live in values from the subrange. | |||
| 1975 | if (copyValueUndefInPredecessors(S, MBB, SLRQ)) { | |||
| 1976 | LLVM_DEBUG(dbgs() << "Incoming sublane value is undef at copy\n")do { } while (false); | |||
| 1977 | PrunedLanes |= S.LaneMask; | |||
| 1978 | S.removeValNo(SDefVNI); | |||
| 1979 | } | |||
| 1980 | } | |||
| 1981 | } | |||
| 1982 | ||||
| 1983 | LI.MergeValueNumberInto(DefVNI, ReadVNI); | |||
| 1984 | if (PrunedLanes.any()) { | |||
| 1985 | LLVM_DEBUG(dbgs() << "Pruning undef incoming lanes: "do { } while (false) | |||
| 1986 | << PrunedLanes << '\n')do { } while (false); | |||
| 1987 | setUndefOnPrunedSubRegUses(LI, CP.getSrcReg(), PrunedLanes); | |||
| 1988 | } | |||
| 1989 | ||||
| 1990 | LLVM_DEBUG(dbgs() << "\tMerged values: " << LI << '\n')do { } while (false); | |||
| 1991 | } | |||
| 1992 | deleteInstr(CopyMI); | |||
| 1993 | return true; | |||
| 1994 | } | |||
| 1995 | ||||
| 1996 | // Enforce policies. | |||
| 1997 | if (CP.isPhys()) { | |||
| 1998 | LLVM_DEBUG(dbgs() << "\tConsidering merging "do { } while (false) | |||
| 1999 | << printReg(CP.getSrcReg(), TRI) << " with "do { } while (false) | |||
| 2000 | << printReg(CP.getDstReg(), TRI, CP.getSrcIdx()) << '\n')do { } while (false); | |||
| 2001 | if (!canJoinPhys(CP)) { | |||
| 2002 | // Before giving up coalescing, if definition of source is defined by | |||
| 2003 | // trivial computation, try rematerializing it. | |||
| 2004 | bool IsDefCopy = false; | |||
| 2005 | if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy)) | |||
| 2006 | return true; | |||
| 2007 | if (IsDefCopy) | |||
| 2008 | Again = true; // May be possible to coalesce later. | |||
| 2009 | return false; | |||
| 2010 | } | |||
| 2011 | } else { | |||
| 2012 | // When possible, let DstReg be the larger interval. | |||
| 2013 | if (!CP.isPartial() && LIS->getInterval(CP.getSrcReg()).size() > | |||
| 2014 | LIS->getInterval(CP.getDstReg()).size()) | |||
| 2015 | CP.flip(); | |||
| 2016 | ||||
| 2017 | LLVM_DEBUG({do { } while (false) | |||
| 2018 | dbgs() << "\tConsidering merging to "do { } while (false) | |||
| 2019 | << TRI->getRegClassName(CP.getNewRC()) << " with ";do { } while (false) | |||
| 2020 | if (CP.getDstIdx() && CP.getSrcIdx())do { } while (false) | |||
| 2021 | dbgs() << printReg(CP.getDstReg()) << " in "do { } while (false) | |||
| 2022 | << TRI->getSubRegIndexName(CP.getDstIdx()) << " and "do { } while (false) | |||
| 2023 | << printReg(CP.getSrcReg()) << " in "do { } while (false) | |||
| 2024 | << TRI->getSubRegIndexName(CP.getSrcIdx()) << '\n';do { } while (false) | |||
| 2025 | elsedo { } while (false) | |||
| 2026 | dbgs() << printReg(CP.getSrcReg(), TRI) << " in "do { } while (false) | |||
| 2027 | << printReg(CP.getDstReg(), TRI, CP.getSrcIdx()) << '\n';do { } while (false) | |||
| 2028 | })do { } while (false); | |||
| 2029 | } | |||
| 2030 | ||||
| 2031 | ShrinkMask = LaneBitmask::getNone(); | |||
| 2032 | ShrinkMainRange = false; | |||
| 2033 | ||||
| 2034 | // Okay, attempt to join these two intervals. On failure, this returns false. | |||
| 2035 | // Otherwise, if one of the intervals being joined is a physreg, this method | |||
| 2036 | // always canonicalizes DstInt to be it. The output "SrcInt" will not have | |||
| 2037 | // been modified, so we can use this information below to update aliases. | |||
| 2038 | if (!joinIntervals(CP)) { | |||
| 2039 | // Coalescing failed. | |||
| 2040 | ||||
| 2041 | // If definition of source is defined by trivial computation, try | |||
| 2042 | // rematerializing it. | |||
| 2043 | bool IsDefCopy = false; | |||
| 2044 | if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy)) | |||
| 2045 | return true; | |||
| 2046 | ||||
| 2047 | // If we can eliminate the copy without merging the live segments, do so | |||
| 2048 | // now. | |||
| 2049 | if (!CP.isPartial() && !CP.isPhys()) { | |||
| 2050 | bool Changed = adjustCopiesBackFrom(CP, CopyMI); | |||
| 2051 | bool Shrink = false; | |||
| 2052 | if (!Changed) | |||
| 2053 | std::tie(Changed, Shrink) = removeCopyByCommutingDef(CP, CopyMI); | |||
| 2054 | if (Changed) { | |||
| 2055 | deleteInstr(CopyMI); | |||
| 2056 | if (Shrink) { | |||
| 2057 | Register DstReg = CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg(); | |||
| 2058 | LiveInterval &DstLI = LIS->getInterval(DstReg); | |||
| 2059 | shrinkToUses(&DstLI); | |||
| 2060 | LLVM_DEBUG(dbgs() << "\t\tshrunk: " << DstLI << '\n')do { } while (false); | |||
| 2061 | } | |||
| 2062 | LLVM_DEBUG(dbgs() << "\tTrivial!\n")do { } while (false); | |||
| 2063 | return true; | |||
| 2064 | } | |||
| 2065 | } | |||
| 2066 | ||||
| 2067 | // Try and see if we can partially eliminate the copy by moving the copy to | |||
| 2068 | // its predecessor. | |||
| 2069 | if (!CP.isPartial() && !CP.isPhys()) | |||
| 2070 | if (removePartialRedundancy(CP, *CopyMI)) | |||
| 2071 | return true; | |||
| 2072 | ||||
| 2073 | // Otherwise, we are unable to join the intervals. | |||
| 2074 | LLVM_DEBUG(dbgs() << "\tInterference!\n")do { } while (false); | |||
| 2075 | Again = true; // May be possible to coalesce later. | |||
| 2076 | return false; | |||
| 2077 | } | |||
| 2078 | ||||
| 2079 | // We are coalescing to a virtual register that is of a sub-register class | |||
| 2080 | // of the other; make sure the resulting register is set to the right class. | |||
| 2081 | if (CP.isCrossClass()) { | |||
| 2082 | ++numCrossRCs; | |||
| 2083 | MRI->setRegClass(CP.getDstReg(), CP.getNewRC()); | |||
| 2084 | } | |||
| 2085 | ||||
| 2086 | // Removing sub-register copies can ease the register class constraints. | |||
| 2087 | // Make sure we attempt to inflate the register class of DstReg. | |||
| 2088 | if (!CP.isPhys() && RegClassInfo.isProperSubClass(CP.getNewRC())) | |||
| 2089 | InflateRegs.push_back(CP.getDstReg()); | |||
| 2090 | ||||
| 2091 | // CopyMI has been erased by joinIntervals at this point. Remove it from | |||
| 2092 | // ErasedInstrs since copyCoalesceWorkList() won't add a successful join back | |||
| 2093 | // to the work list. This keeps ErasedInstrs from growing needlessly. | |||
| 2094 | ErasedInstrs.erase(CopyMI); | |||
| 2095 | ||||
| 2096 | // Rewrite all SrcReg operands to DstReg. | |||
| 2097 | // Also update DstReg operands to include DstIdx if it is set. | |||
| 2098 | if (CP.getDstIdx()) | |||
| 2099 | updateRegDefsUses(CP.getDstReg(), CP.getDstReg(), CP.getDstIdx()); | |||
| 2100 | updateRegDefsUses(CP.getSrcReg(), CP.getDstReg(), CP.getSrcIdx()); | |||
| 2101 | ||||
| 2102 | // Shrink subregister ranges if necessary. | |||
| 2103 | if (ShrinkMask.any()) { | |||
| 2104 | LiveInterval &LI = LIS->getInterval(CP.getDstReg()); | |||
| 2105 | for (LiveInterval::SubRange &S : LI.subranges()) { | |||
| 2106 | if ((S.LaneMask & ShrinkMask).none()) | |||
| 2107 | continue; | |||
| 2108 | LLVM_DEBUG(dbgs() << "Shrink LaneUses (Lane " << PrintLaneMask(S.LaneMask)do { } while (false) | |||
| 2109 | << ")\n")do { } while (false); | |||
| 2110 | LIS->shrinkToUses(S, LI.reg()); | |||
| 2111 | } | |||
| 2112 | LI.removeEmptySubRanges(); | |||
| 2113 | } | |||
| 2114 | ||||
| 2115 | // CP.getSrcReg()'s live interval has been merged into CP.getDstReg()'s live | |||
| 2116 | // interval. Since CP.getSrcReg() is in the ToBeUpdated set and its live | |||
| 2117 | // interval is not up-to-date, we need to update the merged live interval here. | |||
| 2118 | if (ToBeUpdated.count(CP.getSrcReg())) | |||
| 2119 | ShrinkMainRange = true; | |||
| 2120 | ||||
| 2121 | if (ShrinkMainRange) { | |||
| 2122 | LiveInterval &LI = LIS->getInterval(CP.getDstReg()); | |||
| 2123 | shrinkToUses(&LI); | |||
| 2124 | } | |||
| 2125 | ||||
| 2126 | // SrcReg is guaranteed to be the register whose live interval is being | |||
| 2127 | // merged. | |||
| 2128 | LIS->removeInterval(CP.getSrcReg()); | |||
| 2129 | ||||
| 2130 | // Update regalloc hint. | |||
| 2131 | TRI->updateRegAllocHint(CP.getSrcReg(), CP.getDstReg(), *MF); | |||
| 2132 | ||||
| 2133 | LLVM_DEBUG({do { } while (false) | |||
| 2134 | dbgs() << "\tSuccess: " << printReg(CP.getSrcReg(), TRI, CP.getSrcIdx())do { } while (false) | |||
| 2135 | << " -> " << printReg(CP.getDstReg(), TRI, CP.getDstIdx()) << '\n';do { } while (false) | |||
| 2136 | dbgs() << "\tResult = ";do { } while (false) | |||
| 2137 | if (CP.isPhys())do { } while (false) | |||
| 2138 | dbgs() << printReg(CP.getDstReg(), TRI);do { } while (false) | |||
| 2139 | elsedo { } while (false) | |||
| 2140 | dbgs() << LIS->getInterval(CP.getDstReg());do { } while (false) | |||
| 2141 | dbgs() << '\n';do { } while (false) | |||
| 2142 | })do { } while (false); | |||
| 2143 | ||||
| 2144 | ++numJoins; | |||
| 2145 | return true; | |||
| 2146 | } | |||
| 2147 | ||||
| 2148 | bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) { | |||
| 2149 | Register DstReg = CP.getDstReg(); | |||
| 2150 | Register SrcReg = CP.getSrcReg(); | |||
| 2151 | assert(CP.isPhys() && "Must be a physreg copy")((void)0); | |||
| 2152 | assert(MRI->isReserved(DstReg) && "Not a reserved register")((void)0); | |||
| 2153 | LiveInterval &RHS = LIS->getInterval(SrcReg); | |||
| 2154 | LLVM_DEBUG(dbgs() << "\t\tRHS = " << RHS << '\n')do { } while (false); | |||
| 2155 | ||||
| 2156 | assert(RHS.containsOneValue() && "Invalid join with reserved register")((void)0); | |||
| 2157 | ||||
| 2158 | // Optimization for reserved registers like ESP. We can only merge with a | |||
| 2159 | // reserved physreg if RHS has a single value that is a copy of DstReg. | |||
| 2160 | // The live range of the reserved register will look like a set of dead defs | |||
| 2161 | // - we don't properly track the live range of reserved registers. | |||
| 2162 | ||||
| 2163 | // Deny any overlapping intervals. This depends on all the reserved | |||
| 2164 | // register live ranges looking like dead defs. | |||
| 2165 | if (!MRI->isConstantPhysReg(DstReg)) { | |||
| 2166 | for (MCRegUnitIterator UI(DstReg, TRI); UI.isValid(); ++UI) { | |||
| 2167 | // Abort if not all the regunits are reserved. | |||
| 2168 | for (MCRegUnitRootIterator RI(*UI, TRI); RI.isValid(); ++RI) { | |||
| 2169 | if (!MRI->isReserved(*RI)) | |||
| 2170 | return false; | |||
| 2171 | } | |||
| 2172 | if (RHS.overlaps(LIS->getRegUnit(*UI))) { | |||
| 2173 | LLVM_DEBUG(dbgs() << "\t\tInterference: " << printRegUnit(*UI, TRI)do { } while (false) | |||
| 2174 | << '\n')do { } while (false); | |||
| 2175 | return false; | |||
| 2176 | } | |||
| 2177 | } | |||
| 2178 | ||||
| 2179 | // We must also check for overlaps with regmask clobbers. | |||
| 2180 | BitVector RegMaskUsable; | |||
| 2181 | if (LIS->checkRegMaskInterference(RHS, RegMaskUsable) && | |||
| 2182 | !RegMaskUsable.test(DstReg)) { | |||
| 2183 | LLVM_DEBUG(dbgs() << "\t\tRegMask interference\n")do { } while (false); | |||
| 2184 | return false; | |||
| 2185 | } | |||
| 2186 | } | |||
| 2187 | ||||
| 2188 | // Skip any value computations; we are not adding new values to the | |||
| 2189 | // reserved register. Also skip merging the live ranges, the reserved | |||
| 2190 | // register live range doesn't need to be accurate as long as all the | |||
| 2191 | // defs are there. | |||
| 2192 | ||||
| 2193 | // Delete the identity copy. | |||
| 2194 | MachineInstr *CopyMI; | |||
| 2195 | if (CP.isFlipped()) { | |||
| 2196 | // Physreg is copied into vreg | |||
| 2197 | // %y = COPY %physreg_x | |||
| 2198 | // ... //< no other def of %physreg_x here | |||
| 2199 | // use %y | |||
| 2200 | // => | |||
| 2201 | // ... | |||
| 2202 | // use %physreg_x | |||
| 2203 | CopyMI = MRI->getVRegDef(SrcReg); | |||
| 2204 | } else { | |||
| 2205 | // VReg is copied into physreg: | |||
| 2206 | // %y = def | |||
| 2207 | // ... //< no other def or use of %physreg_x here | |||
| 2208 | // %physreg_x = COPY %y | |||
| 2209 | // => | |||
| 2210 | // %physreg_x = def | |||
| 2211 | // ... | |||
| 2212 | if (!MRI->hasOneNonDBGUse(SrcReg)) { | |||
| 2213 | LLVM_DEBUG(dbgs() << "\t\tMultiple vreg uses!\n")do { } while (false); | |||
| 2214 | return false; | |||
| 2215 | } | |||
| 2216 | ||||
| 2217 | if (!LIS->intervalIsInOneMBB(RHS)) { | |||
| 2218 | LLVM_DEBUG(dbgs() << "\t\tComplex control flow!\n")do { } while (false); | |||
| 2219 | return false; | |||
| 2220 | } | |||
| 2221 | ||||
| 2222 | MachineInstr &DestMI = *MRI->getVRegDef(SrcReg); | |||
| 2223 | CopyMI = &*MRI->use_instr_nodbg_begin(SrcReg); | |||
| 2224 | SlotIndex CopyRegIdx = LIS->getInstructionIndex(*CopyMI).getRegSlot(); | |||
| 2225 | SlotIndex DestRegIdx = LIS->getInstructionIndex(DestMI).getRegSlot(); | |||
| 2226 | ||||
| 2227 | if (!MRI->isConstantPhysReg(DstReg)) { | |||
| 2228 | // We checked above that there are no interfering defs of the physical | |||
| 2229 | // register. However, for this case, where we intend to move up the def of | |||
| 2230 | // the physical register, we also need to check for interfering uses. | |||
| 2231 | SlotIndexes *Indexes = LIS->getSlotIndexes(); | |||
| 2232 | for (SlotIndex SI = Indexes->getNextNonNullIndex(DestRegIdx); | |||
| 2233 | SI != CopyRegIdx; SI = Indexes->getNextNonNullIndex(SI)) { | |||
| 2234 | MachineInstr *MI = LIS->getInstructionFromIndex(SI); | |||
| 2235 | if (MI->readsRegister(DstReg, TRI)) { | |||
| 2236 | LLVM_DEBUG(dbgs() << "\t\tInterference (read): " << *MI)do { } while (false); | |||
| 2237 | return false; | |||
| 2238 | } | |||
| 2239 | } | |||
| 2240 | } | |||
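| | // For illustration (a sketch added here, not part of the original source; | |||
| | // the register names are hypothetical): | |||
| | // | |||
| | //   %y = def                 <-- DestRegIdx | |||
| | //   ... = use %physreg_x     <-- read between the two indexes: abort | |||
| | //   %physreg_x = COPY %y     <-- CopyRegIdx | |||
| | // | |||
| | // The scan above walks every instruction between DestRegIdx and CopyRegIdx | |||
| | // and rejects the transform on any read of DstReg, since the def of the | |||
| | // physical register is about to be moved up to DestRegIdx. | |||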
| 2241 | ||||
| 2242 | // We're going to remove the copy which defines a physical reserved | |||
| 2243 | // register, so remove its valno, etc. | |||
| 2244 | LLVM_DEBUG(dbgs() << "\t\tRemoving phys reg def of " | |||
| 2245 | << printReg(DstReg, TRI) << " at " << CopyRegIdx << "\n"); | |||
| 2246 | ||||
| 2247 | LIS->removePhysRegDefAt(DstReg.asMCReg(), CopyRegIdx); | |||
| 2248 | // Create a new dead def at the new def location. | |||
| 2249 | for (MCRegUnitIterator UI(DstReg, TRI); UI.isValid(); ++UI) { | |||
| 2250 | LiveRange &LR = LIS->getRegUnit(*UI); | |||
| 2251 | LR.createDeadDef(DestRegIdx, LIS->getVNInfoAllocator()); | |||
| 2252 | } | |||
| 2253 | } | |||
| 2254 | ||||
| 2255 | deleteInstr(CopyMI); | |||
| 2256 | ||||
| 2257 | // We don't track kills for reserved registers. | |||
| 2258 | MRI->clearKillFlags(CP.getSrcReg()); | |||
| 2259 | ||||
| 2260 | return true; | |||
| 2261 | } | |||
| 2262 | ||||
| 2263 | //===----------------------------------------------------------------------===// | |||
| 2264 | // Interference checking and interval joining | |||
| 2265 | //===----------------------------------------------------------------------===// | |||
| 2266 | // | |||
| 2267 | // In the easiest case, the two live ranges being joined are disjoint, and | |||
| 2268 | // there is no interference to consider. It is quite common, though, to have | |||
| 2269 | // overlapping live ranges, and we need to check if the interference can be | |||
| 2270 | // resolved. | |||
| 2271 | // | |||
| 2272 | // The live range of a single SSA value forms a sub-tree of the dominator tree. | |||
| 2273 | // This means that two SSA values overlap if and only if the def of one value | |||
| 2274 | // is contained in the live range of the other value. As a special case, the | |||
| 2275 | // overlapping values can be defined at the same index. | |||
| 2276 | // | |||
| 2277 | // The interference from an overlapping def can be resolved in these cases: | |||
| 2278 | // | |||
| 2279 | // 1. Coalescable copies. The value is defined by a copy that would become an | |||
| 2280 | // identity copy after joining SrcReg and DstReg. The copy instruction will | |||
| 2281 | // be removed, and the value will be merged with the source value. | |||
| 2282 | // | |||
| 2283 | // There can be several copies back and forth, causing many values to be | |||
| 2284 | // merged into one. We compute a list of ultimate values in the joined live | |||
| 2285 | // range as well as a mapping from the old value numbers. | |||
| 2286 | // | |||
| 2287 | // 2. IMPLICIT_DEF. This instruction is only inserted to ensure all PHI | |||
| 2288 | // predecessors have a live out value. It doesn't cause real interference, | |||
| 2289 | // and can be merged into the value it overlaps. Like a coalescable copy, it | |||
| 2290 | // can be erased after joining. | |||
| 2291 | // | |||
| 2292 | // 3. Copy of external value. The overlapping def may be a copy of a value that | |||
| 2293 | // is already in the other register. This is like a coalescable copy, but | |||
| 2294 | // the live range of the source register must be trimmed after erasing the | |||
| 2295 | // copy instruction: | |||
| 2296 | // | |||
| 2297 | // %src = COPY %ext | |||
| 2298 | // %dst = COPY %ext <-- Remove this COPY, trim the live range of %ext. | |||
| 2299 | // | |||
| 2300 | // 4. Clobbering undefined lanes. Vector registers are sometimes built by | |||
| 2301 | // defining one lane at a time: | |||
| 2302 | // | |||
| 2303 | // %dst:ssub0<def,read-undef> = FOO | |||
| 2304 | // %src = BAR | |||
| 2305 | // %dst:ssub1 = COPY %src | |||
| 2306 | // | |||
| 2307 | // The live range of %src overlaps the %dst value defined by FOO, but | |||
| 2308 | // merging %src into %dst:ssub1 is only going to clobber the ssub1 lane | |||
| 2309 | // which was undef anyway. | |||
| 2310 | // | |||
| 2311 | // The value mapping is more complicated in this case. The final live range | |||
| 2312 | // will have different value numbers for both FOO and BAR, but there is no | |||
| 2313 | // simple mapping from old to new values. It may even be necessary to add | |||
| 2314 | // new PHI values. | |||
| 2315 | // | |||
| 2316 | // 5. Clobbering dead lanes. A def may clobber a lane of a vector register that | |||
| 2317 | // is live, but never read. This can happen because we don't compute | |||
| 2318 | // individual live ranges per lane. | |||
| 2319 | // | |||
| 2320 | // %dst = FOO | |||
| 2321 | // %src = BAR | |||
| 2322 | // %dst:ssub1 = COPY %src | |||
| 2323 | // | |||
| 2324 | // This kind of interference is only resolved locally. If the clobbered | |||
| 2325 | // lane value escapes the block, the join is aborted. | |||
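| | // | |||
| | // As a worked illustration of case 1 (a sketch, not part of the original | |||
| | // source; register names are hypothetical): | |||
| | // | |||
| | //   %dst = DEF | |||
| | //   %src = COPY %dst   <-- identity copy once %src and %dst are joined | |||
| | //   %dst = COPY %src   <-- identity copy once %src and %dst are joined | |||
| | // | |||
| | // Both copies are erased, and the three value numbers collapse into a | |||
| | // single ultimate value in the joined live range, recorded by the value | |||
| | // mapping described above. | |||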
| 2326 | ||||
| 2327 | namespace { | |||
| 2328 | ||||
| 2329 | /// Track information about values in a single virtual register about to be | |||
| 2330 | /// joined. Objects of this class are always created in pairs - one for each | |||
| 2331 | /// side of the CoalescerPair (or one for each lane of a side of the coalescer | |||
| 2332 | /// pair). | |||
| 2333 | class JoinVals { | |||
| 2334 | /// Live range we work on. | |||
| 2335 | LiveRange &LR; | |||
| 2336 | ||||
| 2337 | /// (Main) register we work on. | |||
| 2338 | const Register Reg; | |||
| 2339 | ||||
| 2340 | /// Reg (and therefore the values in this liverange) will end up as | |||
| 2341 | /// subregister SubIdx in the coalesced register. Either CP.DstIdx or | |||
| 2342 | /// CP.SrcIdx. | |||
| 2343 | const unsigned SubIdx; | |||
| 2344 | ||||
| 2345 | /// The LaneMask that this liverange will occupy in the coalesced register. | |||
| 2346 | /// May be smaller than the lanemask produced by SubIdx when merging subranges. | |||
| 2347 | const LaneBitmask LaneMask; | |||
| 2348 | ||||
| 2349 | /// This is true when joining sub register ranges, false when joining main | |||
| 2350 | /// ranges. | |||
| 2351 | const bool SubRangeJoin; | |||
| 2352 | ||||
| 2353 | /// Whether the current LiveInterval tracks subregister liveness. | |||
| 2354 | const bool TrackSubRegLiveness; | |||
| 2355 | ||||
| 2356 | /// Values that will be present in the final live range. | |||
| 2357 | SmallVectorImpl<VNInfo*> &NewVNInfo; | |||
| 2358 | ||||
| 2359 | const CoalescerPair &CP; | |||
| 2360 | LiveIntervals *LIS; | |||
| 2361 | SlotIndexes *Indexes; | |||
| 2362 | const TargetRegisterInfo *TRI; | |||
| 2363 | ||||
| 2364 | /// Value number assignments. Maps value numbers in LI to entries in | |||
| 2365 | /// NewVNInfo. This is suitable for passing to LiveInterval::join(). | |||
| 2366 | SmallVector<int, 8> Assignments; | |||
| 2367 | ||||
| 2368 | public: | |||
| 2369 | /// Conflict resolution for overlapping values. | |||
| 2370 | enum ConflictResolution { | |||
| 2371 | /// No overlap, simply keep this value. | |||
| 2372 | CR_Keep, | |||
| 2373 | ||||
| 2374 | /// Merge this value into OtherVNI and erase the defining instruction. | |||
| 2375 | /// Used for IMPLICIT_DEF, coalescable copies, and copies from external | |||
| 2376 | /// values. | |||
| 2377 | CR_Erase, | |||
| 2378 | ||||
| 2379 | /// Merge this value into OtherVNI but keep the defining instruction. | |||
| 2380 | /// This is for the special case where OtherVNI is defined by the same | |||
| 2381 | /// instruction. | |||
| 2382 | CR_Merge, | |||
| 2383 | ||||
| 2384 | /// Keep this value, and have it replace OtherVNI where possible. This | |||
| 2385 | /// complicates value mapping since OtherVNI maps to two different values | |||
| 2386 | /// before and after this def. | |||
| 2387 | /// Used when clobbering undefined or dead lanes. | |||
| 2388 | CR_Replace, | |||
| 2389 | ||||
| 2390 | /// Unresolved conflict. Visit later when all values have been mapped. | |||
| 2391 | CR_Unresolved, | |||
| 2392 | ||||
| 2393 | /// Unresolvable conflict. Abort the join. | |||
| 2394 | CR_Impossible | |||
| 2395 | }; | |||
| 2396 | ||||
| 2397 | private: | |||
| 2398 | /// Per-value info for LI. The lane bit masks are all relative to the final | |||
| 2399 | /// joined register, so they can be compared directly between SrcReg and | |||
| 2400 | /// DstReg. | |||
| 2401 | struct Val { | |||
| 2402 | ConflictResolution Resolution = CR_Keep; | |||
| 2403 | ||||
| 2404 | /// Lanes written by this def, 0 for unanalyzed values. | |||
| 2405 | LaneBitmask WriteLanes; | |||
| 2406 | ||||
| 2407 | /// Lanes with defined values in this register. Other lanes are undef and | |||
| 2408 | /// safe to clobber. | |||
| 2409 | LaneBitmask ValidLanes; | |||
| 2410 | ||||
| 2411 | /// Value in LI being redefined by this def. | |||
| 2412 | VNInfo *RedefVNI = nullptr; | |||
| 2413 | ||||
| 2414 | /// Value in the other live range that overlaps this def, if any. | |||
| 2415 | VNInfo *OtherVNI = nullptr; | |||
| 2416 | ||||
| 2417 | /// Is this value an IMPLICIT_DEF that can be erased? | |||
| 2418 | /// | |||
| 2419 | /// IMPLICIT_DEF values should only exist at the end of a basic block that | |||
| 2420 | /// is a predecessor to a phi-value. These IMPLICIT_DEF instructions can be | |||
| 2421 | /// safely erased if they are overlapping a live value in the other live | |||
| 2422 | /// interval. | |||
| 2423 | /// | |||
| 2424 | /// Weird control flow graphs and incomplete PHI handling in | |||
| 2425 | /// ProcessImplicitDefs can very rarely create IMPLICIT_DEF values with | |||
| 2426 | /// longer live ranges. Such IMPLICIT_DEF values should be treated like | |||
| 2427 | /// normal values. | |||
| 2428 | bool ErasableImplicitDef = false; | |||
| 2429 | ||||
| 2430 | /// True when the live range of this value will be pruned because of an | |||
| 2431 | /// overlapping CR_Replace value in the other live range. | |||
| 2432 | bool Pruned = false; | |||
| 2433 | ||||
| 2434 | /// True once Pruned above has been computed. | |||
| 2435 | bool PrunedComputed = false; | |||
| 2436 | ||||
| 2437 | /// True if this value is determined to be identical to OtherVNI | |||
| 2438 | /// (in valuesIdentical). This is used with CR_Erase where the erased | |||
| 2439 | /// copy is redundant, i.e. the source value is already the same as | |||
| 2440 | /// the destination. In such cases the subranges need to be updated | |||
| 2441 | /// properly. See comment at pruneSubRegValues for more info. | |||
| 2442 | bool Identical = false; | |||
| 2443 | ||||
| 2444 | Val() = default; | |||
| 2445 | ||||
| 2446 | bool isAnalyzed() const { return WriteLanes.any(); } | |||
| 2447 | }; | |||
| 2448 | ||||
| 2449 | /// One entry per value number in LI. | |||
| 2450 | SmallVector<Val, 8> Vals; | |||
| 2451 | ||||
| 2452 | /// Compute the bitmask of lanes actually written by DefMI. | |||
| 2453 | /// Set Redef if there are any partial register definitions that depend on the | |||
| 2454 | /// previous value of the register. | |||
| 2455 | LaneBitmask computeWriteLanes(const MachineInstr *DefMI, bool &Redef) const; | |||
| 2456 | ||||
| 2457 | /// Find the ultimate value that VNI was copied from. | |||
| 2458 | std::pair<const VNInfo *, Register> followCopyChain(const VNInfo *VNI) const; | |||
| 2459 | ||||
| 2460 | bool valuesIdentical(VNInfo *Value0, VNInfo *Value1, const JoinVals &Other) const; | |||
| 2461 | ||||
| 2462 | /// Analyze ValNo in this live range, and set all fields of Vals[ValNo]. | |||
| 2463 | /// Return a conflict resolution when possible, but leave the hard cases as | |||
| 2464 | /// CR_Unresolved. | |||
| 2465 | /// Recursively calls computeAssignment() on this and Other, guaranteeing that | |||
| 2466 | /// both OtherVNI and RedefVNI have been analyzed and mapped before returning. | |||
| 2467 | /// The recursion always goes upwards in the dominator tree, making loops | |||
| 2468 | /// impossible. | |||
| 2469 | ConflictResolution analyzeValue(unsigned ValNo, JoinVals &Other); | |||
| 2470 | ||||
| 2471 | /// Compute the value assignment for ValNo in LR. | |||
| 2472 | /// This may be called recursively by analyzeValue(), but never for a ValNo on | |||
| 2473 | /// the stack. | |||
| 2474 | void computeAssignment(unsigned ValNo, JoinVals &Other); | |||
| 2475 | ||||
| 2476 | /// Assuming ValNo is going to clobber some valid lanes in Other.LR, compute | |||
| 2477 | /// the extent of the tainted lanes in the block. | |||
| 2478 | /// | |||
| 2479 | /// Multiple values in Other.LR can be affected since partial redefinitions | |||
| 2480 | /// can preserve previously tainted lanes. | |||
| 2481 | /// | |||
| 2482 | /// 1 %dst = VLOAD <-- Define all lanes in %dst | |||
| 2483 | /// 2 %src = FOO <-- ValNo to be joined with %dst:ssub0 | |||
| 2484 | /// 3 %dst:ssub1 = BAR <-- Partial redef doesn't clear taint in ssub0 | |||
| 2485 | /// 4 %dst:ssub0 = COPY %src <-- Conflict resolved, ssub0 wasn't read | |||
| 2486 | /// | |||
| 2487 | /// For each ValNo in Other that is affected, add an (EndIndex, TaintedLanes) | |||
| 2488 | /// entry to TaintedVals. | |||
| 2489 | /// | |||
| 2490 | /// Returns false if the tainted lanes extend beyond the basic block. | |||
| 2491 | bool | |||
| 2492 | taintExtent(unsigned ValNo, LaneBitmask TaintedLanes, JoinVals &Other, | |||
| 2493 | SmallVectorImpl<std::pair<SlotIndex, LaneBitmask>> &TaintExtent); | |||
| 2494 | ||||
| 2495 | /// Return true if MI uses any of the given Lanes from Reg. | |||
| 2496 | /// This does not include partial redefinitions of Reg. | |||
| 2497 | bool usesLanes(const MachineInstr &MI, Register, unsigned, LaneBitmask) const; | |||
| 2498 | ||||
| 2499 | /// Determine if ValNo is a copy of a value number in LR or Other.LR that will | |||
| 2500 | /// be pruned: | |||
| 2501 | /// | |||
| 2502 | /// %dst = COPY %src | |||
| 2503 | /// %src = COPY %dst <-- This value to be pruned. | |||
| 2504 | /// %dst = COPY %src <-- This value is a copy of a pruned value. | |||
| 2505 | bool isPrunedValue(unsigned ValNo, JoinVals &Other); | |||
| 2506 | ||||
| 2507 | public: | |||
| 2508 | JoinVals(LiveRange &LR, Register Reg, unsigned SubIdx, LaneBitmask LaneMask, | |||
| 2509 | SmallVectorImpl<VNInfo *> &newVNInfo, const CoalescerPair &cp, | |||
| 2510 | LiveIntervals *lis, const TargetRegisterInfo *TRI, bool SubRangeJoin, | |||
| 2511 | bool TrackSubRegLiveness) | |||
| 2512 | : LR(LR), Reg(Reg), SubIdx(SubIdx), LaneMask(LaneMask), | |||
| 2513 | SubRangeJoin(SubRangeJoin), TrackSubRegLiveness(TrackSubRegLiveness), | |||
| 2514 | NewVNInfo(newVNInfo), CP(cp), LIS(lis), Indexes(LIS->getSlotIndexes()), | |||
| 2515 | TRI(TRI), Assignments(LR.getNumValNums(), -1), | |||
| 2516 | Vals(LR.getNumValNums()) {} | |||
| 2517 | ||||
| 2518 | /// Analyze defs in LR and compute a value mapping in NewVNInfo. | |||
| 2519 | /// Returns false if any conflicts were impossible to resolve. | |||
| 2520 | bool mapValues(JoinVals &Other); | |||
| 2521 | ||||
| 2522 | /// Try to resolve conflicts that require all values to be mapped. | |||
| 2523 | /// Returns false if any conflicts were impossible to resolve. | |||
| 2524 | bool resolveConflicts(JoinVals &Other); | |||
| 2525 | ||||
| 2526 | /// Prune the live range of values in Other.LR where they would conflict with | |||
| 2527 | /// CR_Replace values in LR. Collect end points for restoring the live range | |||
| 2528 | /// after joining. | |||
| 2529 | void pruneValues(JoinVals &Other, SmallVectorImpl<SlotIndex> &EndPoints, | |||
| 2530 | bool changeInstrs); | |||
| 2531 | ||||
| 2532 | /// Removes subranges starting at copies that get removed. This sometimes | |||
| 2533 | /// happens when undefined subranges are copied around. These ranges contain | |||
| 2534 | /// no useful information and can be removed. | |||
| 2535 | void pruneSubRegValues(LiveInterval &LI, LaneBitmask &ShrinkMask); | |||
| 2536 | ||||
| 2537 | /// Pruning values in subranges can lead to removing segments in these | |||
| 2538 | /// subranges started by IMPLICIT_DEFs. The corresponding segments in | |||
| 2539 | /// the main range also need to be removed. This function will mark | |||
| 2540 | /// the corresponding values in the main range as pruned, so that | |||
| 2541 | /// eraseInstrs can do the final cleanup. | |||
| 2542 | /// The parameter @p LI must be the interval whose main range is the | |||
| 2543 | /// live range LR. | |||
| 2544 | void pruneMainSegments(LiveInterval &LI, bool &ShrinkMainRange); | |||
| 2545 | ||||
| 2546 | /// Erase any machine instructions that have been coalesced away. | |||
| 2547 | /// Add erased instructions to ErasedInstrs. | |||
| 2548 | /// Add foreign virtual registers to ShrinkRegs if their live range ended at | |||
| 2549 | /// the erased instrs. | |||
| 2550 | void eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs, | |||
| 2551 | SmallVectorImpl<Register> &ShrinkRegs, | |||
| 2552 | LiveInterval *LI = nullptr); | |||
| 2553 | ||||
| 2554 | /// Remove liverange defs at places where implicit defs will be removed. | |||
| 2555 | void removeImplicitDefs(); | |||
| 2556 | ||||
| 2557 | /// Get the value assignments suitable for passing to LiveInterval::join. | |||
| 2558 | const int *getAssignments() const { return Assignments.data(); } | |||
| 2559 | ||||
| 2560 | /// Get the conflict resolution for a value number. | |||
| 2561 | ConflictResolution getResolution(unsigned Num) const { | |||
| 2562 | return Vals[Num].Resolution; | |||
| 2563 | } | |||
| 2564 | }; | |||
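| | // A minimal driver sketch (not part of the original source; the actual | |||
| | // caller is RegisterCoalescer's virtual-register join logic, and the elided | |||
| | // constructor arguments and local names here are assumptions). The phase | |||
| | // order mirrors the public interface declared above: | |||
| | // | |||
| | //   JoinVals LHSVals(LHS, CP.getDstReg(), CP.getDstIdx(), ...); | |||
| | //   JoinVals RHSVals(RHS, CP.getSrcReg(), CP.getSrcIdx(), ...); | |||
| | //   if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals)) | |||
| | //     return false; // A CR_Impossible conflict was found. | |||
| | //   if (!LHSVals.resolveConflicts(RHSVals) || | |||
| | //       !RHSVals.resolveConflicts(LHSVals)) | |||
| | //     return false; // A CR_Unresolved conflict remained. | |||
| | //   LHSVals.pruneValues(RHSVals, EndPoints, true); | |||
| | //   RHSVals.pruneValues(LHSVals, EndPoints, true); | |||
| | //   LHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs); | |||
| | //   RHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs); | |||
| | //   LHS.join(RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), | |||
| | //            NewVNInfo); | |||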
| 2565 | ||||
| 2566 | } // end anonymous namespace | |||
| 2567 | ||||
| 2568 | LaneBitmask JoinVals::computeWriteLanes(const MachineInstr *DefMI, bool &Redef) | |||
| 2569 | const { | |||
| 2570 | LaneBitmask L; | |||
| 2571 | for (const MachineOperand &MO : DefMI->operands()) { | |||
| 2572 | if (!MO.isReg() || MO.getReg() != Reg || !MO.isDef()) | |||
| 2573 | continue; | |||
| 2574 | L |= TRI->getSubRegIndexLaneMask( | |||
| 2575 | TRI->composeSubRegIndices(SubIdx, MO.getSubReg())); | |||
| 2576 | if (MO.readsReg()) | |||
| 2577 | Redef = true; | |||
| 2578 | } | |||
| 2579 | return L; | |||
| 2580 | } | |||
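| | // Example (a sketch, not in the original source): for a partial redef | |||
| | // without a <read-undef> flag, such as "%dst:ssub1 = FOO", the def operand | |||
| | // contributes getSubRegIndexLaneMask(composeSubRegIndices(SubIdx, ssub1)) | |||
| | // to L, and Redef is set because the operand also reads the register to | |||
| | // preserve the untouched lanes. | |||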
| 2581 | ||||
| 2582 | std::pair<const VNInfo *, Register> | |||
| 2583 | JoinVals::followCopyChain(const VNInfo *VNI) const { | |||
| 2584 | Register TrackReg = Reg; | |||
| 2585 | ||||
| 2586 | while (!VNI->isPHIDef()) { | |||
| 2587 | SlotIndex Def = VNI->def; | |||
| 2588 | MachineInstr *MI = Indexes->getInstructionFromIndex(Def); | |||
| 2589 | assert(MI && "No defining instruction"); | |||
| 2590 | if (!MI->isFullCopy()) | |||
| 2591 | return std::make_pair(VNI, TrackReg); | |||
| 2592 | Register SrcReg = MI->getOperand(1).getReg(); | |||
| 2593 | if (!SrcReg.isVirtual()) | |||
| 2594 | return std::make_pair(VNI, TrackReg); | |||
| 2595 | ||||
| 2596 | const LiveInterval &LI = LIS->getInterval(SrcReg); | |||
| 2597 | const VNInfo *ValueIn; | |||
| 2598 | // No subrange involved. | |||
| 2599 | if (!SubRangeJoin || !LI.hasSubRanges()) { | |||
| 2600 | LiveQueryResult LRQ = LI.Query(Def); | |||
| 2601 | ValueIn = LRQ.valueIn(); | |||
| 2602 | } else { | |||
| 2603 | // Query subranges. Ensure that all matching ones take us to the same def | |||
| 2604 | // (allowing some of them to be undef). | |||
| 2605 | ValueIn = nullptr; | |||
| 2606 | for (const LiveInterval::SubRange &S : LI.subranges()) { | |||
| 2607 | // Transform lanemask to a mask in the joined live interval. | |||
| 2608 | LaneBitmask SMask = TRI->composeSubRegIndexLaneMask(SubIdx, S.LaneMask); | |||
| 2609 | if ((SMask & LaneMask).none()) | |||
| 2610 | continue; | |||
| 2611 | LiveQueryResult LRQ = S.Query(Def); | |||
| 2612 | if (!ValueIn) { | |||
| 2613 | ValueIn = LRQ.valueIn(); | |||
| 2614 | continue; | |||
| 2615 | } | |||
| 2616 | if (LRQ.valueIn() && ValueIn != LRQ.valueIn()) | |||
| 2617 | return std::make_pair(VNI, TrackReg); | |||
| 2618 | } | |||
| 2619 | } | |||
| 2620 | if (ValueIn == nullptr) { | |||
| 2621 | // Reaching an undefined value is legitimate, for example: | |||
| 2622 | // | |||
| 2623 | // 1 undef %0.sub1 = ... ;; %0.sub0 == undef | |||
| 2624 | // 2 %1 = COPY %0 ;; %1 is defined here. | |||
| 2625 | // 3 %0 = COPY %1 ;; Now %0.sub0 has a definition, | |||
| 2626 | // ;; but it's equivalent to "undef". | |||
| 2627 | return std::make_pair(nullptr, SrcReg); | |||
| 2628 | } | |||
| 2629 | VNI = ValueIn; | |||
| 2630 | TrackReg = SrcReg; | |||
| 2631 | } | |||
| 2632 | return std::make_pair(VNI, TrackReg); | |||
| 2633 | } | |||
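| | // Illustrative walk (a sketch, not part of the original source; register | |||
| | // names are hypothetical): | |||
| | // | |||
| | //   %a = DEF | |||
| | //   %b = COPY %a | |||
| | //   %c = COPY %b | |||
| | // | |||
| | // Starting from %c's value, the loop above returns (%a's VNInfo, %a): full | |||
| | // copies of virtual registers are followed until a PHI, a non-copy def, a | |||
| | // non-virtual source, or an undefined value ends the walk. | |||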
| 2634 | ||||
| 2635 | bool JoinVals::valuesIdentical(VNInfo *Value0, VNInfo *Value1, | |||
| 2636 | const JoinVals &Other) const { | |||
| 2637 | const VNInfo *Orig0; | |||
| 2638 | Register Reg0; | |||
| 2639 | std::tie(Orig0, Reg0) = followCopyChain(Value0); | |||
| 2640 | if (Orig0 == Value1 && Reg0 == Other.Reg) | |||
| 2641 | return true; | |||
| 2642 | ||||
| 2643 | const VNInfo *Orig1; | |||
| 2644 | Register Reg1; | |||
| 2645 | std::tie(Orig1, Reg1) = Other.followCopyChain(Value1); | |||
| 2646 | // If both values are undefined, and the source registers are the same | |||
| 2647 | // register, the values are identical. Filter out cases where only one | |||
| 2648 | // value is defined. | |||
| 2649 | if (Orig0 == nullptr || Orig1 == nullptr) | |||
| 2650 | return Orig0 == Orig1 && Reg0 == Reg1; | |||
| 2651 | ||||
| 2652 | // The values are equal if they are defined at the same place and use the | |||
| 2653 | // same register. Note that we cannot compare VNInfos directly as some of | |||
| 2654 | // them might be from a copy created in mergeSubRangeInto() while the other | |||
| 2655 | // is from the original LiveInterval. | |||
| 2656 | return Orig0->def == Orig1->def && Reg0 == Reg1; | |||
| 2657 | } | |||
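| | // Example (a sketch, not in the original source), mirroring the erasable | |||
| | // copy case documented in analyzeValue() below: | |||
| | // | |||
| | //   %ext   = DEF | |||
| | //   %other = COPY %ext | |||
| | //   %this  = COPY %ext | |||
| | // | |||
| | // Both copy chains terminate at %ext's defining value, so Orig0->def == | |||
| | // Orig1->def and Reg0 == Reg1, and the values compare identical even though | |||
| | // the VNInfo objects on the two sides differ. | |||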
| 2658 | ||||
| 2659 | JoinVals::ConflictResolution | |||
| 2660 | JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) { | |||
| 2661 | Val &V = Vals[ValNo]; | |||
| 2662 | assert(!V.isAnalyzed() && "Value has already been analyzed!"); | |||
| 2663 | VNInfo *VNI = LR.getValNumInfo(ValNo); | |||
| 2664 | if (VNI->isUnused()) { | |||
| 2665 | V.WriteLanes = LaneBitmask::getAll(); | |||
| 2666 | return CR_Keep; | |||
| 2667 | } | |||
| 2668 | ||||
| 2669 | // Get the instruction defining this value, compute the lanes written. | |||
| 2670 | const MachineInstr *DefMI = nullptr; | |||
| 2671 | if (VNI->isPHIDef()) { | |||
| 2672 | // Conservatively assume that all lanes in a PHI are valid. | |||
| 2673 | LaneBitmask Lanes = SubRangeJoin ? LaneBitmask::getLane(0) | |||
| 2674 | : TRI->getSubRegIndexLaneMask(SubIdx); | |||
| 2675 | V.ValidLanes = V.WriteLanes = Lanes; | |||
| 2676 | } else { | |||
| 2677 | DefMI = Indexes->getInstructionFromIndex(VNI->def); | |||
| 2678 | assert(DefMI != nullptr); | |||
| 2679 | if (SubRangeJoin) { | |||
| 2680 | // We don't care about the lanes when joining subregister ranges. | |||
| 2681 | V.WriteLanes = V.ValidLanes = LaneBitmask::getLane(0); | |||
| 2682 | if (DefMI->isImplicitDef()) { | |||
| 2683 | V.ValidLanes = LaneBitmask::getNone(); | |||
| 2684 | V.ErasableImplicitDef = true; | |||
| 2685 | } | |||
| 2686 | } else { | |||
| 2687 | bool Redef = false; | |||
| 2688 | V.ValidLanes = V.WriteLanes = computeWriteLanes(DefMI, Redef); | |||
| 2689 | ||||
| 2690 | // If this is a read-modify-write instruction, there may be more valid | |||
| 2691 | // lanes than the ones written by this instruction. | |||
| 2692 | // This only covers partial redef operands. DefMI may have normal use | |||
| 2693 | // operands reading the register. They don't contribute valid lanes. | |||
| 2694 | // | |||
| 2695 | // This adds ssub1 to the set of valid lanes in %src: | |||
| 2696 | // | |||
| 2697 | // %src:ssub1 = FOO | |||
| 2698 | // | |||
| 2699 | // This leaves only ssub1 valid, making any other lanes undef: | |||
| 2700 | // | |||
| 2701 | // %src:ssub1<def,read-undef> = FOO %src:ssub2 | |||
| 2702 | // | |||
| 2703 | // The <read-undef> flag on the def operand means that old lane values are | |||
| 2704 | // not important. | |||
| 2705 | if (Redef) { | |||
| 2706 | V.RedefVNI = LR.Query(VNI->def).valueIn(); | |||
| 2707 | assert((TrackSubRegLiveness || V.RedefVNI) && | |||
| 2708 | "Instruction is reading nonexistent value"); | |||
| 2709 | if (V.RedefVNI != nullptr) { | |||
| 2710 | computeAssignment(V.RedefVNI->id, Other); | |||
| 2711 | V.ValidLanes |= Vals[V.RedefVNI->id].ValidLanes; | |||
| 2712 | } | |||
| 2713 | } | |||
| 2714 | ||||
| 2715 | // An IMPLICIT_DEF writes undef values. | |||
| 2716 | if (DefMI->isImplicitDef()) { | |||
| 2717 | // We normally expect IMPLICIT_DEF values to be live only until the end | |||
| 2718 | // of their block. If the value is really live longer and gets pruned in | |||
| 2719 | // another block, this flag is cleared again. | |||
| 2720 | // | |||
| 2721 | // Clearing the valid lanes is deferred until it is sure this can be | |||
| 2722 | // erased. | |||
| 2723 | V.ErasableImplicitDef = true; | |||
| 2724 | } | |||
| 2725 | } | |||
| 2726 | } | |||
| 2727 | ||||
| 2728 | // Find the value in Other that overlaps VNI->def, if any. | |||
| 2729 | LiveQueryResult OtherLRQ = Other.LR.Query(VNI->def); | |||
| 2730 | ||||
| 2731 | // It is possible that both values are defined by the same instruction, or | |||
| 2732 | // the values are PHIs defined in the same block. When that happens, the two | |||
| 2733 | // values should be merged into one, but not into any preceding value. | |||
| 2734 | // The first value defined or visited gets CR_Keep, the other gets CR_Merge. | |||
| 2735 | if (VNInfo *OtherVNI = OtherLRQ.valueDefined()) { | |||
| 2736 | assert(SlotIndex::isSameInstr(VNI->def, OtherVNI->def) && "Broken LRQ"); | |||
| 2737 | ||||
| 2738 | // One value stays, the other is merged. Keep the earlier one, or the first | |||
| 2739 | // one we see. | |||
| 2740 | if (OtherVNI->def < VNI->def) | |||
| 2741 | Other.computeAssignment(OtherVNI->id, *this); | |||
| 2742 | else if (VNI->def < OtherVNI->def && OtherLRQ.valueIn()) { | |||
| 2743 | // This is an early-clobber def overlapping a live-in value in the other | |||
| 2744 | // register. Not mergeable. | |||
| 2745 | V.OtherVNI = OtherLRQ.valueIn(); | |||
| 2746 | return CR_Impossible; | |||
| 2747 | } | |||
| 2748 | V.OtherVNI = OtherVNI; | |||
| 2749 | Val &OtherV = Other.Vals[OtherVNI->id]; | |||
| 2750 | // Keep this value, check for conflicts when analyzing OtherVNI. | |||
| 2751 | if (!OtherV.isAnalyzed()) | |||
| 2752 | return CR_Keep; | |||
| 2753 | // Both sides have been analyzed now. | |||
| 2754 | // Allow overlapping PHI values. Any real interference would show up in a | |||
| 2755 | // predecessor, the PHI itself can't introduce any conflicts. | |||
| 2756 | if (VNI->isPHIDef()) | |||
| 2757 | return CR_Merge; | |||
| 2758 | if ((V.ValidLanes & OtherV.ValidLanes).any()) | |||
| 2759 | // Overlapping lanes can't be resolved. | |||
| 2760 | return CR_Impossible; | |||
| 2761 | else | |||
| 2762 | return CR_Merge; | |||
| 2763 | } | |||
| 2764 | ||||
| 2765 | // No simultaneous def. Is Other live at the def? | |||
| 2766 | V.OtherVNI = OtherLRQ.valueIn(); | |||
| 2767 | if (!V.OtherVNI) | |||
| 2768 | // No overlap, no conflict. | |||
| 2769 | return CR_Keep; | |||
| 2770 | ||||
| 2771 | assert(!SlotIndex::isSameInstr(VNI->def, V.OtherVNI->def) && "Broken LRQ"); | |||
| 2772 | ||||
| 2773 | // We have overlapping values, or possibly a kill of Other. | |||
| 2774 | // Recursively compute assignments up the dominator tree. | |||
| 2775 | Other.computeAssignment(V.OtherVNI->id, *this); | |||
| 2776 | Val &OtherV = Other.Vals[V.OtherVNI->id]; | |||
| 2777 | ||||
| 2778 | if (OtherV.ErasableImplicitDef) { | |||
| 2779 | // Check if OtherV is an IMPLICIT_DEF that extends beyond its basic block. | |||
| 2780 | // This shouldn't normally happen, but ProcessImplicitDefs can leave such | |||
| 2781 | // IMPLICIT_DEF instructions behind, and there is nothing wrong with it | |||
| 2782 | // technically. | |||
| 2783 | // | |||
| 2784 | // When it happens, treat that IMPLICIT_DEF as a normal value, and don't try | |||
| 2785 | // to erase the IMPLICIT_DEF instruction. | |||
| 2786 | if (DefMI && | |||
| 2787 | DefMI->getParent() != Indexes->getMBBFromIndex(V.OtherVNI->def)) { | |||
| 2788 | LLVM_DEBUG(dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def | |||
| 2789 | << " extends into " | |||
| 2790 | << printMBBReference(*DefMI->getParent()) | |||
| 2791 | << ", keeping it.\n"); | |||
| 2792 | OtherV.ErasableImplicitDef = false; | |||
| 2793 | } else { | |||
| 2794 | // We deferred clearing these lanes in case we needed to save them. | |||
| 2795 | OtherV.ValidLanes &= ~OtherV.WriteLanes; | |||
| 2796 | } | |||
| 2797 | } | |||
| 2798 | ||||
| 2799 | // Allow overlapping PHI values. Any real interference would show up in a | |||
| 2800 | // predecessor, the PHI itself can't introduce any conflicts. | |||
| 2801 | if (VNI->isPHIDef()) | |||
| 2802 | return CR_Replace; | |||
| 2803 | ||||
| 2804 | // Check for simple erasable conflicts. | |||
| 2805 | if (DefMI->isImplicitDef()) | |||
| 2806 | return CR_Erase; | |||
| 2807 | ||||
| 2808 | // Include the non-conflict where DefMI is a coalescable copy that kills | |||
| 2809 | // OtherVNI. We still want the copy erased and value numbers merged. | |||
| 2810 | if (CP.isCoalescable(DefMI)) { | |||
| 2811 | // Some of the lanes copied from OtherVNI may be undef, making them undef | |||
| 2812 | // here too. | |||
| 2813 | V.ValidLanes &= ~V.WriteLanes | OtherV.ValidLanes; | |||
| 2814 | return CR_Erase; | |||
| 2815 | } | |||
| 2816 | ||||
| 2817 | // This may not be a real conflict if DefMI simply kills Other and defines | |||
| 2818 | // VNI. | |||
| 2819 | if (OtherLRQ.isKill() && OtherLRQ.endPoint() <= VNI->def) | |||
| 2820 | return CR_Keep; | |||
| 2821 | ||||
| 2822 | // Handle the case where VNI and OtherVNI can be proven to be identical: | |||
| 2823 | // | |||
| 2824 | // %other = COPY %ext | |||
| 2825 | // %this = COPY %ext <-- Erase this copy | |||
| 2826 | // | |||
| 2827 | if (DefMI->isFullCopy() && !CP.isPartial() && | |||
| 2828 | valuesIdentical(VNI, V.OtherVNI, Other)) { | |||
| 2829 | V.Identical = true; | |||
| 2830 | return CR_Erase; | |||
| 2831 | } | |||
| 2832 | ||||
| 2833 | // The remaining checks apply to the lanes, which aren't tracked here. This | |||
| 2834 | // was already decided to be OK via the CR_Replace condition returned | |||
| 2835 | // below. | |||
| 2836 | if (SubRangeJoin) | |||
| 2837 | return CR_Replace; | |||
| 2838 | ||||
| 2839 | // If the lanes written by this instruction were all undef in OtherVNI, it is | |||
| 2840 | // still safe to join the live ranges. This can't be done with a simple value | |||
| 2841 | // mapping, though - OtherVNI will map to multiple values: | |||
| 2842 | // | |||
| 2843 | // 1 %dst:ssub0 = FOO <-- OtherVNI | |||
| 2844 | // 2 %src = BAR <-- VNI | |||
| 2845 | // 3 %dst:ssub1 = COPY killed %src <-- Eliminate this copy. | |||
| 2846 | // 4 BAZ killed %dst | |||
| 2847 | // 5 QUUX killed %src | |||
| 2848 | // | |||
| 2849 | // Here OtherVNI will map to itself in [1;2), but to VNI in [2;5). CR_Replace | |||
| 2850 | // handles this complex value mapping. | |||
| 2851 | if ((V.WriteLanes & OtherV.ValidLanes).none()) | |||
| 2852 | return CR_Replace; | |||
| 2853 | ||||
| 2854 | // If the other live range is killed by DefMI and the live ranges are still | |||
| 2855 | // overlapping, it must be because we're looking at an early clobber def: | |||
| 2856 | // | |||
| 2857 | // %dst<def,early-clobber> = ASM killed %src | |||
| 2858 | // | |||
| 2859 | // In this case, it is illegal to merge the two live ranges since the early | |||
| 2860 | // clobber def would clobber %src before it was read. | |||
| 2861 | if (OtherLRQ.isKill()) { | |||
| 2862 | // This case where the def doesn't overlap the kill is handled above. | |||
| 2863 | assert(VNI->def.isEarlyClobber() && | |||
| 2864 | "Only early clobber defs can overlap a kill"); | |||
| 2865 | return CR_Impossible; | |||
| 2866 | } | |||
| 2867 | ||||
| 2868 | // VNI is clobbering live lanes in OtherVNI, but there is still the | |||
| 2869 | // possibility that no instructions actually read the clobbered lanes. | |||
| 2870 | // If we're clobbering all the lanes in OtherVNI, at least one must be read. | |||
| 2871 | // Otherwise Other.LR wouldn't be live here. | |||
| 2872 | if ((TRI->getSubRegIndexLaneMask(Other.SubIdx) & ~V.WriteLanes).none()) | |||
| 2873 | return CR_Impossible; | |||
| 2874 | ||||
| 2875 | if (TrackSubRegLiveness) { | |||
| 2876 | auto &OtherLI = LIS->getInterval(Other.Reg); | |||
| 2877 | // If OtherVNI does not have subranges, it means all the lanes of OtherVNI | |||
| 2878 | // share the same live range, so we just need to check whether they have | |||
| 2879 | // any conflict bit in their LaneMask. | |||
| 2880 | if (!OtherLI.hasSubRanges()) { | |||
| 2881 | LaneBitmask OtherMask = TRI->getSubRegIndexLaneMask(Other.SubIdx); | |||
| 2882 | return (OtherMask & V.WriteLanes).none() ? CR_Replace : CR_Impossible; | |||
| 2883 | } | |||
| 2884 | ||||
| 2885 | // If we are clobbering some active lanes of OtherVNI at VNI->def, it is | |||
| 2886 | // impossible to resolve the conflict. Otherwise, we can just replace | |||
| 2887 | // OtherVNI because of no real conflict. | |||
| 2888 | for (LiveInterval::SubRange &OtherSR : OtherLI.subranges()) { | |||
| 2889 | LaneBitmask OtherMask = | |||
| 2890 | TRI->composeSubRegIndexLaneMask(Other.SubIdx, OtherSR.LaneMask); | |||
| 2891 | if ((OtherMask & V.WriteLanes).none()) | |||
| 2892 | continue; | |||
| 2893 | ||||
| 2894 | auto OtherSRQ = OtherSR.Query(VNI->def); | |||
| 2895 | if (OtherSRQ.valueIn() && OtherSRQ.endPoint() > VNI->def) { | |||
| 2896 | // VNI is clobbering some lanes of OtherVNI, they have real conflict. | |||
| 2897 | return CR_Impossible; | |||
| 2898 | } | |||
| 2899 | } | |||
| 2900 | ||||
| 2901 | // VNI is NOT clobbering any lane of OtherVNI, just replace OtherVNI. | |||
| 2902 | return CR_Replace; | |||
| 2903 | } | |||
| 2904 | ||||
| 2905 | // We need to verify that no instructions are reading the clobbered lanes. | |||
| 2906 | // To save compile time, we'll only check that locally. Don't allow the | |||
| 2907 | // tainted value to escape the basic block. | |||
| 2908 | MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def); | |||
| 2909 | if (OtherLRQ.endPoint() >= Indexes->getMBBEndIdx(MBB)) | |||
| 2910 | return CR_Impossible; | |||
| 2911 | ||||
| 2912 | // There are still some things that could go wrong besides clobbered lanes | |||
| 2913 | // being read, for example OtherVNI may be only partially redefined in MBB, | |||
| 2914 | // and some clobbered lanes could escape the block. Save this analysis for | |||
| 2915 | // resolveConflicts() when all values have been mapped. We need to know | |||
| 2916 | // RedefVNI and WriteLanes for any later defs in MBB, and we can't compute | |||
| 2917 | // that now - the recursive analyzeValue() calls must go upwards in the | |||
| 2918 | // dominator tree. | |||
| 2919 | return CR_Unresolved; | |||
| 2920 | } | |||
| 2921 | ||||
| 2922 | void JoinVals::computeAssignment(unsigned ValNo, JoinVals &Other) { | |||
| 2923 | Val &V = Vals[ValNo]; | |||
| 2924 | if (V.isAnalyzed()) { | |||
| 2925 | // Recursion should always move up the dominator tree, so ValNo is not | |||
| 2926 | // supposed to reappear before it has been assigned. | |||
| 2927 | assert(Assignments[ValNo] != -1 && "Bad recursion?"); | |||
| 2928 | return; | |||
| 2929 | } | |||
| 2930 | switch ((V.Resolution = analyzeValue(ValNo, Other))) { | |||
| 2931 | case CR_Erase: | |||
| 2932 | case CR_Merge: | |||
| 2933 | // Merge this ValNo into OtherVNI. | |||
| 2934 | assert(V.OtherVNI && "OtherVNI not assigned, can't merge."); | |||
| 2935 | assert(Other.Vals[V.OtherVNI->id].isAnalyzed() && "Missing recursion"); | |||
| 2936 | Assignments[ValNo] = Other.Assignments[V.OtherVNI->id]; | |||
| 2937 | LLVM_DEBUG(dbgs() << "\t\tmerge " << printReg(Reg) << ':' << ValNo << '@' | |||
| 2938 | << LR.getValNumInfo(ValNo)->def << " into " | |||
| 2939 | << printReg(Other.Reg) << ':' << V.OtherVNI->id << '@' | |||
| 2940 | << V.OtherVNI->def << " --> @" | |||
| 2941 | << NewVNInfo[Assignments[ValNo]]->def << '\n'); | |||
| 2942 | break; | |||
| 2943 | case CR_Replace: | |||
| 2944 | case CR_Unresolved: { | |||
| 2945 | // The other value is going to be pruned if this join is successful. | |||
| 2946 | assert(V.OtherVNI && "OtherVNI not assigned, can't prune"); | |||
| 2947 | Val &OtherV = Other.Vals[V.OtherVNI->id]; | |||
| 2948 | // We cannot erase an IMPLICIT_DEF if we don't have valid values for all | |||
| 2949 | // its lanes. | |||
| 2950 | if (OtherV.ErasableImplicitDef && | |||
| 2951 | TrackSubRegLiveness && | |||
| 2952 | (OtherV.WriteLanes & ~V.ValidLanes).any()) { | |||
| 2953 | LLVM_DEBUG(dbgs() << "Cannot erase implicit_def with missing values\n"); | |||
| 2954 | ||||
| 2955 | OtherV.ErasableImplicitDef = false; | |||
| 2956 | // The valid lanes written by the implicit_def were speculatively cleared | |||
| 2957 | // before, so make this more conservative. It may be better to track this, | |||
| 2958 | // but I haven't found a testcase where it matters. | |||
| 2959 | OtherV.ValidLanes = LaneBitmask::getAll(); | |||
| 2960 | } | |||
| 2961 | ||||
| 2962 | OtherV.Pruned = true; | |||
| 2963 | LLVM_FALLTHROUGH; | |||
| 2964 | } | |||
| 2965 | default: | |||
| 2966 | // This value number needs to go in the final joined live range. | |||
| 2967 | Assignments[ValNo] = NewVNInfo.size(); | |||
| 2968 | NewVNInfo.push_back(LR.getValNumInfo(ValNo)); | |||
| 2969 | break; | |||
| 2970 | } | |||
| 2971 | } | |||
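| | // Sketch of the resulting mapping (added here, not part of the original | |||
| | // source): a CR_Erase or CR_Merge value reuses the NewVNInfo slot already | |||
| | // assigned to its OtherVNI, while every other resolution appends a fresh | |||
| | // entry: | |||
| | // | |||
| | //   Assignments[ValNo] == Other.Assignments[V.OtherVNI->id]  // merged | |||
| | //   Assignments[ValNo] == NewVNInfo.size() - 1               // kept | |||
| | // | |||
| | // This is the form LiveInterval::join() consumes via getAssignments(). | |||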
| 2972 | ||||
| 2973 | bool JoinVals::mapValues(JoinVals &Other) { | |||
| 2974 | for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) { | |||
| 2975 | computeAssignment(i, Other); | |||
| 2976 | if (Vals[i].Resolution == CR_Impossible) { | |||
| 2977 | LLVM_DEBUG(dbgs() << "\t\tinterference at " << printReg(Reg) << ':' << i | |||
| 2978 | << '@' << LR.getValNumInfo(i)->def << '\n'); | |||
| 2979 | return false; | |||
| 2980 | } | |||
| 2981 | } | |||
| 2982 | return true; | |||
| 2983 | } | |||
| 2984 | ||||
| 2985 | bool JoinVals:: | |||
| 2986 | taintExtent(unsigned ValNo, LaneBitmask TaintedLanes, JoinVals &Other, | |||
| 2987 | SmallVectorImpl<std::pair<SlotIndex, LaneBitmask>> &TaintExtent) { | |||
| 2988 | VNInfo *VNI = LR.getValNumInfo(ValNo); | |||
| 2989 | MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def); | |||
| 2990 | SlotIndex MBBEnd = Indexes->getMBBEndIdx(MBB); | |||
| 2991 | ||||
| 2992 | // Scan Other.LR from VNI.def to MBBEnd. | |||
| 2993 | LiveInterval::iterator OtherI = Other.LR.find(VNI->def); | |||
| 2994 | assert(OtherI != Other.LR.end() && "No conflict?"); | |||
| 2995 | do { | |||
| 2996 | // OtherI is pointing to a tainted value. Abort the join if the tainted | |||
| 2997 | // lanes escape the block. | |||
| 2998 | SlotIndex End = OtherI->end; | |||
| 2999 | if (End >= MBBEnd) { | |||
| 3000 | LLVM_DEBUG(dbgs() << "\t\ttaints global " << printReg(Other.Reg) << ':' | |||
| 3001 | << OtherI->valno->id << '@' << OtherI->start << '\n'); | |||
| 3002 | return false; | |||
| 3003 | } | |||
| 3004 | LLVM_DEBUG(dbgs() << "\t\ttaints local " << printReg(Other.Reg) << ':' | |||
| 3005 | << OtherI->valno->id << '@' << OtherI->start << " to " | |||
| 3006 | << End << '\n'); | |||
| 3007 | // A dead def is not a problem. | |||
| 3008 | if (End.isDead()) | |||
| 3009 | break; | |||
| 3010 | TaintExtent.push_back(std::make_pair(End, TaintedLanes)); | |||
| 3011 | ||||
| 3012 | // Check for another def in the MBB. | |||
| 3013 | if (++OtherI == Other.LR.end() || OtherI->start >= MBBEnd) | |||
| 3014 | break; | |||
| 3015 | ||||
| 3016 | // Lanes written by the new def are no longer tainted. | |||
| 3017 | const Val &OV = Other.Vals[OtherI->valno->id]; | |||
| 3018 | TaintedLanes &= ~OV.WriteLanes; | |||
| 3019 | if (!OV.RedefVNI) | |||
| 3020 | break; | |||
| 3021 | } while (TaintedLanes.any()); | |||
| 3022 | return true; | |||
| 3023 | } | |||
| 3024 | ||||
| 3025 | bool JoinVals::usesLanes(const MachineInstr &MI, Register Reg, unsigned SubIdx, | |||
| 3026 | LaneBitmask Lanes) const { | |||
| 3027 | if (MI.isDebugOrPseudoInstr()) | |||
| 3028 | return false; | |||
| 3029 | for (const MachineOperand &MO : MI.operands()) { | |||
| 3030 | if (!MO.isReg() || MO.isDef() || MO.getReg() != Reg) | |||
| 3031 | continue; | |||
| 3032 | if (!MO.readsReg()) | |||
| 3033 | continue; | |||
| 3034 | unsigned S = TRI->composeSubRegIndices(SubIdx, MO.getSubReg()); | |||
| 3035 | if ((Lanes & TRI->getSubRegIndexLaneMask(S)).any()) | |||
| 3036 | return true; | |||
| 3037 | } | |||
| 3038 | return false; | |||
| 3039 | } | |||
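| | // Example (a sketch, not in the original source): with Lanes equal to the | |||
| | // mask of ssub1, a use like "BAZ %reg:ssub0" composes to a lane mask | |||
| | // disjoint from Lanes and is ignored, while a full read "BAZ %reg" | |||
| | // intersects and returns true. Def operands are skipped entirely, so a | |||
| | // partial redef does not count as a use of the tainted lanes. | |||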
| 3040 | ||||
| 3041 | bool JoinVals::resolveConflicts(JoinVals &Other) { | |||
| 3042 | for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) { | |||
| 3043 | Val &V = Vals[i]; | |||
| 3044 | assert(V.Resolution != CR_Impossible && "Unresolvable conflict"); | |||
| 3045 | if (V.Resolution != CR_Unresolved) | |||
| 3046 | continue; | |||
| 3047 | LLVM_DEBUG(dbgs() << "\t\tconflict at " << printReg(Reg) << ':' << i << '@' | |||
| 3048 | << LR.getValNumInfo(i)->def | |||
| 3049 | << ' ' << PrintLaneMask(LaneMask) << '\n'); | |||
| 3050 | if (SubRangeJoin) | |||
| 3051 | return false; | |||
| 3052 | ||||
| 3053 | ++NumLaneConflicts; | |||
| 3054 | assert(V.OtherVNI && "Inconsistent conflict resolution."); | |||
| 3055 | VNInfo *VNI = LR.getValNumInfo(i); | |||
| 3056 | const Val &OtherV = Other.Vals[V.OtherVNI->id]; | |||
| 3057 | ||||
| 3058 | // VNI is known to clobber some lanes in OtherVNI. If we go ahead with the | |||
| 3059 | // join, those lanes will be tainted with a wrong value. Get the extent of | |||
| 3060 | // the tainted lanes. | |||
| 3061 | LaneBitmask TaintedLanes = V.WriteLanes & OtherV.ValidLanes; | |||
| 3062 | SmallVector<std::pair<SlotIndex, LaneBitmask>, 8> TaintExtent; | |||
| 3063 | if (!taintExtent(i, TaintedLanes, Other, TaintExtent)) | |||
| 3064 | // Tainted lanes would extend beyond the basic block. | |||
| 3065 | return false; | |||
| 3066 | ||||
| 3067 | assert(!TaintExtent.empty() && "There should be at least one conflict."); | |||
| 3068 | ||||
| 3069 | // Now look at the instructions from VNI->def to TaintExtent (inclusive). | |||
| 3070 | MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def); | |||
| 3071 | MachineBasicBlock::iterator MI = MBB->begin(); | |||
| 3072 | if (!VNI->isPHIDef()) { | |||
| 3073 | MI = Indexes->getInstructionFromIndex(VNI->def); | |||
| 3074 | if (!VNI->def.isEarlyClobber()) { | |||
| 3075 | // No need to check the instruction defining VNI for reads. | |||
| 3076 | ++MI; | |||
| 3077 | } | |||
| 3078 | } | |||
| 3079 | assert(!SlotIndex::isSameInstr(VNI->def, TaintExtent.front().first) && | |||
| 3080 | "Interference ends on VNI->def. Should have been handled earlier"); | |||
| 3081 | MachineInstr *LastMI = | |||
| 3082 | Indexes->getInstructionFromIndex(TaintExtent.front().first); | |||
| 3083 | assert(LastMI && "Range must end at a proper instruction"); | |||
| 3084 | unsigned TaintNum = 0; | |||
| 3085 | while (true) { | |||
| 3086 | assert(MI != MBB->end() && "Bad LastMI"); | |||
| 3087 | if (usesLanes(*MI, Other.Reg, Other.SubIdx, TaintedLanes)) { | |||
| 3088 | LLVM_DEBUG(dbgs() << "\t\ttainted lanes used by: " << *MI); | |||
| 3089 | return false; | |||
| 3090 | } | |||
| 3091 | // LastMI is the last instruction to use the current value. | |||
| 3092 | if (&*MI == LastMI) { | |||
| 3093 | if (++TaintNum == TaintExtent.size()) | |||
| 3094 | break; | |||
| 3095 | LastMI = Indexes->getInstructionFromIndex(TaintExtent[TaintNum].first); | |||
| 3096 | assert(LastMI && "Range must end at a proper instruction"); | |||
| 3097 | TaintedLanes = TaintExtent[TaintNum].second; | |||
| 3098 | } | |||
| 3099 | ++MI; | |||
| 3100 | } | |||
| 3101 | ||||
| 3102 | // The tainted lanes are unused. | |||
| 3103 | V.Resolution = CR_Replace; | |||
| 3104 | ++NumLaneResolves; | |||
| 3105 | } | |||
| 3106 | return true; | |||
| 3107 | } | |||
| 3108 | ||||
| 3109 | bool JoinVals::isPrunedValue(unsigned ValNo, JoinVals &Other) { | |||
| 3110 | Val &V = Vals[ValNo]; | |||
| 3111 | if (V.Pruned || V.PrunedComputed) | |||
| 3112 | return V.Pruned; | |||
| 3113 | ||||
| 3114 | if (V.Resolution != CR_Erase && V.Resolution != CR_Merge) | |||
| 3115 | return V.Pruned; | |||
| 3116 | ||||
| 3117 | // Follow copies up the dominator tree and check if any intermediate value | |||
| 3118 | // has been pruned. | |||
| 3119 | V.PrunedComputed = true; | |||
| 3120 | V.Pruned = Other.isPrunedValue(V.OtherVNI->id, *this); | |||
| 3121 | return V.Pruned; | |||
| 3122 | } | |||
| 3123 | ||||
| 3124 | void JoinVals::pruneValues(JoinVals &Other, | |||
| 3125 | SmallVectorImpl<SlotIndex> &EndPoints, | |||
| 3126 | bool changeInstrs) { | |||
| 3127 | for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) { | |||
| 3128 | SlotIndex Def = LR.getValNumInfo(i)->def; | |||
| 3129 | switch (Vals[i].Resolution) { | |||
| 3130 | case CR_Keep: | |||
| 3131 | break; | |||
| 3132 | case CR_Replace: { | |||
| 3133 | // This value takes precedence over the value in Other.LR. | |||
| 3134 | LIS->pruneValue(Other.LR, Def, &EndPoints); | |||
| 3135 | // Check if we're replacing an IMPLICIT_DEF value. The IMPLICIT_DEF | |||
| 3136 | // instructions are only inserted to provide a live-out value for PHI | |||
| 3137 | // predecessors, so the instruction should simply go away once its value | |||
| 3138 | // has been replaced. | |||
| 3139 | Val &OtherV = Other.Vals[Vals[i].OtherVNI->id]; | |||
| 3140 | bool EraseImpDef = OtherV.ErasableImplicitDef && | |||
| 3141 | OtherV.Resolution == CR_Keep; | |||
| 3142 | if (!Def.isBlock()) { | |||
| 3143 | if (changeInstrs) { | |||
| 3144 | // Remove <def,read-undef> flags. This def is now a partial redef. | |||
| 3145 | // Also remove dead flags since the joined live range will | |||
| 3146 | // continue past this instruction. | |||
| 3147 | for (MachineOperand &MO : | |||
| 3148 | Indexes->getInstructionFromIndex(Def)->operands()) { | |||
| 3149 | if (MO.isReg() && MO.isDef() && MO.getReg() == Reg) { | |||
| 3150 | if (MO.getSubReg() != 0 && MO.isUndef() && !EraseImpDef) | |||
| 3151 | MO.setIsUndef(false); | |||
| 3152 | MO.setIsDead(false); | |||
| 3153 | } | |||
| 3154 | } | |||
| 3155 | } | |||
| 3156 | // This value will reach instructions below, but we need to make sure | |||
| 3157 | // the live range also reaches the instruction at Def. | |||
| 3158 | if (!EraseImpDef) | |||
| 3159 | EndPoints.push_back(Def); | |||
| 3160 | } | |||
| 3161 | LLVM_DEBUG(dbgs() << "\t\tpruned " << printReg(Other.Reg) << " at " << Def | |||
| 3162 | << ": " << Other.LR << '\n'); | |||
| 3163 | break; | |||
| 3164 | } | |||
| 3165 | case CR_Erase: | |||
| 3166 | case CR_Merge: | |||
| 3167 | if (isPrunedValue(i, Other)) { | |||
| 3168 | // This value is ultimately a copy of a pruned value in LR or Other.LR. | |||
| 3169 | // We can no longer trust the value mapping computed by | |||
| 3170 | // computeAssignment(), the value that was originally copied could have | |||
| 3171 | // been replaced. | |||
| 3172 | LIS->pruneValue(LR, Def, &EndPoints); | |||
| 3173 | LLVM_DEBUG(dbgs() << "\t\tpruned all of " << printReg(Reg) << " at " | |||
| 3174 | << Def << ": " << LR << '\n'); | |||
| 3175 | } | |||
| 3176 | break; | |||
| 3177 | case CR_Unresolved: | |||
| 3178 | case CR_Impossible: | |||
| 3179 | llvm_unreachable("Unresolved conflicts"); | |||
| 3180 | } | |||
| 3181 | } | |||
| 3182 | } | |||
| 3183 | ||||
| 3184 | // Check if the segment consists of a copied live-through value (i.e. the copy | |||
| 3185 | // in the block only extended the liveness of an undef value, which we may need | |||
| 3186 | // to handle). | |||
| 3187 | static bool isLiveThrough(const LiveQueryResult Q) { | |||
| 3188 | return Q.valueIn() && Q.valueIn()->isPHIDef() && Q.valueIn() == Q.valueOut(); | |||
| 3189 | } | |||
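| | // Example (a sketch, not in the original source): if the value entering the | |||
| | // block is a PHI-def and the same value also flows out, the query describes | |||
| | // a live-through segment; a copy in that block merely extended that | |||
| | // liveness rather than defining a new value, which matters when the | |||
| | // extended value was undef. | |||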
| 3190 | ||||
| 3191 | /// Consider the following situation when coalescing the copy between | |||
| 3192 | /// %31 and %45 at 800. (The vertical lines represent live range segments.) | |||
| 3193 | /// | |||
| 3194 | /// Main range Subrange 0004 (sub2) | |||
| 3195 | /// %31 %45 %31 %45 | |||
| 3196 | /// 544 %45 = COPY %28 + + | |||
| 3197 | /// | v1 | v1 | |||
| 3198 | /// 560B bb.1: + + | |||
| 3199 | /// 624 = %45.sub2 | v2 | v2 | |||
| 3200 | /// 800 %31 = COPY %45 + + + + | |||
| 3201 | /// | v0 | v0 | |||
| 3202 | /// 816 %31.sub1 = ... + | | |||
| 3203 | /// 880 %30 = COPY %31 | v1 + | |||
| 3204 | /// 928 %45 = COPY %30 | + + | |||
| 3205 | /// | | v0 | v0 <--+ | |||
| 3206 | /// 992B ; backedge -> bb.1 | + + | | |||
| 3207 | /// 1040 = %31.sub0 + | | |||
| 3208 | /// This value must remain | |||
| 3209 | /// live-out! | |||
| 3210 | /// | |||
| 3211 | /// Assuming that %31 is coalesced into %45, the copy at 928 becomes | |||
| 3212 | /// redundant, since it copies the value from %45 back into it. The | |||
| 3213 | /// conflict resolution for the main range determines that %45.v0 is | |||
| 3214 | /// to be erased, which is ok since %31.v1 is identical to it. | |||
| 3215 | /// The problem happens with the subrange for sub2: it has to be live | |||
| 3216 | /// on exit from the block, but since 928 was actually a point of | |||
| 3217 | /// definition of %45.sub2, %45.sub2 was not live immediately prior | |||
| 3218 | /// to that definition. As a result, when 928 was erased, the value v0 | |||
| 3219 | /// for %45.sub2 was pruned in pruneSubRegValues. Consequently, an | |||
| 3220 | /// IMPLICIT_DEF was inserted as a "backedge" definition for %45.sub2, | |||
| 3221 | /// providing an incorrect value to the use at 624. | |||
| 3222 | /// | |||
| 3223 | /// Since the main-range values %31.v1 and %45.v0 were proved to be | |||
| 3224 | /// identical, the corresponding values in subranges must also be the | |||
| 3225 | /// same. A redundant copy is removed because it's not needed, and not | |||
| 3226 | /// because it copied an undefined value, so any liveness that originated | |||
| 3227 | /// from that copy cannot disappear. When pruning a value that started | |||
| 3228 | /// at the removed copy, the corresponding identical value must be | |||
| 3229 | /// extended to replace it. | |||
| 3230 | void JoinVals::pruneSubRegValues(LiveInterval &LI, LaneBitmask &ShrinkMask) { | |||
| 3231 | // Look for values being erased. | |||
| 3232 | bool DidPrune = false; | |||
| 3233 | for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) { | |||
| 3234 | Val &V = Vals[i]; | |||
| 3235 | // We should trigger in all cases in which eraseInstrs() does something. | |||
| 3236 | // The conditions below must match what eraseInstrs() is doing. | |||
| 3237 | if (V.Resolution != CR_Erase && | |||
| 3238 | (V.Resolution != CR_Keep || !V.ErasableImplicitDef || !V.Pruned)) | |||
| 3239 | continue; | |||
| 3240 | ||||
| 3241 | // Check subranges at the point where the copy will be removed. | |||
| 3242 | SlotIndex Def = LR.getValNumInfo(i)->def; | |||
| 3243 | SlotIndex OtherDef; | |||
| 3244 | if (V.Identical) | |||
| 3245 | OtherDef = V.OtherVNI->def; | |||
| 3246 | ||||
| 3247 | // Print message so mismatches with eraseInstrs() can be diagnosed. | |||
| 3248 | LLVM_DEBUG(dbgs() << "\t\tExpecting instruction removal at " << Def | |||
| 3249 | << '\n'); | |||
| 3250 | for (LiveInterval::SubRange &S : LI.subranges()) { | |||
| 3251 | LiveQueryResult Q = S.Query(Def); | |||
| 3252 | ||||
| 3253 | // If a subrange starts at the copy then an undefined value has been | |||
| 3254 | // copied and we must remove that subrange value as well. | |||
| 3255 | VNInfo *ValueOut = Q.valueOutOrDead(); | |||
| 3256 | if (ValueOut != nullptr && (Q.valueIn() == nullptr || | |||
| 3257 | (V.Identical && V.Resolution == CR_Erase && | |||
| 3258 | ValueOut->def == Def))) { | |||
| 3259 | LLVM_DEBUG(dbgs() << "\t\tPrune sublane " << PrintLaneMask(S.LaneMask) | |||
| 3260 | << " at " << Def << "\n"); | |||
| 3261 | SmallVector<SlotIndex,8> EndPoints; | |||
| 3262 | LIS->pruneValue(S, Def, &EndPoints); | |||
| 3263 | DidPrune = true; | |||
| 3264 | // Mark value number as unused. | |||
| 3265 | ValueOut->markUnused(); | |||
| 3266 | ||||
| 3267 | if (V.Identical && S.Query(OtherDef).valueOutOrDead()) { | |||
| 3268 | // If V is identical to V.OtherVNI (and S was live at OtherDef), | |||
| 3269 | // then we can't simply prune V from S. V needs to be replaced | |||
| 3270 | // with V.OtherVNI. | |||
| 3271 | LIS->extendToIndices(S, EndPoints); | |||
| 3272 | } | |||
| 3273 | ||||
| 3274 | // We may need to eliminate the subrange if the copy introduced a live | |||
| 3275 | // out undef value. | |||
| 3276 | if (ValueOut->isPHIDef()) | |||
| 3277 | ShrinkMask |= S.LaneMask; | |||
| 3278 | continue; | |||
| 3279 | } | |||
| 3280 | ||||
| 3281 | // If a subrange ends at the copy, then a value was copied but only | |||
| 3282 | // partially used later. Shrink the subregister range appropriately. | |||
| 3283 | // | |||
| 3284 | // Ultimately this calls shrinkToUses, so assuming ShrinkMask is | |||
| 3285 | // conservatively correct. | |||
| 3286 | if ((Q.valueIn() != nullptr && Q.valueOut() == nullptr) || | |||
| 3287 | (V.Resolution == CR_Erase && isLiveThrough(Q))) { | |||
| 3288 | LLVM_DEBUG(dbgs() << "\t\tDead uses at sublane " | |||
| 3289 | << PrintLaneMask(S.LaneMask) << " at " << Def | |||
| 3290 | << "\n"); | |||
| 3291 | ShrinkMask |= S.LaneMask; | |||
| 3292 | } | |||
| 3293 | } | |||
| 3294 | } | |||
| 3295 | if (DidPrune) | |||
| 3296 | LI.removeEmptySubRanges(); | |||
| 3297 | } | |||
| 3298 | ||||
| 3299 | /// Check if any of the subranges of @p LI contain a definition at @p Def. | |||
| 3300 | static bool isDefInSubRange(LiveInterval &LI, SlotIndex Def) { | |||
| 3301 | for (LiveInterval::SubRange &SR : LI.subranges()) { | |||
| 3302 | if (VNInfo *VNI = SR.Query(Def).valueOutOrDead()) | |||
| 3303 | if (VNI->def == Def) | |||
| 3304 | return true; | |||
| 3305 | } | |||
| 3306 | return false; | |||
| 3307 | } | |||
| 3308 | ||||
| 3309 | void JoinVals::pruneMainSegments(LiveInterval &LI, bool &ShrinkMainRange) { | |||
| 3310 | assert(&static_cast<LiveRange&>(LI) == &LR); | |||
| 3311 | ||||
| 3312 | for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) { | |||
| 3313 | if (Vals[i].Resolution != CR_Keep) | |||
| 3314 | continue; | |||
| 3315 | VNInfo *VNI = LR.getValNumInfo(i); | |||
| 3316 | if (VNI->isUnused() || VNI->isPHIDef() || isDefInSubRange(LI, VNI->def)) | |||
| 3317 | continue; | |||
| 3318 | Vals[i].Pruned = true; | |||
| 3319 | ShrinkMainRange = true; | |||
| 3320 | } | |||
| 3321 | } | |||
| 3322 | ||||
| 3323 | void JoinVals::removeImplicitDefs() { | |||
| 3324 | for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) { | |||
| 3325 | Val &V = Vals[i]; | |||
| 3326 | if (V.Resolution != CR_Keep || !V.ErasableImplicitDef || !V.Pruned) | |||
| 3327 | continue; | |||
| 3328 | ||||
| 3329 | VNInfo *VNI = LR.getValNumInfo(i); | |||
| 3330 | VNI->markUnused(); | |||
| 3331 | LR.removeValNo(VNI); | |||
| 3332 | } | |||
| 3333 | } | |||
| 3334 | ||||
| 3335 | void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs, | |||
| 3336 | SmallVectorImpl<Register> &ShrinkRegs, | |||
| 3337 | LiveInterval *LI) { | |||
| 3338 | for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) { | |||
| 3339 | // Get the def location before markUnused() below invalidates it. | |||
| 3340 | VNInfo *VNI = LR.getValNumInfo(i); | |||
| 3341 | SlotIndex Def = VNI->def; | |||
| 3342 | switch (Vals[i].Resolution) { | |||
| 3343 | case CR_Keep: { | |||
| 3344 | // If an IMPLICIT_DEF value is pruned, it doesn't serve a purpose any | |||
| 3345 | // longer. The IMPLICIT_DEF instructions are only inserted by | |||
| 3346 | // PHIElimination to guarantee that all PHI predecessors have a value. | |||
| 3347 | if (!Vals[i].ErasableImplicitDef || !Vals[i].Pruned) | |||
| 3348 | break; | |||
| 3349 | // Remove value number i from LR. | |||
| 3350 | // For intervals with subranges, removing a segment from the main range | |||
| 3351 | // may require extending the previous segment: for each definition of | |||
| 3352 | // a subregister, there will be a corresponding def in the main range. | |||
| 3353 | // That def may fall in the middle of a segment from another subrange. | |||
| 3354 | // In such cases, removing this def from the main range must be | |||
| 3355 | // complemented by extending the main range to account for the liveness | |||
| 3356 | // of the other subrange. | |||
| 3357 | // The new end point of the main range segment to be extended. | |||
| 3358 | SlotIndex NewEnd; | |||
| 3359 | if (LI != nullptr) { | |||
| 3360 | LiveRange::iterator I = LR.FindSegmentContaining(Def); | |||
| 3361 | assert(I != LR.end()); | |||
| 3362 | // Do not extend beyond the end of the segment being removed. | |||
| 3363 | // The segment may have been pruned in preparation for joining | |||
| 3364 | // live ranges. | |||
| 3365 | NewEnd = I->end; | |||
| 3366 | } | |||
| 3367 | ||||
| 3368 | LR.removeValNo(VNI); | |||
| 3369 | // Note that this VNInfo is reused and still referenced in NewVNInfo; | |||
| 3370 | // make it appear like an unused value number. | |||
| 3371 | VNI->markUnused(); | |||
| 3372 | ||||
| 3373 | if (LI != nullptr && LI->hasSubRanges()) { | |||
| 3374 | assert(static_cast<LiveRange*>(LI) == &LR); | |||
| 3375 | // Determine the end point based on the subrange information: | |||
| 3376 | // minimum of (earliest def of next segment, | |||
| 3377 | // latest end point of containing segment) | |||
| 3378 | SlotIndex ED, LE; | |||
| 3379 | for (LiveInterval::SubRange &SR : LI->subranges()) { | |||
| 3380 | LiveRange::iterator I = SR.find(Def); | |||
| 3381 | if (I == SR.end()) | |||
| 3382 | continue; | |||
| 3383 | if (I->start > Def) | |||
| 3384 | ED = ED.isValid() ? std::min(ED, I->start) : I->start; | |||
| 3385 | else | |||
| 3386 | LE = LE.isValid() ? std::max(LE, I->end) : I->end; | |||
| 3387 | } | |||
| 3388 | if (LE.isValid()) | |||
| 3389 | NewEnd = std::min(NewEnd, LE); | |||
| 3390 | if (ED.isValid()) | |||
| 3391 | NewEnd = std::min(NewEnd, ED); | |||
| 3392 | ||||
| 3393 | // We only want to do the extension if there was a subrange that | |||
| 3394 | // was live across Def. | |||
| 3395 | if (LE.isValid()) { | |||
| 3396 | LiveRange::iterator S = LR.find(Def); | |||
| 3397 | if (S != LR.begin()) | |||
| 3398 | std::prev(S)->end = NewEnd; | |||
| 3399 | } | |||
| 3400 | } | |||
| 3401 | LLVM_DEBUG({ | |||
| 3402 | dbgs() << "\t\tremoved " << i << '@' << Def << ": " << LR << '\n'; | |||
| 3403 | if (LI != nullptr) | |||
| 3404 | dbgs() << "\t\t LHS = " << *LI << '\n'; | |||
| 3405 | }); | |||
| 3406 | LLVM_FALLTHROUGH; | |||
| 3407 | } | |||
| 3408 | ||||
| 3409 | case CR_Erase: { | |||
| 3410 | MachineInstr *MI = Indexes->getInstructionFromIndex(Def); | |||
| 3411 | assert(MI && "No instruction to erase"); | |||
| 3412 | if (MI->isCopy()) { | |||
| 3413 | Register Reg = MI->getOperand(1).getReg(); | |||
| 3414 | if (Register::isVirtualRegister(Reg) && Reg != CP.getSrcReg() && | |||
| 3415 | Reg != CP.getDstReg()) | |||
| 3416 | ShrinkRegs.push_back(Reg); | |||
| 3417 | } | |||
| 3418 | ErasedInstrs.insert(MI); | |||
| 3419 | LLVM_DEBUG(dbgs() << "\t\terased:\t" << Def << '\t' << *MI); | |||
| 3420 | LIS->RemoveMachineInstrFromMaps(*MI); | |||
| 3421 | MI->eraseFromParent(); | |||
| 3422 | break; | |||
| 3423 | } | |||
| 3424 | default: | |||
| 3425 | break; | |||
| 3426 | } | |||
| 3427 | } | |||
| 3428 | } | |||
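An aside on the NewEnd computation in the CR_Keep case above: the predecessor segment may be extended to cover liveness established by subranges, but never past the earliest subrange def that follows Def, never past the latest end of a subrange segment containing Def, and never past the removed segment's own end. Below is a minimal standalone sketch of just that clamping, with ints standing in for SlotIndex and plain aggregates for segments; none of these names are LLVM API.

    #include <algorithm>
    #include <climits>
    #include <iostream>
    #include <vector>

    // Illustrative stand-ins: a slot index is an int, a segment is [start, end).
    struct Seg { int start, end; };

    // Mirror of the NewEnd clamping above. The main-range segment preceding Def
    // may be extended, but never past:
    //   ED - the earliest subrange def that starts after Def,
    //   LE - the latest end of a subrange segment containing Def, or
    //   SegEnd - the end of the segment being removed.
    int clampedNewEnd(int Def, int SegEnd,
                      const std::vector<std::vector<Seg>> &SubRanges) {
      int ED = INT_MAX, LE = INT_MIN;
      for (const auto &SR : SubRanges) {
        // First segment ending after Def (what LiveRange::find() would return).
        auto I = std::find_if(SR.begin(), SR.end(),
                              [&](const Seg &S) { return S.end > Def; });
        if (I == SR.end())
          continue;
        if (I->start > Def)
          ED = std::min(ED, I->start); // earliest def of the next segment
        else
          LE = std::max(LE, I->end);   // latest end of a containing segment
      }
      int NewEnd = SegEnd;
      if (LE != INT_MIN) NewEnd = std::min(NewEnd, LE);
      if (ED != INT_MAX) NewEnd = std::min(NewEnd, ED);
      return NewEnd;
    }

    int main() {
      // One subrange live across Def=10 until 14; another redefined at 12.
      std::vector<std::vector<Seg>> SubRanges = {{{4, 14}}, {{12, 20}}};
      std::cout << clampedNewEnd(/*Def=*/10, /*SegEnd=*/16, SubRanges) << '\n';
    }

This prints 12: the subrange redefinition at 12 caps the extension before the live-through subrange's end at 14.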
| 3429 | ||||
| 3430 | void RegisterCoalescer::joinSubRegRanges(LiveRange &LRange, LiveRange &RRange, | |||
| 3431 | LaneBitmask LaneMask, | |||
| 3432 | const CoalescerPair &CP) { | |||
| 3433 | SmallVector<VNInfo*, 16> NewVNInfo; | |||
| 3434 | JoinVals RHSVals(RRange, CP.getSrcReg(), CP.getSrcIdx(), LaneMask, | |||
| 3435 | NewVNInfo, CP, LIS, TRI, true, true); | |||
| 3436 | JoinVals LHSVals(LRange, CP.getDstReg(), CP.getDstIdx(), LaneMask, | |||
| 3437 | NewVNInfo, CP, LIS, TRI, true, true); | |||
| 3438 | ||||
| 3439 | // Compute NewVNInfo and resolve conflicts (see also joinVirtRegs()) | |||
| 3440 | // We should be able to resolve all conflicts here as we could successfully do | |||
| 3441 | // it on the main range already. However, there is a problem when multiple | |||
| 3442 | // ranges get mapped to the "overflow" lane mask bit, which creates unexpected | |||
| 3443 | // interferences. | |||
| 3444 | if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals)) { | |||
| 3445 | // We already determined that it is legal to merge the intervals, so this | |||
| 3446 | // should never fail. | |||
| 3447 | llvm_unreachable("*** Couldn't join subrange!\n"); | |||
| 3448 | } | |||
| 3449 | if (!LHSVals.resolveConflicts(RHSVals) || | |||
| 3450 | !RHSVals.resolveConflicts(LHSVals)) { | |||
| 3451 | // We already determined that it is legal to merge the intervals, so this | |||
| 3452 | // should never fail. | |||
| 3453 | llvm_unreachable("*** Couldn't join subrange!\n"); | |||
| 3454 | } | |||
| 3455 | ||||
| 3456 | // The merging algorithm in LiveInterval::join() can't handle conflicting | |||
| 3457 | // value mappings, so we need to remove any live ranges that overlap a | |||
| 3458 | // CR_Replace resolution. Collect a set of end points that can be used to | |||
| 3459 | // restore the live range after joining. | |||
| 3460 | SmallVector<SlotIndex, 8> EndPoints; | |||
| 3461 | LHSVals.pruneValues(RHSVals, EndPoints, false); | |||
| 3462 | RHSVals.pruneValues(LHSVals, EndPoints, false); | |||
| 3463 | ||||
| 3464 | LHSVals.removeImplicitDefs(); | |||
| 3465 | RHSVals.removeImplicitDefs(); | |||
| 3466 | ||||
| 3467 | LRange.verify(); | |||
| 3468 | RRange.verify(); | |||
| 3469 | ||||
| 3470 | // Join RRange into LHS. | |||
| 3471 | LRange.join(RRange, LHSVals.getAssignments(), RHSVals.getAssignments(), | |||
| 3472 | NewVNInfo); | |||
| 3473 | ||||
| 3474 | LLVM_DEBUG(dbgs() << "\t\tjoined lanes: " << PrintLaneMask(LaneMask) | |||
| 3475 | << ' ' << LRange << "\n"); | |||
| 3476 | if (EndPoints.empty()) | |||
| 3477 | return; | |||
| 3478 | ||||
| 3479 | // Recompute the parts of the live range we had to remove because of | |||
| 3480 | // CR_Replace conflicts. | |||
| 3481 | LLVM_DEBUG({ | |||
| 3482 | dbgs() << "\t\trestoring liveness to " << EndPoints.size() << " points: "; | |||
| 3483 | for (unsigned i = 0, n = EndPoints.size(); i != n; ++i) { | |||
| 3484 | dbgs() << EndPoints[i]; | |||
| 3485 | if (i != n-1) | |||
| 3486 | dbgs() << ','; | |||
| 3487 | } | |||
| 3488 | dbgs() << ": " << LRange << '\n'; | |||
| 3489 | }); | |||
| 3490 | LIS->extendToIndices(LRange, EndPoints); | |||
| 3491 | } | |||
| 3492 | ||||
| 3493 | void RegisterCoalescer::mergeSubRangeInto(LiveInterval &LI, | |||
| 3494 | const LiveRange &ToMerge, | |||
| 3495 | LaneBitmask LaneMask, | |||
| 3496 | CoalescerPair &CP, | |||
| 3497 | unsigned ComposeSubRegIdx) { | |||
| 3498 | BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator(); | |||
| 3499 | LI.refineSubRanges( | |||
| 3500 | Allocator, LaneMask, | |||
| 3501 | [this, &Allocator, &ToMerge, &CP](LiveInterval::SubRange &SR) { | |||
| 3502 | if (SR.empty()) { | |||
| 3503 | SR.assign(ToMerge, Allocator); | |||
| 3504 | } else { | |||
| 3505 | // joinSubRegRanges() destroys the merged range, so we need a copy. | |||
| 3506 | LiveRange RangeCopy(ToMerge, Allocator); | |||
| 3507 | joinSubRegRanges(SR, RangeCopy, SR.LaneMask, CP); | |||
| 3508 | } | |||
| 3509 | }, | |||
| 3510 | *LIS->getSlotIndexes(), *TRI, ComposeSubRegIdx); | |||
| 3511 | } | |||
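mergeSubRangeInto() drives everything through LiveInterval::refineSubRanges(), which takes a callback applied to each (possibly newly created) subrange matching the lane mask. As a rough model of that callback pattern, here is a simplified sketch with a plain uint64_t for LaneBitmask; unlike the real routine, it does not split subranges whose masks only partially overlap.

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <vector>

    // Simplified model: a lane mask is a plain bitmask, a subrange is
    // (mask, visited flag).
    struct SubRange { uint64_t Mask; bool Visited; };

    void refineSubRanges(std::vector<SubRange> &SRs, uint64_t LaneMask,
                         const std::function<void(SubRange &)> &Apply) {
      uint64_t Matched = 0;
      for (SubRange &SR : SRs)
        if (SR.Mask & LaneMask) {
          Apply(SR);                     // visit an existing overlapping subrange
          Matched |= SR.Mask & LaneMask;
        }
      if (uint64_t Rest = LaneMask & ~Matched) {
        SRs.push_back({Rest, false});    // create a subrange for unmatched lanes
        Apply(SRs.back());
      }
    }

    int main() {
      std::vector<SubRange> SRs = {{0b0011, false}, {0b1000, false}};
      refineSubRanges(SRs, 0b0110, [](SubRange &SR) { SR.Visited = true; });
      for (const SubRange &SR : SRs)
        std::cout << SR.Mask << (SR.Visited ? " visited\n" : " untouched\n");
    }

The example exercises both paths: the mask 0b0110 visits the existing overlapping subrange (0b0011) and creates a fresh one for the unmatched lane (0b0100).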
| 3512 | ||||
| 3513 | bool RegisterCoalescer::isHighCostLiveInterval(LiveInterval &LI) { | |||
| 3514 | if (LI.valnos.size() < LargeIntervalSizeThreshold) | |||
| 3515 | return false; | |||
| 3516 | auto &Counter = LargeLIVisitCounter[LI.reg()]; | |||
| 3517 | if (Counter < LargeIntervalFreqThreshold) { | |||
| 3518 | Counter++; | |||
| 3519 | return false; | |||
| 3520 | } | |||
| 3521 | return true; | |||
| 3522 | } | |||
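The guard above trades coalescing quality for compile time: a large interval (many value numbers) is only rejected after it has already been visited LargeIntervalFreqThreshold times. A self-contained sketch of the same counter scheme, with illustrative threshold values and a plain unsigned as the register key:

    #include <iostream>
    #include <unordered_map>

    // Sketch of the compile-time guard above. Thresholds and the unsigned
    // register key are illustrative; in the pass they come from the
    // large-interval command-line options.
    struct HighCostFilter {
      unsigned SizeThreshold = 100;
      unsigned FreqThreshold = 100;
      std::unordered_map<unsigned, unsigned> VisitCount;

      bool isHighCost(unsigned Reg, unsigned NumValNos) {
        if (NumValNos < SizeThreshold)
          return false;          // small intervals are always cheap
        unsigned &Counter = VisitCount[Reg];
        if (Counter < FreqThreshold) {
          ++Counter;             // allow a bounded number of expensive visits
          return false;
        }
        return true;             // beyond the budget: refuse to join
      }
    };

    int main() {
      HighCostFilter F;
      F.FreqThreshold = 2;
      for (int i = 0; i < 4; ++i)
        std::cout << F.isHighCost(/*Reg=*/7, /*NumValNos=*/500); // prints 0011
      std::cout << '\n';
    }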
| 3523 | ||||
| 3524 | bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) { | |||
| 3525 | SmallVector<VNInfo*, 16> NewVNInfo; | |||
| 3526 | LiveInterval &RHS = LIS->getInterval(CP.getSrcReg()); | |||
| 3527 | LiveInterval &LHS = LIS->getInterval(CP.getDstReg()); | |||
| 3528 | bool TrackSubRegLiveness = MRI->shouldTrackSubRegLiveness(*CP.getNewRC()); | |||
| 3529 | JoinVals RHSVals(RHS, CP.getSrcReg(), CP.getSrcIdx(), LaneBitmask::getNone(), | |||
| 3530 | NewVNInfo, CP, LIS, TRI, false, TrackSubRegLiveness); | |||
| 3531 | JoinVals LHSVals(LHS, CP.getDstReg(), CP.getDstIdx(), LaneBitmask::getNone(), | |||
| 3532 | NewVNInfo, CP, LIS, TRI, false, TrackSubRegLiveness); | |||
| 3533 | ||||
| 3534 | LLVM_DEBUG(dbgs() << "\t\tRHS = " << RHS << "\n\t\tLHS = " << LHS << '\n'); | |||
| 3535 | ||||
| 3536 | if (isHighCostLiveInterval(LHS) || isHighCostLiveInterval(RHS)) | |||
| 3537 | return false; | |||
| 3538 | ||||
| 3539 | // First compute NewVNInfo and the simple value mappings. | |||
| 3540 | // Detect impossible conflicts early. | |||
| 3541 | if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals)) | |||
| 3542 | return false; | |||
| 3543 | ||||
| 3544 | // Some conflicts can only be resolved after all values have been mapped. | |||
| 3545 | if (!LHSVals.resolveConflicts(RHSVals) || !RHSVals.resolveConflicts(LHSVals)) | |||
| 3546 | return false; | |||
| 3547 | ||||
| 3548 | // All clear, the live ranges can be merged. | |||
| 3549 | if (RHS.hasSubRanges() || LHS.hasSubRanges()) { | |||
| 3550 | BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator(); | |||
| 3551 | ||||
| 3552 | // Transform lanemasks from the LHS to masks in the coalesced register and | |||
| 3553 | // create initial subranges if necessary. | |||
| 3554 | unsigned DstIdx = CP.getDstIdx(); | |||
| 3555 | if (!LHS.hasSubRanges()) { | |||
| 3556 | LaneBitmask Mask = DstIdx == 0 ? CP.getNewRC()->getLaneMask() | |||
| 3557 | : TRI->getSubRegIndexLaneMask(DstIdx); | |||
| 3558 | // LHS must support subregs or we wouldn't be in this codepath. | |||
| 3559 | assert(Mask.any()); | |||
| 3560 | LHS.createSubRangeFrom(Allocator, Mask, LHS); | |||
| 3561 | } else if (DstIdx != 0) { | |||
| 3562 | // Transform LHS lanemasks to new register class if necessary. | |||
| 3563 | for (LiveInterval::SubRange &R : LHS.subranges()) { | |||
| 3564 | LaneBitmask Mask = TRI->composeSubRegIndexLaneMask(DstIdx, R.LaneMask); | |||
| 3565 | R.LaneMask = Mask; | |||
| 3566 | } | |||
| 3567 | } | |||
| 3568 | LLVM_DEBUG(dbgs() << "\t\tLHST = " << printReg(CP.getDstReg()) << ' ' << LHS | |||
| 3569 | << '\n'); | |||
| 3570 | ||||
| 3571 | // Determine lanemasks of RHS in the coalesced register and merge subranges. | |||
| 3572 | unsigned SrcIdx = CP.getSrcIdx(); | |||
| 3573 | if (!RHS.hasSubRanges()) { | |||
| 3574 | LaneBitmask Mask = SrcIdx == 0 ? CP.getNewRC()->getLaneMask() | |||
| 3575 | : TRI->getSubRegIndexLaneMask(SrcIdx); | |||
| 3576 | mergeSubRangeInto(LHS, RHS, Mask, CP, DstIdx); | |||
| 3577 | } else { | |||
| 3578 | // Pair up subranges and merge. | |||
| 3579 | for (LiveInterval::SubRange &R : RHS.subranges()) { | |||
| 3580 | LaneBitmask Mask = TRI->composeSubRegIndexLaneMask(SrcIdx, R.LaneMask); | |||
| 3581 | mergeSubRangeInto(LHS, R, Mask, CP, DstIdx); | |||
| 3582 | } | |||
| 3583 | } | |||
| 3584 | LLVM_DEBUG(dbgs() << "\tJoined SubRanges " << LHS << "\n"); | |||
| 3585 | ||||
| 3586 | // Pruning implicit defs from subranges may result in the main range | |||
| 3587 | // having stale segments. | |||
| 3588 | LHSVals.pruneMainSegments(LHS, ShrinkMainRange); | |||
| 3589 | ||||
| 3590 | LHSVals.pruneSubRegValues(LHS, ShrinkMask); | |||
| 3591 | RHSVals.pruneSubRegValues(LHS, ShrinkMask); | |||
| 3592 | } | |||
| 3593 | ||||
| 3594 | // The merging algorithm in LiveInterval::join() can't handle conflicting | |||
| 3595 | // value mappings, so we need to remove any live ranges that overlap a | |||
| 3596 | // CR_Replace resolution. Collect a set of end points that can be used to | |||
| 3597 | // restore the live range after joining. | |||
| 3598 | SmallVector<SlotIndex, 8> EndPoints; | |||
| 3599 | LHSVals.pruneValues(RHSVals, EndPoints, true); | |||
| 3600 | RHSVals.pruneValues(LHSVals, EndPoints, true); | |||
| 3601 | ||||
| 3602 | // Erase COPY and IMPLICIT_DEF instructions. This may cause some external | |||
| 3603 | // registers to require trimming. | |||
| 3604 | SmallVector<Register, 8> ShrinkRegs; | |||
| 3605 | LHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs, &LHS); | |||
| 3606 | RHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs); | |||
| 3607 | while (!ShrinkRegs.empty()) | |||
| 3608 | shrinkToUses(&LIS->getInterval(ShrinkRegs.pop_back_val())); | |||
| 3609 | ||||
| 3610 | // Scan and mark undef any DBG_VALUEs that would refer to a different value. | |||
| 3611 | checkMergingChangesDbgValues(CP, LHS, LHSVals, RHS, RHSVals); | |||
| 3612 | ||||
| 3613 | // If the RHS covers any PHI locations that were tracked for debug-info, we | |||
| 3614 | // must update tracking information to reflect the join. | |||
| 3615 | auto RegIt = RegToPHIIdx.find(CP.getSrcReg()); | |||
| 3616 | if (RegIt != RegToPHIIdx.end()) { | |||
| 3617 | // Iterate over all the debug instruction numbers assigned to this register. | |||
| 3618 | for (unsigned InstID : RegIt->second) { | |||
| 3619 | auto PHIIt = PHIValToPos.find(InstID); | |||
| 3620 | assert(PHIIt != PHIValToPos.end()); | |||
| 3621 | const SlotIndex &SI = PHIIt->second.SI; | |||
| 3622 | ||||
| 3623 | // Does the RHS cover the position of this PHI? | |||
| 3624 | auto LII = RHS.find(SI); | |||
| 3625 | if (LII == RHS.end() || LII->start > SI) | |||
| 3626 | continue; | |||
| 3627 | ||||
| 3628 | // Accept two kinds of subregister movement: | |||
| 3629 | // * When we merge from one register class into a larger register: | |||
| 3630 | // %1:gr16 = some-inst | |||
| 3631 | // -> | |||
| 3632 | // %2:gr32.sub_16bit = some-inst | |||
| 3633 | // * When the PHI is already in a subregister, and the larger class | |||
| 3634 | // is coalesced: | |||
| 3635 | // %2:gr32.sub_16bit = some-inst | |||
| 3636 | // %3:gr32 = COPY %2 | |||
| 3637 | // -> | |||
| 3638 | // %3:gr32.sub_16bit = some-inst | |||
| 3639 | // Test for subregister move: | |||
| 3640 | if (CP.getSrcIdx() != 0 || CP.getDstIdx() != 0) | |||
| 3641 | // If we're moving between different subregisters, ignore this join. | |||
| 3642 | // The PHI will not get a location, dropping variable locations. | |||
| 3643 | if (PHIIt->second.SubReg && PHIIt->second.SubReg != CP.getSrcIdx()) | |||
| 3644 | continue; | |||
| 3645 | ||||
| 3646 | // Update our tracking of where the PHI is. | |||
| 3647 | PHIIt->second.Reg = CP.getDstReg(); | |||
| 3648 | ||||
| 3649 | // If we merge into a sub-register of a larger class (test above), | |||
| 3650 | // update SubReg. | |||
| 3651 | if (CP.getSrcIdx() != 0) | |||
| 3652 | PHIIt->second.SubReg = CP.getSrcIdx(); | |||
| 3653 | } | |||
| 3654 | ||||
| 3655 | // Rebuild the register index in RegToPHIIdx to account for PHIs tracking | |||
| 3656 | // different VRegs now. Copy old collection of debug instruction numbers and | |||
| 3657 | // erase the old one: | |||
| 3658 | auto InstrNums = RegIt->second; | |||
| 3659 | RegToPHIIdx.erase(RegIt); | |||
| 3660 | ||||
| 3661 | // There might already be PHIs being tracked in the destination VReg. Insert | |||
| 3662 | // into an existing tracking collection, or insert a new one. | |||
| 3663 | RegIt = RegToPHIIdx.find(CP.getDstReg()); | |||
| 3664 | if (RegIt != RegToPHIIdx.end()) | |||
| 3665 | RegIt->second.insert(RegIt->second.end(), InstrNums.begin(), | |||
| 3666 | InstrNums.end()); | |||
| 3667 | else | |||
| 3668 | RegToPHIIdx.insert({CP.getDstReg(), InstrNums}); | |||
| 3669 | } | |||
| 3670 | ||||
| 3671 | // Join RHS into LHS. | |||
| 3672 | LHS.join(RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), NewVNInfo); | |||
| 3673 | ||||
| 3674 | // Kill flags are going to be wrong if the live ranges were overlapping. | |||
| 3675 | // Eventually, we should simply clear all kill flags when computing live | |||
| 3676 | // ranges. They are reinserted after register allocation. | |||
| 3677 | MRI->clearKillFlags(LHS.reg()); | |||
| 3678 | MRI->clearKillFlags(RHS.reg()); | |||
| 3679 | ||||
| 3680 | if (!EndPoints.empty()) { | |||
| 3681 | // Recompute the parts of the live range we had to remove because of | |||
| 3682 | // CR_Replace conflicts. | |||
| 3683 | LLVM_DEBUG({ | |||
| 3684 | dbgs() << "\t\trestoring liveness to " << EndPoints.size() << " points: "; | |||
| 3685 | for (unsigned i = 0, n = EndPoints.size(); i != n; ++i) { | |||
| 3686 | dbgs() << EndPoints[i]; | |||
| 3687 | if (i != n-1) | |||
| 3688 | dbgs() << ','; | |||
| 3689 | } | |||
| 3690 | dbgs() << ": " << LHS << '\n'; | |||
| 3691 | }); | |||
| 3692 | LIS->extendToIndices((LiveRange&)LHS, EndPoints); | |||
| 3693 | } | |||
| 3694 | ||||
| 3695 | return true; | |||
| 3696 | } | |||
| 3697 | ||||
| 3698 | bool RegisterCoalescer::joinIntervals(CoalescerPair &CP) { | |||
| 3699 | return CP.isPhys() ? joinReservedPhysReg(CP) : joinVirtRegs(CP); | |||
| 3700 | } | |||
| 3701 | ||||
| 3702 | void RegisterCoalescer::buildVRegToDbgValueMap(MachineFunction &MF) | |||
| 3703 | { | |||
| 3704 | const SlotIndexes &Slots = *LIS->getSlotIndexes(); | |||
| 3705 | SmallVector<MachineInstr *, 8> ToInsert; | |||
| 3706 | ||||
| 3707 | // After collecting a block of DBG_VALUEs into ToInsert, enter them into the | |||
| 3708 | // vreg => DbgValueLoc map. | |||
| 3709 | auto CloseNewDVRange = [this, &ToInsert](SlotIndex Slot) { | |||
| 3710 | for (auto *X : ToInsert) { | |||
| 3711 | for (auto Op : X->debug_operands()) { | |||
| 3712 | if (Op.isReg() && Op.getReg().isVirtual()) | |||
| 3713 | DbgVRegToValues[Op.getReg()].push_back({Slot, X}); | |||
| 3714 | } | |||
| 3715 | } | |||
| 3716 | ||||
| 3717 | ToInsert.clear(); | |||
| 3718 | }; | |||
| 3719 | ||||
| 3720 | // Iterate over all instructions, collecting them into the ToInsert vector. | |||
| 3721 | // Once a non-debug instruction is found, record the slot index of the | |||
| 3722 | // collected DBG_VALUEs. | |||
| 3723 | for (auto &MBB : MF) { | |||
| 3724 | SlotIndex CurrentSlot = Slots.getMBBStartIdx(&MBB); | |||
| 3725 | ||||
| 3726 | for (auto &MI : MBB) { | |||
| 3727 | if (MI.isDebugValue()) { | |||
| 3728 | if (any_of(MI.debug_operands(), [](const MachineOperand &MO) { | |||
| 3729 | return MO.isReg() && MO.getReg().isVirtual(); | |||
| 3730 | })) | |||
| 3731 | ToInsert.push_back(&MI); | |||
| 3732 | } else if (!MI.isDebugOrPseudoInstr()) { | |||
| 3733 | CurrentSlot = Slots.getInstructionIndex(MI); | |||
| 3734 | CloseNewDVRange(CurrentSlot); | |||
| 3735 | } | |||
| 3736 | } | |||
| 3737 | ||||
| 3738 | // Close range of DBG_VALUEs at the end of blocks. | |||
| 3739 | CloseNewDVRange(Slots.getMBBEndIdx(&MBB)); | |||
| 3740 | } | |||
| 3741 | ||||
| 3742 | // Sort all DBG_VALUEs we've seen by slot number. | |||
| 3743 | for (auto &Pair : DbgVRegToValues) | |||
| 3744 | llvm::sort(Pair.second); | |||
| 3745 | } | |||
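The map construction above hinges on the fact that DBG_VALUEs carry no slot index of their own: each run of debug instructions is filed under the slot of the next real instruction, or under the block end when the run sits at the bottom of a block. A standalone sketch of that buffering scheme, with instructions reduced to (isDebug, reg, slot) records (illustrative types only):

    #include <algorithm>
    #include <iostream>
    #include <map>
    #include <vector>

    // Instructions reduced to (isDebug, reg, slot); debug records have no slot.
    struct Inst { bool IsDebug; unsigned Reg; int Slot; };

    int main() {
      std::vector<Inst> Block = {{true, 1, -1},  // DBG_VALUE of vreg 1
                                 {false, 0, 10}, // real instruction at slot 10
                                 {true, 2, -1},  {true, 1, -1},
                                 {false, 0, 20}};
      int BlockEnd = 30;

      std::map<unsigned, std::vector<int>> DbgVRegToValues;
      std::vector<unsigned> Pending;
      auto Flush = [&](int Slot) {
        for (unsigned Reg : Pending)
          DbgVRegToValues[Reg].push_back(Slot);
        Pending.clear();
      };

      for (const Inst &I : Block) {
        if (I.IsDebug)
          Pending.push_back(I.Reg); // no slot yet: wait for a real instruction
        else
          Flush(I.Slot);            // file pending records under this slot
      }
      Flush(BlockEnd);              // close the final run at the block boundary

      for (auto &P : DbgVRegToValues) {
        std::sort(P.second.begin(), P.second.end()); // enable sorted scans later
        std::cout << "vreg " << P.first << ':';
        for (int S : P.second)
          std::cout << ' ' << S;
        std::cout << '\n';
      }
    }

The final sort is what makes the later merge-style scans over per-register DBG_VALUE lists valid.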
| 3746 | ||||
| 3747 | void RegisterCoalescer::checkMergingChangesDbgValues(CoalescerPair &CP, | |||
| 3748 | LiveRange &LHS, | |||
| 3749 | JoinVals &LHSVals, | |||
| 3750 | LiveRange &RHS, | |||
| 3751 | JoinVals &RHSVals) { | |||
| 3752 | auto ScanForDstReg = [&](Register Reg) { | |||
| 3753 | checkMergingChangesDbgValuesImpl(Reg, RHS, LHS, LHSVals); | |||
| 3754 | }; | |||
| 3755 | ||||
| 3756 | auto ScanForSrcReg = [&](Register Reg) { | |||
| 3757 | checkMergingChangesDbgValuesImpl(Reg, LHS, RHS, RHSVals); | |||
| 3758 | }; | |||
| 3759 | ||||
| 3760 | // Scan for potentially unsound DBG_VALUEs: examine first the register number | |||
| 3761 | // Reg, and then any other vregs that may have been merged into it. | |||
| 3762 | auto PerformScan = [this](Register Reg, std::function<void(Register)> Func) { | |||
| 3763 | Func(Reg); | |||
| 3764 | if (DbgMergedVRegNums.count(Reg)) | |||
| 3765 | for (Register X : DbgMergedVRegNums[Reg]) | |||
| 3766 | Func(X); | |||
| 3767 | }; | |||
| 3768 | ||||
| 3769 | // Scan for unsound updates of both the source and destination register. | |||
| 3770 | PerformScan(CP.getSrcReg(), ScanForSrcReg); | |||
| 3771 | PerformScan(CP.getDstReg(), ScanForDstReg); | |||
| 3772 | } | |||
| 3773 | ||||
| 3774 | void RegisterCoalescer::checkMergingChangesDbgValuesImpl(Register Reg, | |||
| 3775 | LiveRange &OtherLR, | |||
| 3776 | LiveRange &RegLR, | |||
| 3777 | JoinVals &RegVals) { | |||
| 3778 | // Are there any DBG_VALUEs to examine? | |||
| 3779 | auto VRegMapIt = DbgVRegToValues.find(Reg); | |||
| 3780 | if (VRegMapIt == DbgVRegToValues.end()) | |||
| 3781 | return; | |||
| 3782 | ||||
| 3783 | auto &DbgValueSet = VRegMapIt->second; | |||
| 3784 | auto DbgValueSetIt = DbgValueSet.begin(); | |||
| 3785 | auto SegmentIt = OtherLR.begin(); | |||
| 3786 | ||||
| 3787 | bool LastUndefResult = false; | |||
| 3788 | SlotIndex LastUndefIdx; | |||
| 3789 | ||||
| 3790 | // If the "Other" register is live at a slot Idx, test whether Reg can | |||
| 3791 | // safely be merged with it, or should be marked undef. | |||
| 3792 | auto ShouldUndef = [&RegVals, &RegLR, &LastUndefResult, | |||
| 3793 | &LastUndefIdx](SlotIndex Idx) -> bool { | |||
| 3794 | // Our worst-case performance typically happens with asan, causing very | |||
| 3795 | // many DBG_VALUEs of the same location. Cache a copy of the most recent | |||
| 3796 | // result for this edge-case. | |||
| 3797 | if (LastUndefIdx == Idx) | |||
| 3798 | return LastUndefResult; | |||
| 3799 | ||||
| 3800 | // If the other range was live, and Reg's was not, the register coalescer | |||
| 3801 | // will not have tried to resolve any conflicts. We don't know whether | |||
| 3802 | // the DBG_VALUE will refer to the same value number, so it must be made | |||
| 3803 | // undef. | |||
| 3804 | auto OtherIt = RegLR.find(Idx); | |||
| 3805 | if (OtherIt == RegLR.end()) | |||
| 3806 | return true; | |||
| 3807 | ||||
| 3808 | // Both registers were live: examine the conflict resolution record for | |||
| 3809 | // the value number Reg refers to. CR_Keep meant that this value number | |||
| 3810 | // "won" and the merged register definitely refers to that value. CR_Erase | |||
| 3811 | // means the value number was a redundant copy of the other value, which | |||
| 3812 | // was coalesced and Reg deleted. It's safe to refer to the other register | |||
| 3813 | // (which will be the source of the copy). | |||
| 3814 | auto Resolution = RegVals.getResolution(OtherIt->valno->id); | |||
| 3815 | LastUndefResult = Resolution != JoinVals::CR_Keep && | |||
| 3816 | Resolution != JoinVals::CR_Erase; | |||
| 3817 | LastUndefIdx = Idx; | |||
| 3818 | return LastUndefResult; | |||
| 3819 | }; | |||
| 3820 | ||||
| 3821 | // Iterate over both the live-range of the "Other" register, and the set of | |||
| 3822 | // DBG_VALUEs for Reg at the same time. Advance whichever one has the lowest | |||
| 3823 | // slot index. This relies on the DbgValueSet being ordered. | |||
| 3824 | while (DbgValueSetIt != DbgValueSet.end() && SegmentIt != OtherLR.end()) { | |||
| 3825 | if (DbgValueSetIt->first < SegmentIt->end) { | |||
| 3826 | // "Other" is live and there is a DBG_VALUE of Reg: test if we should | |||
| 3827 | // set it undef. | |||
| 3828 | if (DbgValueSetIt->first >= SegmentIt->start) { | |||
| 3829 | bool HasReg = DbgValueSetIt->second->hasDebugOperandForReg(Reg); | |||
| 3830 | bool ShouldUndefReg = ShouldUndef(DbgValueSetIt->first); | |||
| 3831 | if (HasReg && ShouldUndefReg) { | |||
| 3832 | // Mark undef, erase record of this DBG_VALUE to avoid revisiting. | |||
| 3833 | DbgValueSetIt->second->setDebugValueUndef(); | |||
| 3834 | continue; | |||
| 3835 | } | |||
| 3836 | } | |||
| 3837 | ++DbgValueSetIt; | |||
| 3838 | } else { | |||
| 3839 | ++SegmentIt; | |||
| 3840 | } | |||
| 3841 | } | |||
| 3842 | } | |||
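The loop above is a classic merge-style sweep: two sorted sequences (the live segments and the DBG_VALUE slots) are walked with one cursor each, always advancing whichever lags, so every (slot, covering segment) pair is found in linear time. A minimal sketch of the same traversal with ints for slot indices:

    #include <iostream>
    #include <vector>

    // Ints stand in for SlotIndex; segments are sorted, half-open [start, end).
    struct Seg { int start, end; };

    int main() {
      std::vector<Seg> Segments = {{0, 5}, {8, 12}};
      std::vector<int> DbgSlots = {1, 6, 9, 15}; // sorted DBG_VALUE positions

      auto SegIt = Segments.begin();
      auto SlotIt = DbgSlots.begin();
      while (SlotIt != DbgSlots.end() && SegIt != Segments.end()) {
        if (*SlotIt < SegIt->end) {
          if (*SlotIt >= SegIt->start) // slot falls inside the segment
            std::cout << "slot " << *SlotIt << " lies in [" << SegIt->start
                      << ',' << SegIt->end << ")\n";
          ++SlotIt;  // handled (either inside this segment or in a hole)
        } else {
          ++SegIt;   // segment ends before the slot: advance the segment cursor
        }
      }
    }

Slots 1 and 9 are reported as covered; 6 falls in the hole between segments and 15 lies past the last segment, so neither is reported.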
| 3843 | ||||
| 3844 | namespace { | |||
| 3845 | ||||
| 3846 | /// Information concerning MBB coalescing priority. | |||
| 3847 | struct MBBPriorityInfo { | |||
| 3848 | MachineBasicBlock *MBB; | |||
| 3849 | unsigned Depth; | |||
| 3850 | bool IsSplit; | |||
| 3851 | ||||
| 3852 | MBBPriorityInfo(MachineBasicBlock *mbb, unsigned depth, bool issplit) | |||
| 3853 | : MBB(mbb), Depth(depth), IsSplit(issplit) {} | |||
| 3854 | }; | |||
| 3855 | ||||
| 3856 | } // end anonymous namespace | |||
| 3857 | ||||
| 3858 | /// C-style comparator that sorts first based on the loop depth of the basic | |||
| 3859 | /// block (the unsigned), and then on the MBB number. | |||
| 3860 | /// | |||
| 3861 | /// EnableGlobalCopies assumes that the primary sort key is loop depth. | |||
| 3862 | static int compareMBBPriority(const MBBPriorityInfo *LHS, | |||
| 3863 | const MBBPriorityInfo *RHS) { | |||
| 3864 | // Deeper loops first | |||
| 3865 | if (LHS->Depth != RHS->Depth) | |||
| 3866 | return LHS->Depth > RHS->Depth ? -1 : 1; | |||
| 3867 | ||||
| 3868 | // Try to unsplit critical edges next. | |||
| 3869 | if (LHS->IsSplit != RHS->IsSplit) | |||
| 3870 | return LHS->IsSplit ? -1 : 1; | |||
| 3871 | ||||
| 3872 | // Prefer blocks that are more connected in the CFG. This takes care of | |||
| 3873 | // the most difficult copies first while intervals are short. | |||
| 3874 | unsigned cl = LHS->MBB->pred_size() + LHS->MBB->succ_size(); | |||
| 3875 | unsigned cr = RHS->MBB->pred_size() + RHS->MBB->succ_size(); | |||
| 3876 | if (cl != cr) | |||
| 3877 | return cl > cr ? -1 : 1; | |||
| 3878 | ||||
| 3879 | // As a last resort, sort by block number. | |||
| 3880 | return LHS->MBB->getNumber() < RHS->MBB->getNumber() ? -1 : 1; | |||
| 3881 | } | |||
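compareMBBPriority() follows the qsort-style convention expected by LLVM's array_pod_sort: return a negative, zero, or positive int as a three-way ordering. A small standalone demo of the same pattern using std::qsort and made-up data (the tie-break assumes distinct block numbers, as in the comparator above):

    #include <cstdlib>
    #include <iostream>

    // Made-up priority record; not the real MBBPriorityInfo.
    struct Info { unsigned Depth; int Number; };

    // Three-way comparator: deeper blocks first, then ascending block number.
    static int compareInfo(const void *L, const void *R) {
      const Info *LHS = static_cast<const Info *>(L);
      const Info *RHS = static_cast<const Info *>(R);
      if (LHS->Depth != RHS->Depth)
        return LHS->Depth > RHS->Depth ? -1 : 1; // hotter (deeper) blocks first
      return LHS->Number < RHS->Number ? -1 : 1; // assumes distinct numbers
    }

    int main() {
      Info Blocks[] = {{0, 2}, {2, 5}, {2, 3}, {1, 1}};
      std::qsort(Blocks, 4, sizeof(Info), compareInfo);
      for (const Info &B : Blocks)
        std::cout << "depth " << B.Depth << " #" << B.Number << '\n';
    }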
| 3882 | ||||
| 3883 | /// \returns true if the given copy uses or defines a local live range. | |||
| 3884 | static bool isLocalCopy(MachineInstr *Copy, const LiveIntervals *LIS) { | |||
| 3885 | if (!Copy->isCopy()) | |||
| 3886 | return false; | |||
| 3887 | ||||
| 3888 | if (Copy->getOperand(1).isUndef()) | |||
| 3889 | return false; | |||
| 3890 | ||||
| 3891 | Register SrcReg = Copy->getOperand(1).getReg(); | |||
| 3892 | Register DstReg = Copy->getOperand(0).getReg(); | |||
| 3893 | if (Register::isPhysicalRegister(SrcReg) || | |||
| 3894 | Register::isPhysicalRegister(DstReg)) | |||
| 3895 | return false; | |||
| 3896 | ||||
| 3897 | return LIS->intervalIsInOneMBB(LIS->getInterval(SrcReg)) | |||
| 3898 | || LIS->intervalIsInOneMBB(LIS->getInterval(DstReg)); | |||
| 3899 | } | |||
| 3900 | ||||
| 3901 | void RegisterCoalescer::lateLiveIntervalUpdate() { | |||
| 3902 | for (Register reg : ToBeUpdated) { | |||
| 3903 | if (!LIS->hasInterval(reg)) | |||
| 3904 | continue; | |||
| 3905 | LiveInterval &LI = LIS->getInterval(reg); | |||
| 3906 | shrinkToUses(&LI, &DeadDefs); | |||
| 3907 | if (!DeadDefs.empty()) | |||
| 3908 | eliminateDeadDefs(); | |||
| 3909 | } | |||
| 3910 | ToBeUpdated.clear(); | |||
| 3911 | } | |||
| 3912 | ||||
| 3913 | bool RegisterCoalescer:: | |||
| 3914 | copyCoalesceWorkList(MutableArrayRef<MachineInstr*> CurrList) { | |||
| 3915 | bool Progress = false; | |||
| 3916 | for (unsigned i = 0, e = CurrList.size(); i != e; ++i) { | |||
| 3917 | if (!CurrList[i]) | |||
| 3918 | continue; | |||
| 3919 | // Skip instruction pointers that have already been erased, for example by | |||
| 3920 | // dead code elimination. | |||
| 3921 | if (ErasedInstrs.count(CurrList[i])) { | |||
| 3922 | CurrList[i] = nullptr; | |||
| 3923 | continue; | |||
| 3924 | } | |||
| 3925 | bool Again = false; | |||
| 3926 | bool Success = joinCopy(CurrList[i], Again); | |||
| 3927 | Progress |= Success; | |||
| 3928 | if (Success || !Again) | |||
| 3929 | CurrList[i] = nullptr; | |||
| 3930 | } | |||
| 3931 | return Progress; | |||
| 3932 | } | |||
| 3933 | ||||
| 3934 | /// Check if DstReg is a terminal node. | |||
| 3935 | /// I.e., it does not have any affinity other than \p Copy. | |||
| 3936 | static bool isTerminalReg(Register DstReg, const MachineInstr &Copy, | |||
| 3937 | const MachineRegisterInfo *MRI) { | |||
| 3938 | assert(Copy.isCopyLike()); | |||
| 3939 | // Check if the destination of this copy has any other affinity. | |||
| 3940 | for (const MachineInstr &MI : MRI->reg_nodbg_instructions(DstReg)) | |||
| 3941 | if (&MI != &Copy && MI.isCopyLike()) | |||
| 3942 | return false; | |||
| 3943 | return true; | |||
| 3944 | } | |||
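In other words, a register is "terminal" with respect to a copy when no other copy-like instruction touches it, so joining that copy cannot compete with any other coalescing opportunity. A toy sketch of the test, with instructions reduced to (id, isCopyLike) records over the register's use list (illustrative names only):

    #include <iostream>
    #include <vector>

    // Instructions that use or define the register, reduced to (id, isCopyLike).
    struct Inst { int Id; bool IsCopyLike; };

    // Terminal: no copy-like instruction other than CopyId touches the register.
    bool isTerminal(int CopyId, const std::vector<Inst> &InstsUsingReg) {
      for (const Inst &MI : InstsUsingReg)
        if (MI.Id != CopyId && MI.IsCopyLike)
          return false; // another copy has affinity to this register
      return true;
    }

    int main() {
      std::vector<Inst> Uses = {{1, true}, {2, false}};
      std::cout << isTerminal(1, Uses) << '\n'; // 1: only copy #1 touches it
      Uses.push_back({3, true});
      std::cout << isTerminal(1, Uses) << '\n'; // 0: copy #3 competes
    }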
| 3945 | ||||
| 3946 | bool RegisterCoalescer::applyTerminalRule(const MachineInstr &Copy) const { | |||
| 3947 | assert(Copy.isCopyLike()); | |||
| 3948 | if (!UseTerminalRule) | |||
| 3949 | return false; | |||
| 3950 | Register SrcReg, DstReg; | |||
| 3951 | unsigned SrcSubReg = 0, DstSubReg = 0; | |||
| 3952 | if (!isMoveInstr(*TRI, &Copy, SrcReg, DstReg, SrcSubReg, DstSubReg)) | |||
| 3953 | return false; | |||
| 3954 | // Check if the destination of this copy has any other affinity. | |||
| 3955 | if (DstReg.isPhysical() || | |||
| 3956 | // If SrcReg is a physical register, the copy won't be coalesced. | |||
| 3957 | // Ignoring it may have other side effects (like missing | |||
| 3958 | // rematerialization). So keep it. | |||
| 3959 | SrcReg.isPhysical() || !isTerminalReg(DstReg, Copy, MRI)) | |||
| 3960 | return false; | |||
| 3961 | ||||
| 3962 | // DstReg is a terminal node. Check if it interferes with any other | |||
| 3963 | // copy involving SrcReg. | |||
| 3964 | const MachineBasicBlock *OrigBB = Copy.getParent(); | |||
| 3965 | const LiveInterval &DstLI = LIS->getInterval(DstReg); | |||
| 3966 | for (const MachineInstr &MI : MRI->reg_nodbg_instructions(SrcReg)) { | |||
| 3967 | // Technically we should check if the weight of the new copy is | |||
| 3968 | // interesting compared to the other one and update the weight | |||
| 3969 | // of the copies accordingly. However, this would only work if | |||
| 3970 | // we would gather all the copies first then coalesce, whereas | |||
| 3971 | // right now we interleave both actions. | |||
| 3972 | // For now, just consider the copies that are in the same block. | |||
| 3973 | if (&MI == &Copy || !MI.isCopyLike() || MI.getParent() != OrigBB) | |||
| 3974 | continue; | |||
| 3975 | Register OtherSrcReg, OtherReg; | |||
| 3976 | unsigned OtherSrcSubReg = 0, OtherSubReg = 0; | |||
| 3977 | if (!isMoveInstr(*TRI, &Copy, OtherSrcReg, OtherReg, OtherSrcSubReg, | |||
| 3978 | OtherSubReg)) | |||
| 3979 | return false; | |||
| 3980 | if (OtherReg == SrcReg) | |||
| 3981 | OtherReg = OtherSrcReg; | |||
| 3982 | // Check if OtherReg is a non-terminal. | |||
| 3983 | if (Register::isPhysicalRegister(OtherReg) || | |||
| 3984 | isTerminalReg(OtherReg, MI, MRI)) | |||
| 3985 | continue; | |||
| 3986 | // Check that OtherReg interferes with DstReg. | |||
| 3987 | if (LIS->getInterval(OtherReg).overlaps(DstLI)) { | |||
| 3988 | LLVM_DEBUG(dbgs() << "Apply terminal rule for: " << printReg(DstReg) | |||
| 3989 | << '\n'); | |||
| 3990 | return true; | |||
| 3991 | } | |||
| 3992 | } | |||
| 3993 | return false; | |||
| 3994 | } | |||
| 3995 | ||||
| 3996 | void | |||
| 3997 | RegisterCoalescer::copyCoalesceInMBB(MachineBasicBlock *MBB) { | |||
| 3998 | LLVM_DEBUG(dbgs() << MBB->getName() << ":\n"); | |||
| 3999 | ||||
| 4000 | // Collect all copy-like instructions in MBB. Don't start coalescing anything | |||
| 4001 | // yet, it might invalidate the iterator. | |||
| 4002 | const unsigned PrevSize = WorkList.size(); | |||
| 4003 | if (JoinGlobalCopies) { | |||
| 4004 | SmallVector<MachineInstr*, 2> LocalTerminals; | |||
| 4005 | SmallVector<MachineInstr*, 2> GlobalTerminals; | |||
| 4006 | // Coalesce copies bottom-up to coalesce local defs before local uses. They | |||
| 4007 | // are not inherently easier to resolve, but slightly preferable until we | |||
| 4008 | // have local live range splitting. In particular this is required by | |||
| 4009 | // cmp+jmp macro fusion. | |||
| 4010 | for (MachineInstr &MI : *MBB) { | |||
| 4011 | if (!MI.isCopyLike()) | |||
| 4012 | continue; | |||
| 4013 | bool ApplyTerminalRule = applyTerminalRule(MI); | |||
| 4014 | if (isLocalCopy(&MI, LIS)) { | |||
| 4015 | if (ApplyTerminalRule) | |||
| 4016 | LocalTerminals.push_back(&MI); | |||
| 4017 | else | |||
| 4018 | LocalWorkList.push_back(&MI); | |||
| 4019 | } else { | |||
| 4020 | if (ApplyTerminalRule) | |||
| 4021 | GlobalTerminals.push_back(&MI); | |||
| 4022 | else | |||
| 4023 | WorkList.push_back(&MI); | |||
| 4024 | } | |||
| 4025 | } | |||
| 4026 | // Append the copies evicted by the terminal rule at the end of the list. | |||
| 4027 | LocalWorkList.append(LocalTerminals.begin(), LocalTerminals.end()); | |||
| 4028 | WorkList.append(GlobalTerminals.begin(), GlobalTerminals.end()); | |||
| 4029 | } | |||
| 4030 | else { | |||
| 4031 | SmallVector<MachineInstr*, 2> Terminals; | |||
| 4032 | for (MachineInstr &MII : *MBB) | |||
| 4033 | if (MII.isCopyLike()) { | |||
| 4034 | if (applyTerminalRule(MII)) | |||
| 4035 | Terminals.push_back(&MII); | |||
| 4036 | else | |||
| 4037 | WorkList.push_back(&MII); | |||
| 4038 | } | |||
| 4039 | // Append the copies evicted by the terminal rule at the end of the list. | |||
| 4040 | WorkList.append(Terminals.begin(), Terminals.end()); | |||
| 4041 | } | |||
| 4042 | // Try coalescing the collected copies immediately, and remove the nulls. | |||
| 4043 | // This prevents the WorkList from getting too large since most copies are | |||
| 4044 | // joinable on the first attempt. | |||
| 4045 | MutableArrayRef<MachineInstr*> | |||
| 4046 | CurrList(WorkList.begin() + PrevSize, WorkList.end()); | |||
| 4047 | if (copyCoalesceWorkList(CurrList)) | |||
| 4048 | WorkList.erase(std::remove(WorkList.begin() + PrevSize, WorkList.end(), | |||
| 4049 | nullptr), WorkList.end()); | |||
| 4050 | } | |||
| 4051 | ||||
| 4052 | void RegisterCoalescer::coalesceLocals() { | |||
| 4053 | copyCoalesceWorkList(LocalWorkList); | |||
| 4054 | for (unsigned j = 0, je = LocalWorkList.size(); j != je; ++j) { | |||
| 4055 | if (LocalWorkList[j]) | |||
| 4056 | WorkList.push_back(LocalWorkList[j]); | |||
| 4057 | } | |||
| 4058 | LocalWorkList.clear(); | |||
| 4059 | } | |||
| 4060 | ||||
| 4061 | void RegisterCoalescer::joinAllIntervals() { | |||
| 4062 | LLVM_DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n"); | |||
| 4063 | assert(WorkList.empty() && LocalWorkList.empty() && "Old data still around."); | |||
| 4064 | ||||
| 4065 | std::vector<MBBPriorityInfo> MBBs; | |||
| 4066 | MBBs.reserve(MF->size()); | |||
| 4067 | for (MachineBasicBlock &MBB : *MF) { | |||
| 4068 | MBBs.push_back(MBBPriorityInfo(&MBB, Loops->getLoopDepth(&MBB), | |||
| 4069 | JoinSplitEdges && isSplitEdge(&MBB))); | |||
| 4070 | } | |||
| 4071 | array_pod_sort(MBBs.begin(), MBBs.end(), compareMBBPriority); | |||
| 4072 | ||||
| 4073 | // Coalesce intervals in MBB priority order. | |||
| 4074 | unsigned CurrDepth = std::numeric_limits<unsigned>::max(); | |||
| 4075 | for (unsigned i = 0, e = MBBs.size(); i != e; ++i) { | |||
| 4076 | // Try coalescing the collected local copies for deeper loops. | |||
| 4077 | if (JoinGlobalCopies && MBBs[i].Depth < CurrDepth) { | |||
| 4078 | coalesceLocals(); | |||
| 4079 | CurrDepth = MBBs[i].Depth; | |||
| 4080 | } | |||
| 4081 | copyCoalesceInMBB(MBBs[i].MBB); | |||
| 4082 | } | |||
| 4083 | lateLiveIntervalUpdate(); | |||
| 4084 | coalesceLocals(); | |||
| 4085 | ||||
| 4086 | // Joining intervals can allow other intervals to be joined. Iteratively join | |||
| 4087 | // until we make no progress. | |||
| 4088 | while (copyCoalesceWorkList(WorkList)) | |||
| 4089 | /* empty */ ; | |||
| 4090 | lateLiveIntervalUpdate(); | |||
| 4091 | } | |||
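The tail of joinAllIntervals() is a fixed-point iteration: keep replaying the worklist until a whole pass makes no progress, since each successful join can enable further joins. The shape of that loop, sketched standalone with "coalescing" faked as merging adjacent equal numbers:

    #include <iostream>
    #include <vector>

    // One pass over the worklist; returns true if it made progress. Here a
    // "join" is faked as merging a pair of adjacent equal numbers.
    static bool coalesceOnce(std::vector<int> &Work) {
      for (size_t i = 0; i + 1 < Work.size(); ++i)
        if (Work[i] == Work[i + 1]) {
          Work.erase(Work.begin() + i); // one successful join
          return true;                  // progress: the caller retries
        }
      return false;                     // fixed point reached
    }

    int main() {
      std::vector<int> Work = {1, 2, 2, 1, 1, 3};
      while (coalesceOnce(Work))
        /* empty */;
      for (int V : Work)
        std::cout << V << ' '; // 1 2 1 3
      std::cout << '\n';
    }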
| 4092 | ||||
| 4093 | void RegisterCoalescer::releaseMemory() { | |||
| 4094 | ErasedInstrs.clear(); | |||
| 4095 | WorkList.clear(); | |||
| 4096 | DeadDefs.clear(); | |||
| 4097 | InflateRegs.clear(); | |||
| 4098 | LargeLIVisitCounter.clear(); | |||
| 4099 | } | |||
| 4100 | ||||
| 4101 | bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) { | |||
| 4102 | LLVM_DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n" | |||
| 4103 | << "********** Function: " << fn.getName() << '\n'); | |||
| 4104 | ||||
| 4105 | // Variables changed between a setjmp and a longjmp can have an undefined | |||
| 4106 | // value after the longjmp. This behaviour can be observed if such a variable | |||
| 4107 | // is spilled, as longjmp won't restore the value in the spill slot. | |||
| 4108 | // RegisterCoalescer should not run in functions with a setjmp to avoid | |||
| 4109 | // merging such undefined variables with predictable ones. | |||
| 4110 | // | |||
| 4111 | // TODO: Could specifically disable coalescing registers live across setjmp | |||
| 4112 | // calls | |||
| 4113 | if (fn.exposesReturnsTwice()) { | |||
| 4114 | LLVM_DEBUG( | |||
| 4115 | dbgs() << "* Skipped as it exposes functions that return twice.\n"); | |||
| 4116 | return false; | |||
| 4117 | } | |||
| 4118 | ||||
| 4119 | MF = &fn; | |||
| 4120 | MRI = &fn.getRegInfo(); | |||
| 4121 | const TargetSubtargetInfo &STI = fn.getSubtarget(); | |||
| 4122 | TRI = STI.getRegisterInfo(); | |||
| 4123 | TII = STI.getInstrInfo(); | |||
| 4124 | LIS = &getAnalysis<LiveIntervals>(); | |||
| 4125 | AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); | |||
| 4126 | Loops = &getAnalysis<MachineLoopInfo>(); | |||
| 4127 | if (EnableGlobalCopies == cl::BOU_UNSET) | |||
| 4128 | JoinGlobalCopies = STI.enableJoinGlobalCopies(); | |||
| 4129 | else | |||
| 4130 | JoinGlobalCopies = (EnableGlobalCopies == cl::BOU_TRUE); | |||
| 4131 | ||||
| 4132 | // If there are PHIs tracked by debug-info, they will need updating during | |||
| 4133 | // coalescing. Build an index of those PHIs to ease updating. | |||
| 4134 | SlotIndexes *Slots = LIS->getSlotIndexes(); | |||
| 4135 | for (const auto &DebugPHI : MF->DebugPHIPositions) { | |||
| 4136 | MachineBasicBlock *MBB = DebugPHI.second.MBB; | |||
| 4137 | Register Reg = DebugPHI.second.Reg; | |||
| 4138 | unsigned SubReg = DebugPHI.second.SubReg; | |||
| 4139 | SlotIndex SI = Slots->getMBBStartIdx(MBB); | |||
| 4140 | PHIValPos P = {SI, Reg, SubReg}; | |||
| 4141 | PHIValToPos.insert(std::make_pair(DebugPHI.first, P)); | |||
| 4142 | RegToPHIIdx[Reg].push_back(DebugPHI.first); | |||
| 4143 | } | |||
| 4144 | ||||
| 4145 | // The MachineScheduler does not currently require JoinSplitEdges. This will | |||
| 4146 | // either be enabled unconditionally or replaced by a more general live range | |||
| 4147 | // splitting optimization. | |||
| 4148 | JoinSplitEdges = EnableJoinSplits; | |||
| 4149 | ||||
| 4150 | if (VerifyCoalescing) | |||
| 4151 | MF->verify(this, "Before register coalescing"); | |||
| 4152 | ||||
| 4153 | DbgVRegToValues.clear(); | |||
| 4154 | DbgMergedVRegNums.clear(); | |||
| 4155 | buildVRegToDbgValueMap(fn); | |||
| 4156 | ||||
| 4157 | RegClassInfo.runOnMachineFunction(fn); | |||
| 4158 | ||||
| 4159 | // Join (coalesce) intervals if requested. | |||
| 4160 | if (EnableJoining) | |||
| 4161 | joinAllIntervals(); | |||
| 4162 | ||||
| 4163 | // After deleting a lot of copies, register classes may be less constrained. | |||
| 4164 | // Removing sub-register operands may allow GR32_ABCD -> GR32 and DPR_VFP2 -> | |||
| 4165 | // DPR inflation. | |||
| 4166 | array_pod_sort(InflateRegs.begin(), InflateRegs.end()); | |||
| 4167 | InflateRegs.erase(std::unique(InflateRegs.begin(), InflateRegs.end()), | |||
| 4168 | InflateRegs.end()); | |||
| 4169 | LLVM_DEBUG(dbgs() << "Trying to inflate " << InflateRegs.size() | |||
| 4170 | << " regs.\n"); | |||
| 4171 | for (unsigned i = 0, e = InflateRegs.size(); i != e; ++i) { | |||
| 4172 | Register Reg = InflateRegs[i]; | |||
| 4173 | if (MRI->reg_nodbg_empty(Reg)) | |||
| 4174 | continue; | |||
| 4175 | if (MRI->recomputeRegClass(Reg)) { | |||
| 4176 | LLVM_DEBUG(dbgs() << printReg(Reg) << " inflated to " | |||
| 4177 | << TRI->getRegClassName(MRI->getRegClass(Reg)) << '\n'); | |||
| 4178 | ++NumInflated; | |||
| 4179 | ||||
| 4180 | LiveInterval &LI = LIS->getInterval(Reg); | |||
| 4181 | if (LI.hasSubRanges()) { | |||
| 4182 | // If the inflated register class does not support subregisters anymore, | |||
| 4183 | // remove the subranges. | |||
| 4184 | if (!MRI->shouldTrackSubRegLiveness(Reg)) { | |||
| 4185 | LI.clearSubRanges(); | |||
| 4186 | } else { | |||
| 4187 | #ifndef NDEBUG | |||
| 4188 | LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg); | |||
| 4189 | // If subranges are still supported, then the same subregs | |||
| 4190 | // should still be supported. | |||
| 4191 | for (LiveInterval::SubRange &S : LI.subranges()) { | |||
| 4192 | assert((S.LaneMask & ~MaxMask).none()); | |||
| 4193 | } | |||
| 4194 | #endif | |||
| 4195 | } | |||
| 4196 | } | |||
| 4197 | } | |||
| 4198 | } | |||
| 4199 | ||||
| 4200 | // After coalescing, update any PHIs that are being tracked by debug-info | |||
| 4201 | // with their new VReg locations. | |||
| 4202 | for (auto &p : MF->DebugPHIPositions) { | |||
| 4203 | auto it = PHIValToPos.find(p.first); | |||
| 4204 | assert(it != PHIValToPos.end()); | |||
| 4205 | p.second.Reg = it->second.Reg; | |||
| 4206 | p.second.SubReg = it->second.SubReg; | |||
| 4207 | } | |||
| 4208 | ||||
| 4209 | PHIValToPos.clear(); | |||
| 4210 | RegToPHIIdx.clear(); | |||
| 4211 | ||||
| 4212 | LLVM_DEBUG(dump()); | |||
| 4213 | if (VerifyCoalescing) | |||
| 4214 | MF->verify(this, "After register coalescing"); | |||
| 4215 | return true; | |||
| 4216 | } | |||
| 4217 | ||||
| 4218 | void RegisterCoalescer::print(raw_ostream &O, const Module* m) const { | |||
| 4219 | LIS->print(O, m); | |||
| 4220 | } |
| 1 | //===- llvm/CodeGen/LiveInterval.h - Interval representation ----*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file implements the LiveRange and LiveInterval classes. Given some |
| 10 | // numbering of each of the machine instructions, an interval [i, j) is said to be a
| 11 | // live range for register v if there is no instruction with number j' >= j |
| 12 | // such that v is live at j' and there is no instruction with number i' < i such |
| 13 | // that v is live at i'. In this implementation ranges can have holes, |
| 14 | // i.e. a range might look like [1,20), [50,65), [1000,1001). Each |
| 15 | // individual segment is represented as an instance of LiveRange::Segment, |
| 16 | // and the whole range is represented as an instance of LiveRange. |
| 17 | // |
| 18 | //===----------------------------------------------------------------------===// |
| 19 | |
| 20 | #ifndef LLVM_CODEGEN_LIVEINTERVAL_H |
| 21 | #define LLVM_CODEGEN_LIVEINTERVAL_H |
| 22 | |
| 23 | #include "llvm/ADT/ArrayRef.h" |
| 24 | #include "llvm/ADT/IntEqClasses.h" |
| 25 | #include "llvm/ADT/STLExtras.h" |
| 26 | #include "llvm/ADT/SmallVector.h" |
| 27 | #include "llvm/ADT/iterator_range.h" |
| 28 | #include "llvm/CodeGen/Register.h" |
| 29 | #include "llvm/CodeGen/SlotIndexes.h" |
| 30 | #include "llvm/MC/LaneBitmask.h" |
| 31 | #include "llvm/Support/Allocator.h" |
| 32 | #include "llvm/Support/MathExtras.h" |
| 33 | #include <algorithm> |
| 34 | #include <cassert> |
| 35 | #include <cstddef> |
| 36 | #include <functional> |
| 37 | #include <memory> |
| 38 | #include <set> |
| 39 | #include <tuple> |
| 40 | #include <utility> |
| 41 | |
| 42 | namespace llvm { |
| 43 | |
| 44 | class CoalescerPair; |
| 45 | class LiveIntervals; |
| 46 | class MachineRegisterInfo; |
| 47 | class raw_ostream; |
| 48 | |
| 49 | /// VNInfo - Value Number Information. |
| 50 | /// This class holds information about a machine level value, including
| 51 | /// definition and use points. |
| 52 | /// |
| 53 | class VNInfo { |
| 54 | public: |
| 55 | using Allocator = BumpPtrAllocator; |
| 56 | |
| 57 | /// The ID number of this value. |
| 58 | unsigned id; |
| 59 | |
| 60 | /// The index of the defining instruction. |
| 61 | SlotIndex def; |
| 62 | |
| 63 | /// VNInfo constructor. |
| 64 | VNInfo(unsigned i, SlotIndex d) : id(i), def(d) {} |
| 65 | |
| 66 | /// VNInfo constructor, copies values from orig, except for the value number. |
| 67 | VNInfo(unsigned i, const VNInfo &orig) : id(i), def(orig.def) {} |
| 68 | |
| 69 | /// Copy from the parameter into this VNInfo. |
| 70 | void copyFrom(VNInfo &src) { |
| 71 | def = src.def; |
| 72 | } |
| 73 | |
| 74 | /// Returns true if this value is defined by a PHI instruction (or was, |
| 75 | /// PHI instructions may have been eliminated). |
| 76 | /// PHI-defs begin at a block boundary, all other defs begin at register or |
| 77 | /// EC slots. |
| 78 | bool isPHIDef() const { return def.isBlock(); } |
| 79 | |
| 80 | /// Returns true if this value is unused. |
| 81 | bool isUnused() const { return !def.isValid(); } |
| 82 | |
| 83 | /// Mark this value as unused. |
| 84 | void markUnused() { def = SlotIndex(); } |
| 85 | }; |
| 86 | |
| 87 | /// Result of a LiveRange query. This class hides the implementation details |
| 88 | /// of live ranges, and it should be used as the primary interface for |
| 89 | /// examining live ranges around instructions. |
| 90 | class LiveQueryResult { |
| 91 | VNInfo *const EarlyVal; |
| 92 | VNInfo *const LateVal; |
| 93 | const SlotIndex EndPoint; |
| 94 | const bool Kill; |
| 95 | |
| 96 | public: |
| 97 | LiveQueryResult(VNInfo *EarlyVal, VNInfo *LateVal, SlotIndex EndPoint, |
| 98 | bool Kill) |
| 99 | : EarlyVal(EarlyVal), LateVal(LateVal), EndPoint(EndPoint), Kill(Kill) |
| 100 | {} |
| 101 | |
| 102 | /// Return the value that is live-in to the instruction. This is the value |
| 103 | /// that will be read by the instruction's use operands. Return NULL if no |
| 104 | /// value is live-in. |
| 105 | VNInfo *valueIn() const { |
| 106 | return EarlyVal; |
| 107 | } |
| 108 | |
| 109 | /// Return true if the live-in value is killed by this instruction. This |
| 110 | /// means that either the live range ends at the instruction, or it changes |
| 111 | /// value. |
| 112 | bool isKill() const { |
| 113 | return Kill; |
| 114 | } |
| 115 | |
| 116 | /// Return true if this instruction has a dead def. |
| 117 | bool isDeadDef() const { |
| 118 | return EndPoint.isDead(); |
| 119 | } |
| 120 | |
| 121 | /// Return the value leaving the instruction, if any. This can be a |
| 122 | /// live-through value, or a live def. A dead def returns NULL. |
| 123 | VNInfo *valueOut() const { |
| 124 | return isDeadDef() ? nullptr : LateVal; |
| 125 | } |
| 126 | |
| 127 | /// Returns the value alive at the end of the instruction, if any. This can |
| 128 | /// be a live-through value, a live def or a dead def. |
| 129 | VNInfo *valueOutOrDead() const { |
| 130 | return LateVal; |
| 131 | } |
| 132 | |
| 133 | /// Return the value defined by this instruction, if any. This includes |
| 134 | /// dead defs, it is the value created by the instruction's def operands. |
| 135 | VNInfo *valueDefined() const { |
| 136 | return EarlyVal == LateVal ? nullptr : LateVal; |
| 137 | } |
| 138 | |
| 139 | /// Return the end point of the last live range segment to interact with |
| 140 | /// the instruction, if any. |
| 141 | /// |
| 142 | /// The end point is an invalid SlotIndex only if the live range doesn't |
| 143 | /// intersect the instruction at all. |
| 144 | /// |
| 145 | /// The end point may be at or past the end of the instruction's basic |
| 146 | /// block. That means the value was live out of the block. |
| 147 | SlotIndex endPoint() const { |
| 148 | return EndPoint; |
| 149 | } |
| 150 | }; |
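A note on the accessor algebra above: valueDefined() reports a def only when the value changes across the instruction, while valueOut() hides dead defs and valueOutOrDead() keeps them. A simplified standalone mirror using int value ids (0 meaning "none") instead of VNInfo pointers, purely to illustrate those distinctions:

    #include <iostream>

    // Int value ids stand in for VNInfo*; 0 means "no value".
    struct QueryResult {
      int EarlyVal, LateVal; // value live-in / value leaving the instruction
      bool DeadDef;          // the def dies at the instruction itself

      int valueIn() const { return EarlyVal; }
      int valueOut() const { return DeadDef ? 0 : LateVal; }
      int valueOutOrDead() const { return LateVal; }
      int valueDefined() const { return EarlyVal == LateVal ? 0 : LateVal; }
    };

    int main() {
      QueryResult LiveThrough{1, 1, false}; // the same value flows through
      QueryResult Redef{1, 2, false};       // value 1 in, new value 2 out
      QueryResult Dead{0, 3, true};         // a def that is immediately dead
      std::cout << LiveThrough.valueDefined() // 0: no new def
                << Redef.valueDefined()       // 2: redefinition
                << Dead.valueOut()            // 0: dead defs are not live out
                << Dead.valueOutOrDead()      // 3: but still count as defs
                << '\n';
    }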
| 151 | |
| 152 | /// This class represents the liveness of a register, stack slot, etc. |
| 153 | /// It manages an ordered list of Segment objects. |
| 154 | /// The Segments are organized in a static single assignment form: At places |
| 155 | /// where a new value is defined or different values reach a CFG join, a new
| 156 | /// segment with a new value number is used. |
| 157 | class LiveRange { |
| 158 | public: |
| 159 | /// This represents a simple continuous liveness interval for a value. |
| 160 | /// The start point is inclusive, the end point exclusive. These intervals |
| 161 | /// are rendered as [start,end). |
| 162 | struct Segment { |
| 163 | SlotIndex start; // Start point of the interval (inclusive) |
| 164 | SlotIndex end; // End point of the interval (exclusive) |
| 165 | VNInfo *valno = nullptr; // identifier for the value contained in this |
| 166 | // segment. |
| 167 | |
| 168 | Segment() = default; |
| 169 | |
| 170 | Segment(SlotIndex S, SlotIndex E, VNInfo *V) |
| 171 | : start(S), end(E), valno(V) { |
| 172 | assert(S < E && "Cannot create empty or backwards segment");
| 173 | } |
| 174 | |
| 175 | /// Return true if the index is covered by this segment. |
| 176 | bool contains(SlotIndex I) const { |
| 177 | return start <= I && I < end; |
| 178 | } |
| 179 | |
| 180 | /// Return true if the given interval, [S, E), is covered by this segment. |
| 181 | bool containsInterval(SlotIndex S, SlotIndex E) const { |
| 182 | assert((S < E) && "Backwards interval?");
| 183 | return (start <= S && S < end) && (start < E && E <= end); |
| 184 | } |
| 185 | |
| 186 | bool operator<(const Segment &Other) const { |
| 187 | return std::tie(start, end) < std::tie(Other.start, Other.end); |
| 188 | } |
| 189 | bool operator==(const Segment &Other) const { |
| 190 | return start == Other.start && end == Other.end; |
| 191 | } |
| 192 | |
| 193 | bool operator!=(const Segment &Other) const { |
| 194 | return !(*this == Other); |
| 195 | } |
| 196 | |
| 197 | void dump() const; |
| 198 | }; |
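The segments are half-open, [start, end): the start slot is covered, the end slot is not, so back-to-back segments such as [1,5) and [5,9) do not overlap. A tiny self-contained check of those conventions, with ints standing in for SlotIndex:

    #include <cassert>

    // Ints stand in for SlotIndex.
    struct Seg {
      int start, end;
      bool contains(int I) const { return start <= I && I < end; }
      bool containsInterval(int S, int E) const {
        return (start <= S && S < end) && (start < E && E <= end);
      }
    };

    int main() {
      Seg S{1, 5};
      assert(S.contains(1));            // the start slot is inclusive
      assert(!S.contains(5));           // the end slot is exclusive
      assert(S.containsInterval(2, 5)); // a sub-interval may end at the end
      assert(!S.containsInterval(4, 6));
      return 0;
    }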
| 199 | |
| 200 | using Segments = SmallVector<Segment, 2>; |
| 201 | using VNInfoList = SmallVector<VNInfo *, 2>; |
| 202 | |
| 203 | Segments segments; // the liveness segments |
| 204 | VNInfoList valnos; // value#'s |
| 205 | |
| 206 | // The segment set is used temporarily to accelerate initial computation |
| 207 | // of live ranges of physical registers in computeRegUnitRange. |
| 208 | // After that the set is flushed to the segment vector and deleted. |
| 209 | using SegmentSet = std::set<Segment>; |
| 210 | std::unique_ptr<SegmentSet> segmentSet; |
| 211 | |
| 212 | using iterator = Segments::iterator; |
| 213 | using const_iterator = Segments::const_iterator; |
| 214 | |
| 215 | iterator begin() { return segments.begin(); } |
| 216 | iterator end() { return segments.end(); } |
| 217 | |
| 218 | const_iterator begin() const { return segments.begin(); } |
| 219 | const_iterator end() const { return segments.end(); } |
| 220 | |
| 221 | using vni_iterator = VNInfoList::iterator; |
| 222 | using const_vni_iterator = VNInfoList::const_iterator; |
| 223 | |
| 224 | vni_iterator vni_begin() { return valnos.begin(); } |
| 225 | vni_iterator vni_end() { return valnos.end(); } |
| 226 | |
| 227 | const_vni_iterator vni_begin() const { return valnos.begin(); } |
| 228 | const_vni_iterator vni_end() const { return valnos.end(); } |
| 229 | |
| 230 | /// Constructs a new LiveRange object. |
| 231 | LiveRange(bool UseSegmentSet = false) |
| 232 | : segmentSet(UseSegmentSet ? std::make_unique<SegmentSet>() |
| 233 | : nullptr) {} |
| 234 | |
| 235 | /// Constructs a new LiveRange object by copying segments and valnos from |
| 236 | /// another LiveRange. |
| 237 | LiveRange(const LiveRange &Other, BumpPtrAllocator &Allocator) { |
| 238 | assert(Other.segmentSet == nullptr &&
| 239 | "Copying of LiveRanges with active SegmentSets is not supported");
| 240 | assign(Other, Allocator); |
| 241 | } |
| 242 | |
| 243 | /// Copies values numbers and live segments from \p Other into this range. |
| 244 | void assign(const LiveRange &Other, BumpPtrAllocator &Allocator) { |
| 245 | if (this == &Other) |
| 246 | return; |
| 247 | |
| 248 | assert(Other.segmentSet == nullptr &&
| 249 | "Copying of LiveRanges with active SegmentSets is not supported");
| 250 | // Duplicate valnos. |
| 251 | for (const VNInfo *VNI : Other.valnos) |
| 252 | createValueCopy(VNI, Allocator); |
| 253 | // Now we can copy segments and remap their valnos. |
| 254 | for (const Segment &S : Other.segments) |
| 255 | segments.push_back(Segment(S.start, S.end, valnos[S.valno->id])); |
| 256 | } |
| 257 | |
| 258 | /// advanceTo - Advance the specified iterator to point to the Segment |
| 259 | /// containing the specified position, or end() if the position is past the |
| 260 | /// end of the range. If no Segment contains this position, but the |
| 261 | /// position is in a hole, this method returns an iterator pointing to the |
| 262 | /// Segment immediately after the hole. |
| 263 | iterator advanceTo(iterator I, SlotIndex Pos) { |
| 264 | assert(I != end());
| 265 | if (Pos >= endIndex()) |
| 266 | return end(); |
| 267 | while (I->end <= Pos) ++I; |
| 268 | return I; |
| 269 | } |
| 270 | |
| 271 | const_iterator advanceTo(const_iterator I, SlotIndex Pos) const { |
| 272 | assert(I != end());
| 273 | if (Pos >= endIndex()) |
| 274 | return end(); |
| 275 | while (I->end <= Pos) ++I; |
| 276 | return I; |
| 277 | } |
| 278 | |
| 279 | /// find - Return an iterator pointing to the first segment that ends after |
| 280 | /// Pos, or end(). This is the same as advanceTo(begin(), Pos), but faster |
| 281 | /// when searching large ranges. |
| 282 | /// |
| 283 | /// If Pos is contained in a Segment, that segment is returned. |
| 284 | /// If Pos is in a hole, the following Segment is returned. |
| 285 | /// If Pos is beyond endIndex, end() is returned. |
| 286 | iterator find(SlotIndex Pos); |
| 287 | |
| 288 | const_iterator find(SlotIndex Pos) const { |
| 289 | return const_cast<LiveRange*>(this)->find(Pos); |
| 290 | } |
| 291 | |
| 292 | void clear() { |
| 293 | valnos.clear(); |
| 294 | segments.clear(); |
| 295 | } |
| 296 | |
| 297 | size_t size() const { |
| 298 | return segments.size(); |
| 299 | } |
| 300 | |
| 301 | bool hasAtLeastOneValue() const { return !valnos.empty(); } |
| 302 | |
| 303 | bool containsOneValue() const { return valnos.size() == 1; } |
| 304 | |
| 305 | unsigned getNumValNums() const { return (unsigned)valnos.size(); } |
| 306 | |
| 307 | /// getValNumInfo - Returns pointer to the specified val#. |
| 308 | /// |
| 309 | inline VNInfo *getValNumInfo(unsigned ValNo) { |
| 310 | return valnos[ValNo]; |
| 311 | } |
| 312 | inline const VNInfo *getValNumInfo(unsigned ValNo) const { |
| 313 | return valnos[ValNo]; |
| 314 | } |
| 315 | |
| 316 | /// containsValue - Returns true if VNI belongs to this range. |
| 317 | bool containsValue(const VNInfo *VNI) const { |
| 318 | return VNI && VNI->id < getNumValNums() && VNI == getValNumInfo(VNI->id); |
| 319 | } |
| 320 | |
| 321 | /// getNextValue - Create a new value number and return it. @p def specifies
| 322 | /// the instruction that defines the value number. |
| 323 | VNInfo *getNextValue(SlotIndex def, VNInfo::Allocator &VNInfoAllocator) { |
| 324 | VNInfo *VNI = |
| 325 | new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def); |
| 326 | valnos.push_back(VNI); |
| 327 | return VNI; |
| 328 | } |
| 329 | |
| 330 | /// createDeadDef - Make sure the range has a value defined at Def. |
| 331 | /// If one already exists, return it. Otherwise allocate a new value and |
| 332 | /// add liveness for a dead def. |
| 333 | VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator &VNIAlloc); |
| 334 | |
| 335 | /// Create a def of value @p VNI. Return @p VNI. If there already exists |
| 336 | /// a definition at VNI->def, the value defined there must be @p VNI. |
| 337 | VNInfo *createDeadDef(VNInfo *VNI); |
| 338 | |
| 339 | /// Create a copy of the given value. The new value will be identical except |
| 340 | /// for the Value number. |
| 341 | VNInfo *createValueCopy(const VNInfo *orig, |
| 342 | VNInfo::Allocator &VNInfoAllocator) { |
| 343 | VNInfo *VNI = |
| 344 | new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), *orig); |
| 345 | valnos.push_back(VNI); |
| 346 | return VNI; |
| 347 | } |
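| | |
| | // Annotation (not from the original header): sketch of building a small |
| | // live range by hand with getNextValue() and addSegment(). `Alloc` (a |
| | // VNInfo::Allocator), `DefIdx`, and `KillIdx` are hypothetical names, |
| | // with DefIdx < KillIdx assumed. |
| | // |
| | //   LiveRange LR; |
| | //   VNInfo *VNI = LR.getNextValue(DefIdx, Alloc); // value #0, def at DefIdx |
| | //   LR.addSegment(LiveRange::Segment(DefIdx, KillIdx, VNI)); |
| | //   // LR.getNumValNums() == 1 and LR.liveAt(DefIdx) now hold. |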
| 348 | |
| 349 | /// RenumberValues - Renumber all values in order of appearance and remove |
| 350 | /// unused values. |
| 351 | void RenumberValues(); |
| 352 | |
| 353 | /// MergeValueNumberInto - This method is called when two value numbers |
| 354 | /// are found to be equivalent. This eliminates V1, replacing all |
| 355 | /// segments with the V1 value number with the V2 value number. This can |
| 356 | /// cause merging of V1/V2 value numbers and compaction of the value space. |
| 357 | VNInfo* MergeValueNumberInto(VNInfo *V1, VNInfo *V2); |
| 358 | |
| 359 | /// Merge all of the live segments of a specific val# in RHS into this live |
| 360 | /// range as the specified value number. The segments in RHS are allowed |
| 361 | /// to overlap with segments in the current range; the value numbers of |
| 362 | /// the overlapped live segments are replaced with the specified value |
| 363 | /// number. |
| 364 | void MergeSegmentsInAsValue(const LiveRange &RHS, VNInfo *LHSValNo); |
| 365 | |
| 366 | /// MergeValueInAsValue - Merge all of the segments of a specific val# |
| 367 | /// in RHS into this live range as the specified value number. |
| 368 | /// The segments in RHS are allowed to overlap with segments in the |
| 369 | /// current range, but only if the overlapping segments have the |
| 370 | /// specified value number. |
| 371 | void MergeValueInAsValue(const LiveRange &RHS, |
| 372 | const VNInfo *RHSValNo, VNInfo *LHSValNo); |
| 373 | |
| 374 | bool empty() const { return segments.empty(); } |
| 375 | |
| 376 | /// beginIndex - Return the lowest numbered slot covered. |
| 377 | SlotIndex beginIndex() const { |
| 378 | assert(!empty() && "Call to beginIndex() on empty range.")((void)0); |
| 379 | return segments.front().start; |
| 380 | } |
| 381 | |
| 382 | /// endIndex - Return the maximum point of the whole range, |
| 383 | /// exclusive. |
| 384 | SlotIndex endIndex() const { |
| 385 | assert(!empty() && "Call to endIndex() on empty range.")((void)0); |
| 386 | return segments.back().end; |
| 387 | } |
| 388 | |
| 389 | bool expiredAt(SlotIndex index) const { |
| 390 | return index >= endIndex(); |
| 391 | } |
| 392 | |
| 393 | bool liveAt(SlotIndex index) const { |
| 394 | const_iterator r = find(index); |
| 395 | return r != end() && r->start <= index; |
| 396 | } |
| 397 | |
| 398 | /// Return the segment that contains the specified index, or null if there |
| 399 | /// is none. |
| 400 | const Segment *getSegmentContaining(SlotIndex Idx) const { |
| 401 | const_iterator I = FindSegmentContaining(Idx); |
| 402 | return I == end() ? nullptr : &*I; |
| 403 | } |
| 404 | |
| 405 | /// Return the live segment that contains the specified index, or null if |
| 406 | /// there is none. |
| 407 | Segment *getSegmentContaining(SlotIndex Idx) { |
| 408 | iterator I = FindSegmentContaining(Idx); |
| 409 | return I == end() ? nullptr : &*I; |
| 410 | } |
| 411 | |
| 412 | /// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL. |
| 413 | VNInfo *getVNInfoAt(SlotIndex Idx) const { |
| 414 | const_iterator I = FindSegmentContaining(Idx); |
| 415 | return I == end() ? nullptr : I->valno; |
| 416 | } |
| 417 | |
| 418 | /// getVNInfoBefore - Return the VNInfo that is live up to but not |
| 419 | /// necessarily including Idx, or NULL. Use this to find the reaching def |
| 420 | /// used by an instruction at this SlotIndex position. |
| 421 | VNInfo *getVNInfoBefore(SlotIndex Idx) const { |
| 422 | const_iterator I = FindSegmentContaining(Idx.getPrevSlot()); |
| 423 | return I == end() ? nullptr : I->valno; |
| 424 | } |
| 425 | |
| 426 | /// Return an iterator to the segment that contains the specified index, or |
| 427 | /// end() if there is none. |
| 428 | iterator FindSegmentContaining(SlotIndex Idx) { |
| 429 | iterator I = find(Idx); |
| 430 | return I != end() && I->start <= Idx ? I : end(); |
| 431 | } |
| 432 | |
| 433 | const_iterator FindSegmentContaining(SlotIndex Idx) const { |
| 434 | const_iterator I = find(Idx); |
| 435 | return I != end() && I->start <= Idx ? I : end(); |
| 436 | } |
| 437 | |
| 438 | /// overlaps - Return true if the intersection of the two live ranges is |
| 439 | /// not empty. |
| 440 | bool overlaps(const LiveRange &other) const { |
| 441 | if (other.empty()) |
| 442 | return false; |
| 443 | return overlapsFrom(other, other.begin()); |
| 444 | } |
| 445 | |
| 446 | /// overlaps - Return true if the two ranges have overlapping segments |
| 447 | /// that are not coalescable according to CP. |
| 448 | /// |
| 449 | /// Overlapping segments where one range is defined by a coalescable |
| 450 | /// copy are allowed. |
| 451 | bool overlaps(const LiveRange &Other, const CoalescerPair &CP, |
| 452 | const SlotIndexes&) const; |
| 453 | |
| 454 | /// overlaps - Return true if the live range overlaps an interval specified |
| 455 | /// by [Start, End). |
| 456 | bool overlaps(SlotIndex Start, SlotIndex End) const; |
| 457 | |
| 458 | /// overlapsFrom - Return true if the intersection of the two live ranges |
| 459 | /// is not empty. The specified iterator is a hint that we can begin |
| 460 | /// scanning the Other range starting at I. |
| 461 | bool overlapsFrom(const LiveRange &Other, const_iterator StartPos) const; |
| 462 | |
| 463 | /// Returns true if all segments of the @p Other live range are completely |
| 464 | /// covered by this live range. |
| 465 | /// Adjacent live ranges do not affect the covering: the live range |
| 466 | /// [1,5](5,10] covers (3,7]. |
| 467 | bool covers(const LiveRange &Other) const; |
| 468 | |
| 469 | /// Add the specified Segment to this range, merging segments as |
| 470 | /// appropriate. This returns an iterator to the inserted segment (which |
| 471 | /// may have grown since it was inserted). |
| 472 | iterator addSegment(Segment S); |
| 473 | |
| 474 | /// Attempt to extend a value defined after @p StartIdx to include @p Use. |
| 475 | /// Both @p StartIdx and @p Use should be in the same basic block. In case |
| 476 | /// of subranges, an extension could be prevented by an explicit "undef" |
| 477 | /// caused by a <def,read-undef> on a non-overlapping lane. The |
| 478 | /// locations of such "undefs" should be provided in @p Undefs. |
| 479 | /// The return value is a pair: the first element is VNInfo of the value |
| 480 | /// that was extended (possibly nullptr), the second is a boolean value |
| 481 | /// indicating whether an "undef" was encountered. |
| 482 | /// If this range is live before @p Use in the basic block that starts at |
| 483 | /// @p StartIdx, and there is no intervening "undef", extend it to be live |
| 484 | /// up to @p Use, and return the pair {value, false}. If there is no |
| 485 | /// segment before @p Use and there is no "undef" between @p StartIdx and |
| 486 | /// @p Use, return {nullptr, false}. If there is an "undef" before @p Use, |
| 487 | /// return {nullptr, true}. |
| 488 | std::pair<VNInfo*,bool> extendInBlock(ArrayRef<SlotIndex> Undefs, |
| 489 | SlotIndex StartIdx, SlotIndex Kill); |
| 490 | |
| 491 | /// Simplified version of the above "extendInBlock", which assumes that |
| 492 | /// no register lanes are undefined by <def,read-undef> operands. |
| 493 | /// If this range is live before @p Use in the basic block that starts |
| 494 | /// at @p StartIdx, extend it to be live up to @p Use, and return the |
| 495 | /// value. If there is no segment before @p Use, return nullptr. |
| 496 | VNInfo *extendInBlock(SlotIndex StartIdx, SlotIndex Kill); |
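| | |
| | // Annotation (not from the original header): sketch of decoding the pair |
| | // returned by the Undefs-aware extendInBlock() overload above. `LR`, |
| | // `Undefs`, `BBStart`, and `UseIdx` are hypothetical names. |
| | // |
| | //   std::pair<VNInfo *, bool> Res = |
| | //       LR.extendInBlock(Undefs, BBStart, UseIdx); |
| | //   if (Res.first) { |
| | //     // Live before UseIdx; the range has been extended up to UseIdx. |
| | //   } else if (Res.second) { |
| | //     // An explicit "undef" between BBStart and UseIdx blocks extension. |
| | //   } else { |
| | //     // No segment before UseIdx in this block; nothing was extended. |
| | //   } |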
| 497 | |
| 498 | /// join - Join two live ranges (this, and other) together. This applies |
| 499 | /// mappings to the value numbers in the LHS/RHS ranges as specified. If |
| 500 | /// the ranges are not joinable, this aborts. |
| 501 | void join(LiveRange &Other, |
| 502 | const int *ValNoAssignments, |
| 503 | const int *RHSValNoAssignments, |
| 504 | SmallVectorImpl<VNInfo *> &NewVNInfo); |
| 505 | |
| 506 | /// True iff this range is a single segment that lies between the |
| 507 | /// specified boundaries, exclusively. Vregs live across a backedge are not |
| 508 | /// considered local. The boundaries are expected to lie within an extended |
| 509 | /// basic block, so vregs that are not live out should contain no holes. |
| 510 | bool isLocal(SlotIndex Start, SlotIndex End) const { |
| 511 | return beginIndex() > Start.getBaseIndex() && |
| 512 | endIndex() < End.getBoundaryIndex(); |
| 513 | } |
| 514 | |
| 515 | /// Remove the specified segment from this range. Note that the segment |
| 516 | /// must be a single Segment in its entirety. |
| 517 | void removeSegment(SlotIndex Start, SlotIndex End, |
| 518 | bool RemoveDeadValNo = false); |
| 519 | |
| 520 | void removeSegment(Segment S, bool RemoveDeadValNo = false) { |
| 521 | removeSegment(S.start, S.end, RemoveDeadValNo); |
| 522 | } |
| 523 | |
| 524 | /// Remove segment pointed to by iterator @p I from this range. This does |
| 525 | /// not remove dead value numbers. |
| 526 | iterator removeSegment(iterator I) { |
| 527 | return segments.erase(I); |
| 528 | } |
| 529 | |
| 530 | /// Query Liveness at Idx. |
| 531 | /// The sub-instruction slot of Idx doesn't matter; only the instruction |
| 532 | /// it refers to is considered. |
| 533 | LiveQueryResult Query(SlotIndex Idx) const { |
| 534 | // Find the segment that enters the instruction. |
| 535 | const_iterator I = find(Idx.getBaseIndex()); |
| 536 | const_iterator E = end(); |
| 537 | if (I == E) |
| 538 | return LiveQueryResult(nullptr, nullptr, SlotIndex(), false); |
| 539 | |
| 540 | // Is this an instruction live-in segment? |
| 541 | // If Idx is the start index of a basic block, include live-in segments |
| 542 | // that start at Idx.getBaseIndex(). |
| 543 | VNInfo *EarlyVal = nullptr; |
| 544 | VNInfo *LateVal = nullptr; |
| 545 | SlotIndex EndPoint; |
| 546 | bool Kill = false; |
| 547 | if (I->start <= Idx.getBaseIndex()) { |
| 548 | EarlyVal = I->valno; |
| 549 | EndPoint = I->end; |
| 550 | // Move to the potentially live-out segment. |
| 551 | if (SlotIndex::isSameInstr(Idx, I->end)) { |
| 552 | Kill = true; |
| 553 | if (++I == E) |
| 554 | return LiveQueryResult(EarlyVal, LateVal, EndPoint, Kill); |
| 555 | } |
| 556 | // Special case: A PHIDef value can have its def in the middle of a |
| 557 | // segment if the value happens to be live out of the layout |
| 558 | // predecessor. |
| 559 | // Such a value is not live-in. |
| 560 | if (EarlyVal->def == Idx.getBaseIndex()) |
| 561 | EarlyVal = nullptr; |
| 562 | } |
| 563 | // I now points to the segment that may be live-through, or defined by |
| 564 | // this instr. Ignore segments starting after the current instr. |
| 565 | if (!SlotIndex::isEarlierInstr(Idx, I->start)) { |
| 566 | LateVal = I->valno; |
| 567 | EndPoint = I->end; |
| 568 | } |
| 569 | return LiveQueryResult(EarlyVal, LateVal, EndPoint, Kill); |
| 570 | } |
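| | |
| | // Annotation (not from the original header): typical use of Query(), |
| | // relying on the LiveQueryResult accessors (valueIn(), valueDefined(), |
| | // isKill()) declared earlier in this header. `LR` and `MIIdx` are |
| | // hypothetical names. |
| | // |
| | //   LiveQueryResult LRQ = LR.Query(MIIdx); |
| | //   if (VNInfo *LiveIn = LRQ.valueIn()) { |
| | //     // A value is live-in to the instruction at MIIdx. |
| | //     if (LRQ.isKill()) { /* ...and this instruction kills it. */ } |
| | //   } |
| | //   if (VNInfo *Def = LRQ.valueDefined()) { /* a value is defined here */ } |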
| 571 | |
| 572 | /// removeValNo - Remove all the segments defined by the specified value#. |
| 573 | /// Also remove the value# from value# list. |
| 574 | void removeValNo(VNInfo *ValNo); |
| 575 | |
| 576 | /// Returns true if the live range is zero length, i.e. no live segments |
| 577 | /// span instructions. It doesn't pay to spill such a range. |
| 578 | bool isZeroLength(SlotIndexes *Indexes) const { |
| 579 | for (const Segment &S : segments) |
| 580 | if (Indexes->getNextNonNullIndex(S.start).getBaseIndex() < |
| 581 | S.end.getBaseIndex()) |
| 582 | return false; |
| 583 | return true; |
| 584 | } |
| 585 | |
| 586 | // Returns true if any segment in the live range contains any of the |
| 587 | // provided slot indexes. Slots which occur in holes between |
| 588 | // segments will not cause the function to return true. |
| 589 | bool isLiveAtIndexes(ArrayRef<SlotIndex> Slots) const; |
| 590 | |
| 591 | bool operator<(const LiveRange& other) const { |
| 592 | const SlotIndex &thisIndex = beginIndex(); |
| 593 | const SlotIndex &otherIndex = other.beginIndex(); |
| 594 | return thisIndex < otherIndex; |
| 595 | } |
| 596 | |
| 597 | /// Returns true if there is an explicit "undef" between @p Begin |
| 598 | /// and @p End. |
| 599 | bool isUndefIn(ArrayRef<SlotIndex> Undefs, SlotIndex Begin, |
| 600 | SlotIndex End) const { |
| 601 | return llvm::any_of(Undefs, [Begin, End](SlotIndex Idx) -> bool { |
| 602 | return Begin <= Idx && Idx < End; |
| 603 | }); |
| 604 | } |
| 605 | |
| 606 | /// Flush segment set into the regular segment vector. |
| 607 | /// The method is to be called after the live range |
| 608 | /// has been created, if use of the segment set was |
| 609 | /// activated in the constructor of the live range. |
| 610 | void flushSegmentSet(); |
| 611 | |
| 612 | /// Stores indexes from the input index sequence R at which this LiveRange |
| 613 | /// is live to the output O iterator. |
| 614 | /// R is a range of _ascending sorted_ _random_ access iterators |
| 615 | /// over the input indexes. Indexes stored at O are ascending sorted, |
| 616 | /// so the output can be used directly in a subsequent search (for |
| 617 | /// example for subranges). Returns true if at least one index was found. |
| 618 | template <typename Range, typename OutputIt> |
| 619 | bool findIndexesLiveAt(Range &&R, OutputIt O) const { |
| 620 | assert(llvm::is_sorted(R))((void)0); |
| 621 | auto Idx = R.begin(), EndIdx = R.end(); |
| 622 | auto Seg = segments.begin(), EndSeg = segments.end(); |
| 623 | bool Found = false; |
| 624 | while (Idx != EndIdx && Seg != EndSeg) { |
| 625 | // If Seg ends at or before *Idx, use binary search to find the first |
| 626 | // segment that ends after *Idx. |
| 627 | if (Seg->end <= *Idx) { |
| 628 | Seg = std::upper_bound( |
| 629 | ++Seg, EndSeg, *Idx, |
| 630 | [=](std::remove_reference_t<decltype(*Idx)> V, |
| 631 | const std::remove_reference_t<decltype(*Seg)> &S) { |
| 632 | return V < S.end; |
| 633 | }); |
| 634 | if (Seg == EndSeg) |
| 635 | break; |
| 636 | } |
| 637 | auto NotLessStart = std::lower_bound(Idx, EndIdx, Seg->start); |
| 638 | if (NotLessStart == EndIdx) |
| 639 | break; |
| 640 | auto NotLessEnd = std::lower_bound(NotLessStart, EndIdx, Seg->end); |
| 641 | if (NotLessEnd != NotLessStart) { |
| 642 | Found = true; |
| 643 | O = std::copy(NotLessStart, NotLessEnd, O); |
| 644 | } |
| 645 | Idx = NotLessEnd; |
| 646 | ++Seg; |
| 647 | } |
| 648 | return Found; |
| 649 | } |
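| | |
| | // Annotation (not from the original header): sketch of calling |
| | // findIndexesLiveAt() with an ascending SlotIndex vector, collecting the |
| | // live subset through a back inserter. `LR` and `Slots` are hypothetical |
| | // names. |
| | // |
| | //   SmallVector<SlotIndex, 8> LiveSlots; |
| | //   bool Any = LR.findIndexesLiveAt(Slots, std::back_inserter(LiveSlots)); |
| | //   // LiveSlots now holds, in ascending order, exactly the elements of |
| | //   // Slots at which LR is live; Any is true iff that subset is non-empty. |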
| 650 | |
| 651 | void print(raw_ostream &OS) const; |
| 652 | void dump() const; |
| 653 | |
| 654 | /// Walk the range and assert if any invariants fail to hold. |
| 655 | /// |
| 656 | /// Note that this is a no-op when asserts are disabled. |
| 657 | #ifdef NDEBUG1 |
| 658 | void verify() const {} |
| 659 | #else |
| 660 | void verify() const; |
| 661 | #endif |
| 662 | |
| 663 | protected: |
| 664 | /// Append a segment to the list of segments. |
| 665 | void append(const LiveRange::Segment S); |
| 666 | |
| 667 | private: |
| 668 | friend class LiveRangeUpdater; |
| 669 | void addSegmentToSet(Segment S); |
| 670 | void markValNoForDeletion(VNInfo *V); |
| 671 | }; |
| 672 | |
| 673 | inline raw_ostream &operator<<(raw_ostream &OS, const LiveRange &LR) { |
| 674 | LR.print(OS); |
| 675 | return OS; |
| 676 | } |
| 677 | |
| 678 | /// LiveInterval - This class represents the liveness of a register, |
| 679 | /// or stack slot. |
| 680 | class LiveInterval : public LiveRange { |
| 681 | public: |
| 682 | using super = LiveRange; |
| 683 | |
| 684 | /// A live range for subregisters. The LaneMask specifies which parts of the |
| 685 | /// super register are covered by the interval. |
| 686 | /// (@sa TargetRegisterInfo::getSubRegIndexLaneMask()). |
| 687 | class SubRange : public LiveRange { |
| 688 | public: |
| 689 | SubRange *Next = nullptr; |
| 690 | LaneBitmask LaneMask; |
| 691 | |
| 692 | /// Constructs a new SubRange object. |
| 693 | SubRange(LaneBitmask LaneMask) : LaneMask(LaneMask) {} |
| 694 | |
| 695 | /// Constructs a new SubRange object by copying liveness from @p Other. |
| 696 | SubRange(LaneBitmask LaneMask, const LiveRange &Other, |
| 697 | BumpPtrAllocator &Allocator) |
| 698 | : LiveRange(Other, Allocator), LaneMask(LaneMask) {} |
| 699 | |
| 700 | void print(raw_ostream &OS) const; |
| 701 | void dump() const; |
| 702 | }; |
| 703 | |
| 704 | private: |
| 705 | SubRange *SubRanges = nullptr; ///< Single linked list of subregister live |
| 706 | /// ranges. |
| 707 | const Register Reg; // the register or stack slot of this interval. |
| 708 | float Weight = 0.0; // weight of this interval |
| 709 | |
| 710 | public: |
| 711 | Register reg() const { return Reg; } |
| 712 | float weight() const { return Weight; } |
| 713 | void incrementWeight(float Inc) { Weight += Inc; } |
| 714 | void setWeight(float Value) { Weight = Value; } |
| 715 | |
| 716 | LiveInterval(unsigned Reg, float Weight) : Reg(Reg), Weight(Weight) {} |
| 717 | |
| 718 | ~LiveInterval() { |
| 719 | clearSubRanges(); |
| 720 | } |
| 721 | |
| 722 | template<typename T> |
| 723 | class SingleLinkedListIterator { |
| 724 | T *P; |
| 725 | |
| 726 | public: |
| 727 | SingleLinkedListIterator<T>(T *P) : P(P) {} |
| 728 | |
| 729 | SingleLinkedListIterator<T> &operator++() { |
| 730 | P = P->Next; |
| 731 | return *this; |
| 732 | } |
| 733 | SingleLinkedListIterator<T> operator++(int) { |
| 734 | SingleLinkedListIterator res = *this; |
| 735 | ++*this; |
| 736 | return res; |
| 737 | } |
| 738 | bool operator!=(const SingleLinkedListIterator<T> &Other) const { |
| 739 | return P != Other.operator->(); |
| 740 | } |
| 741 | bool operator==(const SingleLinkedListIterator<T> &Other) const { |
| 742 | return P == Other.operator->(); |
| 743 | } |
| 744 | T &operator*() const { |
| 745 | return *P; |
| 746 | } |
| 747 | T *operator->() const { |
| 748 | return P; |
| 749 | } |
| 750 | }; |
| 751 | |
| 752 | using subrange_iterator = SingleLinkedListIterator<SubRange>; |
| 753 | using const_subrange_iterator = SingleLinkedListIterator<const SubRange>; |
| 754 | |
| 755 | subrange_iterator subrange_begin() { |
| 756 | return subrange_iterator(SubRanges); |
| 757 | } |
| 758 | subrange_iterator subrange_end() { |
| 759 | return subrange_iterator(nullptr); |
| 760 | } |
| 761 | |
| 762 | const_subrange_iterator subrange_begin() const { |
| 763 | return const_subrange_iterator(SubRanges); |
| 764 | } |
| 765 | const_subrange_iterator subrange_end() const { |
| 766 | return const_subrange_iterator(nullptr); |
| 767 | } |
| 768 | |
| 769 | iterator_range<subrange_iterator> subranges() { |
| 770 | return make_range(subrange_begin(), subrange_end()); |
| 771 | } |
| 772 | |
| 773 | iterator_range<const_subrange_iterator> subranges() const { |
| 774 | return make_range(subrange_begin(), subrange_end()); |
| 775 | } |
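| | |
| | // Annotation (not from the original header): iterating subregister ranges |
| | // and testing a lane. `LI` (a LiveInterval with subranges), `Mask`, and |
| | // `Idx` are hypothetical names; LaneBitmask is from llvm/MC/LaneBitmask.h. |
| | // |
| | //   for (const LiveInterval::SubRange &SR : LI.subranges()) |
| | //     if ((SR.LaneMask & Mask).any() && SR.liveAt(Idx)) { |
| | //       // Some lane in Mask is live at Idx in this subrange. |
| | //     } |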
| 776 | |
| 777 | /// Creates a new empty subregister live range. The range is added at the |
| 778 | /// beginning of the subrange list; subrange iterators stay valid. |
| 779 | SubRange *createSubRange(BumpPtrAllocator &Allocator, |
| 780 | LaneBitmask LaneMask) { |
| 781 | SubRange *Range = new (Allocator) SubRange(LaneMask); |
| 782 | appendSubRange(Range); |
| 783 | return Range; |
| 784 | } |
| 785 | |
| 786 | /// Like createSubRange() but the new range is filled with a copy of the |
| 787 | /// liveness information in @p CopyFrom. |
| 788 | SubRange *createSubRangeFrom(BumpPtrAllocator &Allocator, |
| 789 | LaneBitmask LaneMask, |
| 790 | const LiveRange &CopyFrom) { |
| 791 | SubRange *Range = new (Allocator) SubRange(LaneMask, CopyFrom, Allocator); |
| 792 | appendSubRange(Range); |
| 793 | return Range; |
| 794 | } |
| 795 | |
| 796 | /// Returns true if subregister liveness information is available. |
| 797 | bool hasSubRanges() const { |
| 798 | return SubRanges != nullptr; |
| 799 | } |
| 800 | |
| 801 | /// Removes all subregister liveness information. |
| 802 | void clearSubRanges(); |
| 803 | |
| 804 | /// Removes all subranges without any segments (subranges without segments |
| 805 | /// are not considered valid and should only exist temporarily). |
| 806 | void removeEmptySubRanges(); |
| 807 | |
| 808 | /// getSize - Returns the sum of the sizes of all the LiveRanges. |
| 809 | /// |
| 810 | unsigned getSize() const; |
| 811 | |
| 812 | /// isSpillable - Can this interval be spilled? |
| 813 | bool isSpillable() const { return Weight != huge_valf; } |
| 814 | |
| 815 | /// markNotSpillable - Mark interval as not spillable |
| 816 | void markNotSpillable() { Weight = huge_valf; } |
| 817 | |
| 818 | /// For a given lane mask @p LaneMask, compute indexes at which the |
| 819 | /// lane is marked undefined by subregister <def,read-undef> definitions. |
| 820 | void computeSubRangeUndefs(SmallVectorImpl<SlotIndex> &Undefs, |
| 821 | LaneBitmask LaneMask, |
| 822 | const MachineRegisterInfo &MRI, |
| 823 | const SlotIndexes &Indexes) const; |
| 824 | |
| 825 | /// Refines the subranges to support \p LaneMask. This may only be called |
| 826 | /// when LI.hasSubRanges()==true. Subregister ranges are split or created |
| 827 | /// until \p LaneMask can be matched exactly. \p Mod is executed on the |
| 828 | /// matching subranges. |
| 829 | /// |
| 830 | /// Example: |
| 831 | /// Given an interval with subranges with lanemasks L0F00, L00F0 and |
| 832 | /// L000F, refining for mask L0018. Will split the L00F0 lane into |
| 833 | /// L00E0 and L0010 and the L000F lane into L0007 and L0008. The Mod |
| 834 | /// function will be applied to the L0010 and L0008 subranges. |
| 835 | /// |
| 836 | /// \p Indexes and \p TRI are required to clean up the VNIs that |
| 837 | /// don't define the related lane masks after they get shrunk. E.g., |
| 838 | /// when L000F gets split into L0007 and L0008 maybe only a subset |
| 839 | /// of the VNIs that defined L000F defines L0007. |
| 840 | /// |
| 841 | /// The cleanup of the VNIs needs to look at the actual instructions |
| 842 | /// to decide what is or is not live at a definition point. If the |
| 843 | /// update of the subranges occurs while the IR does not reflect these |
| 844 | /// changes, \p ComposeSubRegIdx can be used to specify how the |
| 845 | /// definitions are going to be rewritten. |
| 846 | /// E.g., let's say we want to merge: |
| 847 | /// V1.sub1:<2 x s32> = COPY V2.sub3:<4 x s32> |
| 848 | /// We do that by choosing a class where sub1:<2 x s32> and sub3:<4 x s32> |
| 849 | /// overlap, i.e., by choosing a class where we can find "offset + 1 == 3". |
| 850 | /// Put differently we align V2's sub3 with V1's sub1: |
| 851 | /// V2: sub0 sub1 sub2 sub3 |
| 852 | /// V1: <offset> sub0 sub1 |
| 853 | /// |
| 854 | /// This offset will look like a composed subregidx in the class: |
| 855 | /// V1.(composed sub2 with sub1):<4 x s32> = COPY V2.sub3:<4 x s32> |
| 856 | /// => V1.(composed sub2 with sub1):<4 x s32> = COPY V2.sub3:<4 x s32> |
| 857 | /// |
| 858 | /// Now if we didn't rewrite the uses and def of V1, all the checks for V1 |
| 859 | /// need to account for this offset. |
| 860 | /// This happens during coalescing where we update the live-ranges while |
| 861 | /// still having the old IR around because updating the IR on-the-fly |
| 862 | /// would actually clobber some information about what the live-ranges |
| 863 | /// being updated look like. |
| 864 | void refineSubRanges(BumpPtrAllocator &Allocator, LaneBitmask LaneMask, |
| 865 | std::function<void(LiveInterval::SubRange &)> Apply, |
| 866 | const SlotIndexes &Indexes, |
| 867 | const TargetRegisterInfo &TRI, |
| 868 | unsigned ComposeSubRegIdx = 0); |
| 869 | |
| 870 | bool operator<(const LiveInterval& other) const { |
| 871 | const SlotIndex &thisIndex = beginIndex(); |
| 872 | const SlotIndex &otherIndex = other.beginIndex(); |
| 873 | return std::tie(thisIndex, Reg) < std::tie(otherIndex, other.Reg); |
| 874 | } |
| 875 | |
| 876 | void print(raw_ostream &OS) const; |
| 877 | void dump() const; |
| 878 | |
| 879 | /// Walks the interval and assert if any invariants fail to hold. |
| 880 | /// |
| 881 | /// Note that this is a no-op when asserts are disabled. |
| 882 | #ifdef NDEBUG1 |
| 883 | void verify(const MachineRegisterInfo *MRI = nullptr) const {} |
| 884 | #else |
| 885 | void verify(const MachineRegisterInfo *MRI = nullptr) const; |
| 886 | #endif |
| 887 | |
| 888 | private: |
| 889 | /// Appends @p Range to SubRanges list. |
| 890 | void appendSubRange(SubRange *Range) { |
| 891 | Range->Next = SubRanges; |
| 892 | SubRanges = Range; |
| 893 | } |
| 894 | |
| 895 | /// Free memory held by SubRange. |
| 896 | void freeSubRange(SubRange *S); |
| 897 | }; |
| 898 | |
| 899 | inline raw_ostream &operator<<(raw_ostream &OS, |
| 900 | const LiveInterval::SubRange &SR) { |
| 901 | SR.print(OS); |
| 902 | return OS; |
| 903 | } |
| 904 | |
| 905 | inline raw_ostream &operator<<(raw_ostream &OS, const LiveInterval &LI) { |
| 906 | LI.print(OS); |
| 907 | return OS; |
| 908 | } |
| 909 | |
| 910 | raw_ostream &operator<<(raw_ostream &OS, const LiveRange::Segment &S); |
| 911 | |
| 912 | inline bool operator<(SlotIndex V, const LiveRange::Segment &S) { |
| 913 | return V < S.start; |
| 914 | } |
| 915 | |
| 916 | inline bool operator<(const LiveRange::Segment &S, SlotIndex V) { |
| 917 | return S.start < V; |
| 918 | } |
| 919 | |
| 920 | /// Helper class for performant LiveRange bulk updates. |
| 921 | /// |
| 922 | /// Calling LiveRange::addSegment() repeatedly can be expensive on large |
| 923 | /// live ranges because segments after the insertion point may need to be |
| 924 | /// shifted. The LiveRangeUpdater class can defer the shifting when adding |
| 925 | /// many segments in order. |
| 926 | /// |
| 927 | /// The LiveRange will be in an invalid state until flush() is called. |
| 928 | class LiveRangeUpdater { |
| 929 | LiveRange *LR; |
| 930 | SlotIndex LastStart; |
| 931 | LiveRange::iterator WriteI; |
| 932 | LiveRange::iterator ReadI; |
| 933 | SmallVector<LiveRange::Segment, 16> Spills; |
| 934 | void mergeSpills(); |
| 935 | |
| 936 | public: |
| 937 | /// Create a LiveRangeUpdater for adding segments to LR. |
| 938 | /// LR will temporarily be in an invalid state until flush() is called. |
| 939 | LiveRangeUpdater(LiveRange *lr = nullptr) : LR(lr) {} |
| 940 | |
| 941 | ~LiveRangeUpdater() { flush(); } |
| 942 | |
| 943 | /// Add a segment to LR and coalesce when possible, just like |
| 944 | /// LR.addSegment(). Segments should be added in increasing start order for |
| 945 | /// best performance. |
| 946 | void add(LiveRange::Segment); |
| 947 | |
| 948 | void add(SlotIndex Start, SlotIndex End, VNInfo *VNI) { |
| 949 | add(LiveRange::Segment(Start, End, VNI)); |
| 950 | } |
| 951 | |
| 952 | /// Return true if the LR is currently in an invalid state, and flush() |
| 953 | /// needs to be called. |
| 954 | bool isDirty() const { return LastStart.isValid(); } |
| 955 | |
| 956 | /// Flush the updater state to LR so it is valid and contains all added |
| 957 | /// segments. |
| 958 | void flush(); |
| 959 | |
| 960 | /// Select a different destination live range. |
| 961 | void setDest(LiveRange *lr) { |
| 962 | if (LR != lr && isDirty()) |
| 963 | flush(); |
| 964 | LR = lr; |
| 965 | } |
| 966 | |
| 967 | /// Get the current destination live range. |
| 968 | LiveRange *getDest() const { return LR; } |
| 969 | |
| 970 | void dump() const; |
| 971 | void print(raw_ostream&) const; |
| 972 | }; |
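| | |
| | // Annotation (not from the original header): bulk-adding segments through |
| | // LiveRangeUpdater; the destructor flushes automatically. `LR` and `Segs` |
| | // are hypothetical names, and ascending start order is assumed for speed. |
| | // |
| | //   { |
| | //     LiveRangeUpdater Updater(&LR); |
| | //     for (const LiveRange::Segment &S : Segs) |
| | //       Updater.add(S); |
| | //   } // ~LiveRangeUpdater() calls flush(); LR is valid again here. |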
| 973 | |
| 974 | inline raw_ostream &operator<<(raw_ostream &OS, const LiveRangeUpdater &X) { |
| 975 | X.print(OS); |
| 976 | return OS; |
| 977 | } |
| 978 | |
| 979 | /// ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a |
| 980 | /// LiveInterval into equivalence classes of connected components. A |
| 981 | /// LiveInterval that has multiple connected components can be broken into |
| 982 | /// multiple LiveIntervals. |
| 983 | /// |
| 984 | /// Given a LiveInterval that may have multiple connected components, run: |
| 985 | /// |
| 986 | /// unsigned numComps = ConEQ.Classify(LI); |
| 987 | /// if (numComps > 1) { |
| 988 | ///     // allocate numComps-1 new LiveIntervals into LIV[0..] |
| 989 | ///     ConEQ.Distribute(LI, LIV, MRI); |
| 990 | ///   } |
| 991 | |
| 992 | class ConnectedVNInfoEqClasses { |
| 993 | LiveIntervals &LIS; |
| 994 | IntEqClasses EqClass; |
| 995 | |
| 996 | public: |
| 997 | explicit ConnectedVNInfoEqClasses(LiveIntervals &lis) : LIS(lis) {} |
| 998 | |
| 999 | /// Classify the values in \p LR into connected components. |
| 1000 | /// Returns the number of connected components. |
| 1001 | unsigned Classify(const LiveRange &LR); |
| 1002 | |
| 1003 | /// getEqClass - Classify creates equivalence classes numbered 0..N. Return |
| 1004 | /// the equivalence class assigned the VNI. |
| 1005 | unsigned getEqClass(const VNInfo *VNI) const { return EqClass[VNI->id]; } |
| 1006 | |
| 1007 | /// Distribute values in \p LI into a separate LiveIntervals |
| 1008 | /// for each connected component. LIV must have an empty LiveInterval for |
| 1009 | /// each additional connected component. The first connected component is |
| 1010 | /// left in \p LI. |
| 1011 | void Distribute(LiveInterval &LI, LiveInterval *LIV[], |
| 1012 | MachineRegisterInfo &MRI); |
| 1013 | }; |
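| | |
| | // Annotation (not from the original header): an expanded version of the |
| | // sketch in the class comment, matched to the Distribute() signature |
| | // declared above. `LIS`, `MRI`, and makeEmptyInterval() (which must |
| | // create one empty LiveInterval per extra component) are hypothetical. |
| | // |
| | //   ConnectedVNInfoEqClasses ConEQ(LIS); |
| | //   unsigned NumComps = ConEQ.Classify(LI); |
| | //   if (NumComps > 1) { |
| | //     SmallVector<LiveInterval *, 4> NewLIs; |
| | //     for (unsigned I = 1; I < NumComps; ++I) |
| | //       NewLIs.push_back(&makeEmptyInterval()); |
| | //     ConEQ.Distribute(LI, NewLIs.data(), MRI); // component 0 stays in LI |
| | //   } |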
| 1014 | |
| 1015 | } // end namespace llvm |
| 1016 | |
| 1017 | #endif // LLVM_CODEGEN_LIVEINTERVAL_H |
| 1 | //===- llvm/CodeGen/SlotIndexes.h - Slot indexes representation -*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file implements SlotIndex and related classes. The purpose of SlotIndex |
| 10 | // is to describe a position at which a register can become live, or cease to |
| 11 | // be live. |
| 12 | // |
| 13 | // SlotIndex is mostly a proxy for entries of the SlotIndexList, a class which |
| 14 | // is held in LiveIntervals and provides the real numbering. This allows |
| 15 | // LiveIntervals to perform largely transparent renumbering. |
| 16 | //===----------------------------------------------------------------------===// |
| 17 | |
| 18 | #ifndef LLVM_CODEGEN_SLOTINDEXES_H |
| 19 | #define LLVM_CODEGEN_SLOTINDEXES_H |
| 20 | |
| 21 | #include "llvm/ADT/DenseMap.h" |
| 22 | #include "llvm/ADT/IntervalMap.h" |
| 23 | #include "llvm/ADT/PointerIntPair.h" |
| 24 | #include "llvm/ADT/SmallVector.h" |
| 25 | #include "llvm/ADT/ilist.h" |
| 26 | #include "llvm/CodeGen/MachineBasicBlock.h" |
| 27 | #include "llvm/CodeGen/MachineFunction.h" |
| 28 | #include "llvm/CodeGen/MachineFunctionPass.h" |
| 29 | #include "llvm/CodeGen/MachineInstr.h" |
| 30 | #include "llvm/CodeGen/MachineInstrBundle.h" |
| 31 | #include "llvm/Pass.h" |
| 32 | #include "llvm/Support/Allocator.h" |
| 33 | #include <algorithm> |
| 34 | #include <cassert> |
| 35 | #include <iterator> |
| 36 | #include <utility> |
| 37 | |
| 38 | namespace llvm { |
| 39 | |
| 40 | class raw_ostream; |
| 41 | |
| 42 | /// This class represents an entry in the slot index list held in the |
| 43 | /// SlotIndexes pass. It should not be used directly. See the |
| 44 | /// SlotIndex & SlotIndexes classes for the public interface to this |
| 45 | /// information. |
| 46 | class IndexListEntry : public ilist_node<IndexListEntry> { |
| 47 | MachineInstr *mi; |
| 48 | unsigned index; |
| 49 | |
| 50 | public: |
| 51 | IndexListEntry(MachineInstr *mi, unsigned index) : mi(mi), index(index) {} |
| 52 | |
| 53 | MachineInstr* getInstr() const { return mi; } |
| 54 | void setInstr(MachineInstr *mi) { |
| 55 | this->mi = mi; |
| 56 | } |
| 57 | |
| 58 | unsigned getIndex() const { return index; } |
| 59 | void setIndex(unsigned index) { |
| 60 | this->index = index; |
| 61 | } |
| 62 | |
| 63 | #ifdef EXPENSIVE_CHECKS |
| 64 | // When EXPENSIVE_CHECKS is defined, "erased" index list entries will |
| 65 | // actually be moved to a "graveyard" list, and have their pointers |
| 66 | // poisoned, so that dangling SlotIndex access can be reliably detected. |
| 67 | void setPoison() { |
| 68 | intptr_t tmp = reinterpret_cast<intptr_t>(mi); |
| 69 | assert(((tmp & 0x1) == 0x0) && "Pointer already poisoned?")((void)0); |
| 70 | tmp |= 0x1; |
| 71 | mi = reinterpret_cast<MachineInstr*>(tmp); |
| 72 | } |
| 73 | |
| 74 | bool isPoisoned() const { return (reinterpret_cast<intptr_t>(mi) & 0x1) == 0x1; } |
| 75 | #endif // EXPENSIVE_CHECKS |
| 76 | }; |
| 77 | |
| 78 | template <> |
| 79 | struct ilist_alloc_traits<IndexListEntry> |
| 80 | : public ilist_noalloc_traits<IndexListEntry> {}; |
| 81 | |
| 82 | /// SlotIndex - An opaque wrapper around machine indexes. |
| 83 | class SlotIndex { |
| 84 | friend class SlotIndexes; |
| 85 | |
| 86 | enum Slot { |
| 87 | /// Basic block boundary. Used for live ranges entering and leaving a |
| 88 | /// block without being live in the layout neighbor. Also used as the |
| 89 | /// def slot of PHI-defs. |
| 90 | Slot_Block, |
| 91 | |
| 92 | /// Early-clobber register use/def slot. A live range defined at |
| 93 | /// Slot_EarlyClobber interferes with normal live ranges killed at |
| 94 | /// Slot_Register. Also used as the kill slot for live ranges tied to an |
| 95 | /// early-clobber def. |
| 96 | Slot_EarlyClobber, |
| 97 | |
| 98 | /// Normal register use/def slot. Normal instructions kill and define |
| 99 | /// register live ranges at this slot. |
| 100 | Slot_Register, |
| 101 | |
| 102 | /// Dead def kill point. Kill slot for a live range that is defined by |
| 103 | /// the same instruction (Slot_Register or Slot_EarlyClobber), but isn't |
| 104 | /// used anywhere. |
| 105 | Slot_Dead, |
| 106 | |
| 107 | Slot_Count |
| 108 | }; |
| 109 | |
| 110 | PointerIntPair<IndexListEntry*, 2, unsigned> lie; |
| 111 | |
| 112 | SlotIndex(IndexListEntry *entry, unsigned slot) |
| 113 | : lie(entry, slot) {} |
| 114 | |
| 115 | IndexListEntry* listEntry() const { |
| 116 | assert(isValid() && "Attempt to compare reserved index.")((void)0); |
| 117 | #ifdef EXPENSIVE_CHECKS |
| 118 | assert(!lie.getPointer()->isPoisoned() &&((void)0) |
| 119 | "Attempt to access deleted list-entry.")((void)0); |
| 120 | #endif // EXPENSIVE_CHECKS |
| 121 | return lie.getPointer(); |
| 122 | } |
| 123 | |
| 124 | unsigned getIndex() const { |
| 125 | return listEntry()->getIndex() | getSlot(); |
| 126 | } |
| 127 | |
| 128 | /// Returns the slot for this SlotIndex. |
| 129 | Slot getSlot() const { |
| 130 | return static_cast<Slot>(lie.getInt()); |
| 131 | } |
| 132 | |
| 133 | public: |
| 134 | enum { |
| 135 | /// The default distance between instructions as returned by distance(). |
| 136 | /// This may vary as instructions are inserted and removed. |
| 137 | InstrDist = 4 * Slot_Count |
| 138 | }; |
| 139 | |
| 140 | /// Construct an invalid index. |
| 141 | SlotIndex() = default; |
| 142 | |
| 143 | // Construct a new slot index from the given one, and set the slot. |
| 144 | SlotIndex(const SlotIndex &li, Slot s) : lie(li.listEntry(), unsigned(s)) { |
| 145 | assert(lie.getPointer() != nullptr &&((void)0) |
| 146 | "Attempt to construct index with 0 pointer.")((void)0); |
| 147 | } |
| 148 | |
| 149 | /// Returns true if this is a valid index. Invalid indices do |
| 150 | /// not point into an index table, and cannot be compared. |
| 151 | bool isValid() const { |
| 152 | return lie.getPointer(); |
| 153 | } |
| 154 | |
| 155 | /// Return true for a valid index. |
| 156 | explicit operator bool() const { return isValid(); } |
| 157 | |
| 158 | /// Print this index to the given raw_ostream. |
| 159 | void print(raw_ostream &os) const; |
| 160 | |
| 161 | /// Dump this index to stderr. |
| 162 | void dump() const; |
| 163 | |
| 164 | /// Compare two SlotIndex objects for equality. |
| 165 | bool operator==(SlotIndex other) const { |
| 166 | return lie == other.lie; |
| 167 | } |
| 168 | /// Compare two SlotIndex objects for inequality. |
| 169 | bool operator!=(SlotIndex other) const { |
| 170 | return lie != other.lie; |
| 171 | } |
| 172 | |
| 173 | /// Compare two SlotIndex objects. Return true if the first index |
| 174 | /// is strictly lower than the second. |
| 175 | bool operator<(SlotIndex other) const { |
| 176 | return getIndex() < other.getIndex(); |
| 177 | } |
| 178 | /// Compare two SlotIndex objects. Return true if the first index |
| 179 | /// is lower than, or equal to, the second. |
| 180 | bool operator<=(SlotIndex other) const { |
| 181 | return getIndex() <= other.getIndex(); |
| 182 | } |
| 183 | |
| 184 | /// Compare two SlotIndex objects. Return true if the first index |
| 185 | /// is greater than the second. |
| 186 | bool operator>(SlotIndex other) const { |
| 187 | return getIndex() > other.getIndex(); |
| 188 | } |
| 189 | |
| 190 | /// Compare two SlotIndex objects. Return true if the first index |
| 191 | /// is greater than, or equal to, the second. |
| 192 | bool operator>=(SlotIndex other) const { |
| 193 | return getIndex() >= other.getIndex(); |
| 194 | } |
| 195 | |
| 196 | /// isSameInstr - Return true if A and B refer to the same instruction. |
| 197 | static bool isSameInstr(SlotIndex A, SlotIndex B) { |
| 198 | return A.lie.getPointer() == B.lie.getPointer(); |
| 199 | } |
| 200 | |
| 201 | /// isEarlierInstr - Return true if A refers to an instruction earlier than |
| 202 | /// B. This is equivalent to A < B && !isSameInstr(A, B). |
| 203 | static bool isEarlierInstr(SlotIndex A, SlotIndex B) { |
| 204 | return A.listEntry()->getIndex() < B.listEntry()->getIndex(); |
| 205 | } |
| 206 | |
| 207 | /// Return true if A refers to the same instruction as B or an earlier one. |
| 208 | /// This is equivalent to !isEarlierInstr(B, A). |
| 209 | static bool isEarlierEqualInstr(SlotIndex A, SlotIndex B) { |
| 210 | return !isEarlierInstr(B, A); |
| 211 | } |
| 212 | |
| 213 | /// Return the distance from this index to the given one. |
| 214 | int distance(SlotIndex other) const { |
| 215 | return other.getIndex() - getIndex(); |
| 216 | } |
| 217 | |
| 218 | /// Return the scaled distance from this index to the given one, where all |
| 219 | /// slots on the same instruction have zero distance. |
| 220 | int getInstrDistance(SlotIndex other) const { |
| 221 | return (other.listEntry()->getIndex() - listEntry()->getIndex()) |
| 222 | / Slot_Count; |
| 223 | } |
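| | |
| | // Annotation (not from the original header): distance() subtracts the raw |
| | // encoded indexes, while getInstrDistance() divides the entry-index |
| | // difference by Slot_Count so all slots on one instruction compare equal. |
| | // Using the slot helpers declared below, for a hypothetical `Idx`: |
| | // |
| | //   SlotIndex R = Idx.getRegSlot(), D = Idx.getDeadSlot(); |
| | //   assert(R.getInstrDistance(D) == 0); // same instruction |
| | //   assert(R.distance(D) == 1);         // Slot_Register -> Slot_Dead |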
| 224 | |
| 225 | /// isBlock - Returns true if this is a block boundary slot. |
| 226 | bool isBlock() const { return getSlot() == Slot_Block; } |
| 227 | |
| 228 | /// isEarlyClobber - Returns true if this is an early-clobber slot. |
| 229 | bool isEarlyClobber() const { return getSlot() == Slot_EarlyClobber; } |
| 230 | |
| 231 | /// isRegister - Returns true if this is a normal register use/def slot. |
| 232 | /// Note that early-clobber slots may also be used for uses and defs. |
| 233 | bool isRegister() const { return getSlot() == Slot_Register; } |
| 234 | |
| 235 | /// isDead - Returns true if this is a dead def kill slot. |
| 236 | bool isDead() const { return getSlot() == Slot_Dead; } |
| 237 | |
| 238 | /// Returns the base index associated with this index. The base index |
| 239 | /// is the one associated with the Slot_Block slot for the instruction |
| 240 | /// pointed to by this index. |
| 241 | SlotIndex getBaseIndex() const { |
| 242 | return SlotIndex(listEntry(), Slot_Block); |
| 243 | } |
| 244 | |
| 245 | /// Returns the boundary index associated with this index. The boundary |
| 246 | /// index is the one associated with the Slot_Dead slot for the |
| 247 | /// instruction pointed to by this index. |
| 248 | SlotIndex getBoundaryIndex() const { |
| 249 | return SlotIndex(listEntry(), Slot_Dead); |
| 250 | } |
| 251 | |
| 252 | /// Returns the register use/def slot in the current instruction for a |
| 253 | /// normal or early-clobber def. |
| 254 | SlotIndex getRegSlot(bool EC = false) const { |
| 255 | return SlotIndex(listEntry(), EC ? Slot_EarlyClobber : Slot_Register); |
| 256 | } |
| 257 | |
| 258 | /// Returns the dead def kill slot for the current instruction. |
| 259 | SlotIndex getDeadSlot() const { |
| 260 | return SlotIndex(listEntry(), Slot_Dead); |
| 261 | } |
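| | |
| | // Annotation (not from the original header): how the four slots of a |
| | // single instruction relate, for a hypothetical valid index `Idx`: |
| | // |
| | //   SlotIndex B = Idx.getBaseIndex();   // Slot_Block |
| | //   SlotIndex E = Idx.getRegSlot(true); // Slot_EarlyClobber |
| | //   SlotIndex R = Idx.getRegSlot();     // Slot_Register |
| | //   SlotIndex D = Idx.getDeadSlot();    // Slot_Dead |
| | //   // B < E < R < D, and SlotIndex::isSameInstr() holds for any pair. |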
| 262 | |
| 263 | /// Returns the next slot in the index list. This could be either the |
| 264 | /// next slot for the instruction pointed to by this index or, if this |
| 265 | /// index is Slot_Dead, the first slot for the next instruction. |
| 266 | /// WARNING: This method is considerably more expensive than the methods |
| 267 | /// that return specific slots (getRegSlot(), etc.). If you can, please |
| 268 | /// use one of those methods. |
| 269 | SlotIndex getNextSlot() const { |
| 270 | Slot s = getSlot(); |
| 271 | if (s == Slot_Dead) { |
| 272 | return SlotIndex(&*++listEntry()->getIterator(), Slot_Block); |
| 273 | } |
| 274 | return SlotIndex(listEntry(), s + 1); |
| 275 | } |
| 276 | |
| 277 | /// Returns the next index. This is the index corresponding to this |
| 278 | /// index's slot, but for the next instruction. |
| 279 | SlotIndex getNextIndex() const { |
| 280 | return SlotIndex(&*++listEntry()->getIterator(), getSlot()); |
| 281 | } |
| 282 | |
| 283 | /// Returns the previous slot in the index list. This could be either the |
| 284 | /// previous slot for the instruction pointed to by this index or, if this |
| 285 | /// index is a Slot_Block, the last slot for the previous instruction. |
| 286 | /// WARNING: This method is considerably more expensive than the methods |
| 287 | /// that return specific slots (getRegSlot(), etc.). If you can, please |
| 288 | /// use one of those methods. |
| 289 | SlotIndex getPrevSlot() const { |
| 290 | Slot s = getSlot(); |
| 291 | if (s == Slot_Block) { |
| 292 | return SlotIndex(&*--listEntry()->getIterator(), Slot_Dead); |
| 293 | } |
| 294 | return SlotIndex(listEntry(), s - 1); |
| 295 | } |
| 296 | |
| 297 | /// Returns the previous index. This is the index corresponding to this |
| 298 | /// index's slot, but for the previous instruction. |
| 299 | SlotIndex getPrevIndex() const { |
| 300 | return SlotIndex(&*--listEntry()->getIterator(), getSlot()); |
| 301 | } |
| 302 | }; |
| 303 | |
| 304 | inline raw_ostream& operator<<(raw_ostream &os, SlotIndex li) { |
| 305 | li.print(os); |
| 306 | return os; |
| 307 | } |
| 308 | |
| 309 | using IdxMBBPair = std::pair<SlotIndex, MachineBasicBlock *>; |
| 310 | |
| 311 | /// SlotIndexes pass. |
| 312 | /// |
| 313 | /// This pass assigns indexes to each instruction. |
| 314 | class SlotIndexes : public MachineFunctionPass { |
| 315 | private: |
| 316 | // IndexListEntry allocator. |
| 317 | BumpPtrAllocator ileAllocator; |
| 318 | |
| 319 | using IndexList = ilist<IndexListEntry>; |
| 320 | IndexList indexList; |
| 321 | |
| 322 | MachineFunction *mf; |
| 323 | |
| 324 | using Mi2IndexMap = DenseMap<const MachineInstr *, SlotIndex>; |
| 325 | Mi2IndexMap mi2iMap; |
| 326 | |
| 327 | /// MBBRanges - Map MBB number to (start, stop) indexes. |
| 328 | SmallVector<std::pair<SlotIndex, SlotIndex>, 8> MBBRanges; |
| 329 | |
| 330 | /// Idx2MBBMap - Sorted list of (start index, MBB) pairs, one entry |
| 331 | /// per basic block. |
| 332 | SmallVector<IdxMBBPair, 8> idx2MBBMap; |
| 333 | |
| 334 | IndexListEntry* createEntry(MachineInstr *mi, unsigned index) { |
| 335 | IndexListEntry *entry = |
| 336 | static_cast<IndexListEntry *>(ileAllocator.Allocate( |
| 337 | sizeof(IndexListEntry), alignof(IndexListEntry))); |
| 338 | |
| 339 | new (entry) IndexListEntry(mi, index); |
| 340 | |
| 341 | return entry; |
| 342 | } |
| 343 | |
| 344 | /// Renumber locally after inserting curItr. |
| 345 | void renumberIndexes(IndexList::iterator curItr); |
| 346 | |
| 347 | public: |
| 348 | static char ID; |
| 349 | |
| 350 | SlotIndexes(); |
| 351 | |
| 352 | ~SlotIndexes() override; |
| 353 | |
| 354 | void getAnalysisUsage(AnalysisUsage &au) const override; |
| 355 | void releaseMemory() override; |
| 356 | |
| 357 | bool runOnMachineFunction(MachineFunction &fn) override; |
| 358 | |
| 359 | /// Dump the indexes. |
| 360 | void dump() const; |
| 361 | |
| 362 | /// Repair indexes after adding and removing instructions. |
| 363 | void repairIndexesInRange(MachineBasicBlock *MBB, |
| 364 | MachineBasicBlock::iterator Begin, |
| 365 | MachineBasicBlock::iterator End); |
| 366 | |
| 367 | /// Returns the zero index for this analysis. |
| 368 | SlotIndex getZeroIndex() { |
| 369 | assert(indexList.front().getIndex() == 0 && "First index is not 0?")((void)0); |
| 370 | return SlotIndex(&indexList.front(), 0); |
| 371 | } |
| 372 | |
| 373 | /// Returns the base index of the last slot in this analysis. |
| 374 | SlotIndex getLastIndex() { |
| 375 | return SlotIndex(&indexList.back(), 0); |
| 376 | } |
| 377 | |
| 378 | /// Returns true if the given machine instr is mapped to an index, |
| 379 | /// otherwise returns false. |
| 380 | bool hasIndex(const MachineInstr &instr) const { |
| 381 | return mi2iMap.count(&instr); |
| 382 | } |
| 383 | |
| 384 | /// Returns the base index for the given instruction. |
| 385 | SlotIndex getInstructionIndex(const MachineInstr &MI, |
| 386 | bool IgnoreBundle = false) const { |
| 387 | // Instructions inside a bundle have the same number as the bundle itself. |
| 388 | auto BundleStart = getBundleStart(MI.getIterator()); |
| 389 | auto BundleEnd = getBundleEnd(MI.getIterator()); |
| 390 | // Use the first non-debug instruction in the bundle to get SlotIndex. |
| 391 | const MachineInstr &BundleNonDebug = |
| 392 | IgnoreBundle ? MI |
| 393 | : *skipDebugInstructionsForward(BundleStart, BundleEnd); |
| 394 | assert(!BundleNonDebug.isDebugInstr() &&((void)0) |
| 395 | "Could not use a debug instruction to query mi2iMap.")((void)0); |
| 396 | Mi2IndexMap::const_iterator itr = mi2iMap.find(&BundleNonDebug); |
| 397 | assert(itr != mi2iMap.end() && "Instruction not found in maps.")((void)0); |
| 398 | return itr->second; |
| 399 | } |
| 400 | |
| 401 | /// Returns the instruction for the given index, or null if the given |
| 402 | /// index has no instruction associated with it. |
| 403 | MachineInstr* getInstructionFromIndex(SlotIndex index) const { |
| 404 | return index.isValid() ? index.listEntry()->getInstr() : nullptr; |
| 405 | } |
| 406 | |
| 407 | /// Returns the next non-null index, if one exists. |
| 408 | /// Otherwise returns getLastIndex(). |
| 409 | SlotIndex getNextNonNullIndex(SlotIndex Index) { |
| 410 | IndexList::iterator I = Index.listEntry()->getIterator(); |
| 411 | IndexList::iterator E = indexList.end(); |
| 412 | while (++I != E) |
| 413 | if (I->getInstr()) |
| 414 | return SlotIndex(&*I, Index.getSlot()); |
| 415 | // We reached the end of the function. |
| 416 | return getLastIndex(); |
| 417 | } |
| 418 | |
| 419 | /// getIndexBefore - Returns the index of the last indexed instruction |
| 420 | /// before MI, or the start index of its basic block. |
| 421 | /// MI is not required to have an index. |
| 422 | SlotIndex getIndexBefore(const MachineInstr &MI) const { |
| 423 | const MachineBasicBlock *MBB = MI.getParent(); |
| 424 | assert(MBB && "MI must be inserted in a basic block")((void)0); |
| 425 | MachineBasicBlock::const_iterator I = MI, B = MBB->begin(); |
| 426 | while (true) { |
| 427 | if (I == B) |
| 428 | return getMBBStartIdx(MBB); |
| 429 | --I; |
| 430 | Mi2IndexMap::const_iterator MapItr = mi2iMap.find(&*I); |
| 431 | if (MapItr != mi2iMap.end()) |
| 432 | return MapItr->second; |
| 433 | } |
| 434 | } |
| 435 | |
| 436 | /// getIndexAfter - Returns the index of the first indexed instruction |
| 437 | /// after MI, or the end index of its basic block. |
| 438 | /// MI is not required to have an index. |
| 439 | SlotIndex getIndexAfter(const MachineInstr &MI) const { |
| 440 | const MachineBasicBlock *MBB = MI.getParent(); |
| 441 | assert(MBB && "MI must be inserted in a basic block")((void)0); |
| 442 | MachineBasicBlock::const_iterator I = MI, E = MBB->end(); |
| 443 | while (true) { |
| 444 | ++I; |
| 445 | if (I == E) |
| 446 | return getMBBEndIdx(MBB); |
| 447 | Mi2IndexMap::const_iterator MapItr = mi2iMap.find(&*I); |
| 448 | if (MapItr != mi2iMap.end()) |
| 449 | return MapItr->second; |
| 450 | } |
| 451 | } |
| 452 | |
| 453 | /// Return the (start,end) range of the given basic block number. |
| 454 | const std::pair<SlotIndex, SlotIndex> & |
| 455 | getMBBRange(unsigned Num) const { |
| 456 | return MBBRanges[Num]; |
| 457 | } |
| 458 | |
| 459 | /// Return the (start,end) range of the given basic block. |
| 460 | const std::pair<SlotIndex, SlotIndex> & |
| 461 | getMBBRange(const MachineBasicBlock *MBB) const { |
| 462 | return getMBBRange(MBB->getNumber()); |
| 463 | } |
| 464 | |
| 465 | /// Returns the first index in the given basic block number. |
| 466 | SlotIndex getMBBStartIdx(unsigned Num) const { |
| 467 | return getMBBRange(Num).first; |
| 468 | } |
| 469 | |
| 470 | /// Returns the first index in the given basic block. |
| 471 | SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const { |
| 472 | return getMBBRange(mbb).first; |
| 473 | } |
| 474 | |
| 475 | /// Returns the last index in the given basic block number. |
| 476 | SlotIndex getMBBEndIdx(unsigned Num) const { |
| 477 | return getMBBRange(Num).second; |
| 478 | } |
| 479 | |
| 480 | /// Returns the last index in the given basic block. |
| 481 | SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const { |
| 482 | return getMBBRange(mbb).second; |
| 483 | } |
| 484 | |
| 485 | /// Iterator over the idx2MBBMap (sorted pairs of the slot index of a |
| 486 | /// basic block's start and the basic block itself). |
| 487 | using MBBIndexIterator = SmallVectorImpl<IdxMBBPair>::const_iterator; |
| 488 | |
| 489 | /// Move the iterator to the next IdxMBBPair where the SlotIndex is |
| 490 | /// greater than or equal to \p To. |
| 491 | MBBIndexIterator advanceMBBIndex(MBBIndexIterator I, SlotIndex To) const { |
| 492 | return std::partition_point( |
| 493 | I, idx2MBBMap.end(), |
| 494 | [=](const IdxMBBPair &IM) { return IM.first < To; }); |
| 495 | } |
| 496 | |
| 497 | /// Get an iterator pointing to the first IdxMBBPair whose SlotIndex |
| 498 | /// is greater than or equal to \p Idx. |
| 499 | MBBIndexIterator findMBBIndex(SlotIndex Idx) const { |
| 500 | return advanceMBBIndex(idx2MBBMap.begin(), Idx); |
| 501 | } |
| 502 | |
| 503 | /// Returns an iterator to the beginning of the idx2MBBMap. |
| 504 | MBBIndexIterator MBBIndexBegin() const { |
| 505 | return idx2MBBMap.begin(); |
| 506 | } |
| 507 | |
| 508 | /// Return an iterator for the end of the idx2MBBMap. |
| 509 | MBBIndexIterator MBBIndexEnd() const { |
| 510 | return idx2MBBMap.end(); |
| 511 | } |
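| | |
| | // Annotation (not from the original header): visiting the blocks whose |
| | // start index falls in a half-open window [Start, End), using the sorted |
| | // idx2MBBMap iterators. `SI` (a SlotIndexes reference), `Start`, `End`, |
| | // and visit() are hypothetical names. |
| | // |
| | //   for (SlotIndexes::MBBIndexIterator I = SI.findMBBIndex(Start), |
| | //                                      E = SI.MBBIndexEnd(); |
| | //        I != E && I->first < End; ++I) |
| | //     visit(I->second); // MachineBasicBlock* starting at I->first |
| | //   // Note: a block that merely contains Start (but starts before it) |
| | //   // is not visited; use getMBBFromIndex(Start) for that case. |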
| 512 | |
| 513 | /// Returns the basic block which the given index falls in. |
| 514 | MachineBasicBlock* getMBBFromIndex(SlotIndex index) const { |
| 515 | if (MachineInstr *MI = getInstructionFromIndex(index)) |
| 516 | return MI->getParent(); |
| 517 | |
| 518 | MBBIndexIterator I = findMBBIndex(index); |
| 519 | // Take the pair containing the index |
| 520 | MBBIndexIterator J = |
| 521 | ((I != MBBIndexEnd() && I->first > index) || |
| 522 | (I == MBBIndexEnd() && !idx2MBBMap.empty())) ? std::prev(I) : I; |
| 523 | |
| 524 | assert(J != MBBIndexEnd() && J->first <= index &&((void)0) |
| 525 | index < getMBBEndIdx(J->second) &&((void)0) |
| 526 | "index does not correspond to an MBB")((void)0); |
| 527 | return J->second; |
| 528 | } |
| 529 | |
| 530 | /// Insert the given machine instruction into the mapping. Returns the |
| 531 | /// assigned index. |
| 532 | /// If Late is set and there are null indexes between mi's neighboring |
| 533 | /// instructions, create the new index after the null indexes instead of |
| 534 | /// before them. |
| 535 | SlotIndex insertMachineInstrInMaps(MachineInstr &MI, bool Late = false) { |
| 536 | assert(!MI.isInsideBundle() &&((void)0) |
| 537 | "Instructions inside bundles should use bundle start's slot.")((void)0); |
| 538 | assert(mi2iMap.find(&MI) == mi2iMap.end() && "Instr already indexed.")((void)0); |
| 539 | // Numbering debug instructions could cause code generation to be |
| 540 | // affected by debug information. |
| 541 | assert(!MI.isDebugInstr() && "Cannot number debug instructions.")((void)0); |
| 542 | |
| 543 | assert(MI.getParent() != nullptr && "Instr must be added to function.")((void)0); |
| 544 | |
| 545 | // Get the entries where MI should be inserted. |
| 546 | IndexList::iterator prevItr, nextItr; |
| 547 | if (Late) { |
| 548 | // Insert MI's index immediately before the following instruction. |
| 549 | nextItr = getIndexAfter(MI).listEntry()->getIterator(); |
| 550 | prevItr = std::prev(nextItr); |
| 551 | } else { |
| 552 | // Insert MI's index immediately after the preceding instruction. |
| 553 | prevItr = getIndexBefore(MI).listEntry()->getIterator(); |
| 554 | nextItr = std::next(prevItr); |
| 555 | } |
| 556 | |
| 557 | // Get a number for the new instr, or 0 if there's no room currently. |
| 558 | // In the latter case we'll force a renumber later. |
| 559 | unsigned dist = ((nextItr->getIndex() - prevItr->getIndex())/2) & ~3u; |
| 560 | unsigned newNumber = prevItr->getIndex() + dist; |
| 561 | |
| 562 | // Insert a new list entry for MI. |
| 563 | IndexList::iterator newItr = |
| 564 | indexList.insert(nextItr, createEntry(&MI, newNumber)); |
| 565 | |
| 566 | // Renumber locally if we need to. |
| 567 | if (dist == 0) |
| 568 | renumberIndexes(newItr); |
| 569 | |
| 570 | SlotIndex newIndex(&*newItr, SlotIndex::Slot_Block); |
| 571 | mi2iMap.insert(std::make_pair(&MI, newIndex)); |
| 572 | return newIndex; |
| 573 | } |
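| | |
| | // Annotation (not from the original header): typical pairing of block |
| | // insertion and index bookkeeping. `Indexes` (a SlotIndexes reference) |
| | // and `MI` (already inserted into a basic block) are hypothetical names. |
| | // |
| | //   SlotIndex Idx = Indexes.insertMachineInstrInMaps(MI); |
| | //   // Idx is MI's base (Slot_Block) index, and from now on |
| | //   // Indexes.getInstructionIndex(MI) == Idx holds. |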
| 574 | |
| 575 | /// Removes machine instruction (bundle) \p MI from the mapping. |
| 576 | /// This should be called before MachineInstr::eraseFromParent() is used to |
| 577 | /// remove a whole bundle or an unbundled instruction. |
| 578 | /// If \p AllowBundled is set then this can be used on a bundled |
| 579 | /// instruction; however, this exists to support handleMoveIntoBundle, |
| 580 | /// and in general removeSingleMachineInstrFromMaps should be used instead. |
| 581 | void removeMachineInstrFromMaps(MachineInstr &MI, |
| 582 | bool AllowBundled = false); |
| 583 | |
| 584 | /// Removes a single machine instruction \p MI from the mapping. |
| 585 | /// This should be called before MachineInstr::eraseFromBundle() is used to |
| 586 | /// remove a single instruction (out of a bundle). |
| 587 | void removeSingleMachineInstrFromMaps(MachineInstr &MI); |
| 588 | |
| 589 | /// ReplaceMachineInstrInMaps - Replace a machine instr with a new one in |
| 590 | /// the maps used by the register allocator. \returns the index where the |
| 591 | /// new instruction was inserted. |
| 592 | SlotIndex replaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI) { |
| 593 | Mi2IndexMap::iterator mi2iItr = mi2iMap.find(&MI); |
| 594 | if (mi2iItr == mi2iMap.end()) |
| 595 | return SlotIndex(); |
| 596 | SlotIndex replaceBaseIndex = mi2iItr->second; |
| 597 | IndexListEntry *miEntry(replaceBaseIndex.listEntry()); |
| 598 | assert(miEntry->getInstr() == &MI &&((void)0) |
| 599 | "Mismatched instruction in index tables.")((void)0); |
| 600 | miEntry->setInstr(&NewMI); |
| 601 | mi2iMap.erase(mi2iItr); |
| 602 | mi2iMap.insert(std::make_pair(&NewMI, replaceBaseIndex)); |
| 603 | return replaceBaseIndex; |
| 604 | } |
| 605 | |
| 606 | /// Add the given MachineBasicBlock into the maps. |
| 607 | /// If it contains any instructions then they must already be in the maps. |
| 608 | /// This is used after a block has been split by moving some suffix of its |
| 609 | /// instructions into a newly created block. |
| 610 | void insertMBBInMaps(MachineBasicBlock *mbb) { |
| 611 | assert(mbb != &mbb->getParent()->front() &&((void)0) |
| 612 | "Can't insert a new block at the beginning of a function.")((void)0); |
| 613 | auto prevMBB = std::prev(MachineFunction::iterator(mbb)); |
| 614 | |
| 615 | // Create a new entry to be used for the start of mbb and the end of |
| 616 | // prevMBB. |
| 617 | IndexListEntry *startEntry = createEntry(nullptr, 0); |
| 618 | IndexListEntry *endEntry = getMBBEndIdx(&*prevMBB).listEntry(); |
| 619 | IndexListEntry *insEntry = |
| 620 | mbb->empty() ? endEntry |
| 621 | : getInstructionIndex(mbb->front()).listEntry(); |
| 622 | IndexList::iterator newItr = |
| 623 | indexList.insert(insEntry->getIterator(), startEntry); |
| 624 | |
| 625 | SlotIndex startIdx(startEntry, SlotIndex::Slot_Block); |
| 626 | SlotIndex endIdx(endEntry, SlotIndex::Slot_Block); |
| 627 | |
| 628 | MBBRanges[prevMBB->getNumber()].second = startIdx; |
| 629 | |
| 630 | assert(unsigned(mbb->getNumber()) == MBBRanges.size() && |
| 631 | "Blocks must be added in order"); |
| 632 | MBBRanges.push_back(std::make_pair(startIdx, endIdx)); |
| 633 | idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb)); |
| 634 | |
| 635 | renumberIndexes(newItr); |
| 636 | llvm::sort(idx2MBBMap, less_first()); |
| 637 | } |
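
A hypothetical block-split sequence this supports (sketch only; `MF`, `MBB`, `SplitPoint`, and `Indexes` are assumed). The spliced instructions keep their existing slot indexes, which is exactly the precondition the comment above documents:

MachineBasicBlock *NewMBB =
    MF.CreateMachineBasicBlock(MBB->getBasicBlock());
MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
NewMBB->splice(NewMBB->end(), MBB, SplitPoint, MBB->end());
Indexes->insertMBBInMaps(NewMBB);  // instructions are already indexed
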
| 638 | }; |
| 639 | |
| 640 | // Specialize IntervalMapInfo for half-open slot index intervals. |
| 641 | template <> |
| 642 | struct IntervalMapInfo<SlotIndex> : IntervalMapHalfOpenInfo<SlotIndex> { |
| 643 | }; |
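
A minimal sketch of what the specialization buys, assuming the LLVM headers are available: an IntervalMap keyed on SlotIndex treats each [start, end) interval as half-open, matching live-range conventions.

#include "llvm/ADT/IntervalMap.h"
#include "llvm/CodeGen/SlotIndexes.h"

// Map half-open slot index ranges to arbitrary payloads.
using SlotIntervalMap = llvm::IntervalMap<llvm::SlotIndex, unsigned>;

void demo(llvm::SlotIndex Start, llvm::SlotIndex End) {
  SlotIntervalMap::Allocator Alloc;
  SlotIntervalMap Map(Alloc);
  Map.insert(Start, End, 1u);  // [Start, End) via IntervalMapHalfOpenInfo
}
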
| 644 | |
| 645 | } // end namespace llvm |
| 646 | |
| 647 | #endif // LLVM_CODEGEN_SLOTINDEXES_H |
| 1 | //===-- llvm/CodeGen/Register.h ---------------------------------*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | |
| 9 | #ifndef LLVM_CODEGEN_REGISTER_H |
| 10 | #define LLVM_CODEGEN_REGISTER_H |
| 11 | |
| 12 | #include "llvm/MC/MCRegister.h" |
| 13 | #include <cassert> |
| 14 | |
| 15 | namespace llvm { |
| 16 | |
| 17 | /// Wrapper class representing virtual and physical registers. Should be passed |
| 18 | /// by value. |
| 19 | class Register { |
| 20 | unsigned Reg; |
| 21 | |
| 22 | public: |
| 23 | constexpr Register(unsigned Val = 0): Reg(Val) {} |
| 24 | constexpr Register(MCRegister Val): Reg(Val) {} |
| 25 | |
| 26 | // Register numbers can represent physical registers, virtual registers, and |
| 27 | // sometimes stack slots. The unsigned values are divided into these ranges: |
| 28 | // |
| 29 | // 0 Not a register, can be used as a sentinel. |
| 30 | // [1;2^30) Physical registers assigned by TableGen. |
| 31 | // [2^30;2^31) Stack slots. (Rarely used.) |
| 32 | // [2^31;2^32) Virtual registers assigned by MachineRegisterInfo. |
| 33 | // |
| 34 | // Further sentinels can be allocated from the small negative integers. |
| 35 | // DenseMapInfo<unsigned> uses -1u and -2u. |
| 36 | static_assert(std::numeric_limits<decltype(Reg)>::max() >= 0xFFFFFFFF, |
| 37 | "Reg isn't large enough to hold full range."); |
| 38 | |
| 39 | /// isStackSlot - Sometimes it is useful to be able to store a non-negative |
| 40 | /// frame index in a variable that normally holds a register. isStackSlot() |
| 41 | /// returns true if Reg is in the range used for stack slots. |
| 42 | /// |
| 43 | /// FIXME: remove in favor of member. |
| 44 | static bool isStackSlot(unsigned Reg) { |
| 45 | return MCRegister::isStackSlot(Reg); |
| 46 | } |
| 47 | |
| 48 | /// Return true if this is a stack slot. |
| 49 | bool isStack() const { return MCRegister::isStackSlot(Reg); } |
| 50 | |
| 51 | /// Compute the frame index from a register value representing a stack slot. |
| 52 | static int stackSlot2Index(Register Reg) { |
| 53 | assert(Reg.isStack() && "Not a stack slot"); |
| 54 | return int(Reg - MCRegister::FirstStackSlot); |
| 55 | } |
| 56 | |
| 57 | /// Convert a non-negative frame index to a stack slot register value. |
| 58 | static Register index2StackSlot(int FI) { |
| 59 | assert(FI >= 0 && "Cannot hold a negative frame index."); |
| 60 | return Register(FI + MCRegister::FirstStackSlot); |
| 61 | } |
| 62 | |
| 63 | /// Return true if the specified register number is in |
| 64 | /// the physical register namespace. |
| 65 | static bool isPhysicalRegister(unsigned Reg) { |
| 66 | return MCRegister::isPhysicalRegister(Reg); |
| 67 | } |
| 68 | |
| 69 | /// Return true if the specified register number is in |
| 70 | /// the virtual register namespace. |
| 71 | static bool isVirtualRegister(unsigned Reg) { |
| 72 | return Reg & MCRegister::VirtualRegFlag && !isStackSlot(Reg); |
| 73 | } |
| 74 | |
| 75 | /// Convert a virtual register number to a 0-based index. |
| 76 | /// The first virtual register in a function will get the index 0. |
| 77 | static unsigned virtReg2Index(Register Reg) { |
| 78 | assert(isVirtualRegister(Reg) && "Not a virtual register"); |
| 79 | return Reg & ~MCRegister::VirtualRegFlag; |
| 80 | } |
| 81 | |
| 82 | /// Convert a 0-based index to a virtual register number. |
| 83 | /// This is the inverse operation of VirtReg2IndexFunctor below. |
| 84 | static Register index2VirtReg(unsigned Index) { |
| 85 | assert(Index < (1u << 31) && "Index too large for virtual register range."); |
| 86 | return Index | MCRegister::VirtualRegFlag; |
| 87 | } |
| 88 | |
| 89 | /// Return true if the specified register number is in the virtual register |
| 90 | /// namespace. |
| 91 | bool isVirtual() const { |
| 92 | return isVirtualRegister(Reg); |
| 93 | } |
| 94 | |
| 95 | /// Return true if the specified register number is in the physical register |
| 96 | /// namespace. |
| 97 | bool isPhysical() const { |
| 98 | return isPhysicalRegister(Reg); |
| 99 | } |
| 100 | |
| 101 | /// Convert a virtual register number to a 0-based index. The first virtual |
| 102 | /// register in a function will get the index 0. |
| 103 | unsigned virtRegIndex() const { |
| 104 | return virtReg2Index(Reg); |
| 105 | } |
| 106 | |
| 107 | constexpr operator unsigned() const { |
| 108 | return Reg; |
| 109 | } |
| 110 | |
| 111 | unsigned id() const { return Reg; } |
| 112 | |
| 113 | operator MCRegister() const { |
| 114 | return MCRegister(Reg); |
| 115 | } |
| 116 | |
| 117 | /// Utility to check-convert this value to an MCRegister. The caller is |
| 118 | /// expected to have already validated that this Register is, indeed, |
| 119 | /// physical. |
| 120 | MCRegister asMCReg() const { |
| 121 | assert(Reg == MCRegister::NoRegister || |
| 122 | MCRegister::isPhysicalRegister(Reg)); |
| 123 | return MCRegister(Reg); |
| 124 | } |
| 125 | |
| 126 | bool isValid() const { return Reg != MCRegister::NoRegister; } |
| 127 | |
| 128 | /// Comparisons between register objects |
| 129 | bool operator==(const Register &Other) const { return Reg == Other.Reg; } |
| 130 | bool operator!=(const Register &Other) const { return Reg != Other.Reg; } |
| 131 | bool operator==(const MCRegister &Other) const { return Reg == Other.id(); } |
| 132 | bool operator!=(const MCRegister &Other) const { return Reg != Other.id(); } |
| 133 | |
| 134 | /// Comparisons against register constants. E.g. |
| 135 | /// * R == AArch64::WZR |
| 136 | /// * R == 0 |
| 137 | /// * R == VirtRegMap::NO_PHYS_REG |
| 138 | bool operator==(unsigned Other) const { return Reg == Other; } |
| 139 | bool operator!=(unsigned Other) const { return Reg != Other; } |
| 140 | bool operator==(int Other) const { return Reg == unsigned(Other); } |
| 141 | bool operator!=(int Other) const { return Reg != unsigned(Other); } |
| 142 | // MSVC requires that we explicitly declare these two as well. |
| 143 | bool operator==(MCPhysReg Other) const { return Reg == unsigned(Other); } |
| 144 | bool operator!=(MCPhysReg Other) const { return Reg != unsigned(Other); } |
| 145 | }; |
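
The range comment near the top of the class is pure bit arithmetic, so it can be checked standalone. A minimal sketch using mirror constants (assumed values for MCRegister::FirstStackSlot and MCRegister::VirtualRegFlag; not the LLVM definitions):

#include <cassert>
#include <cstdio>

// Assumed mirrors of the MCRegister constants: stack slots start at
// 2^30, virtual registers carry bit 31.
static const unsigned FIRST_STACK_SLOT = 1u << 30;
static const unsigned VIRTUAL_REG_FLAG = 1u << 31;

int main() {
  // index2VirtReg / virtReg2Index round trip: set, then clear, bit 31.
  unsigned V42 = 42u | VIRTUAL_REG_FLAG;           // virtual reg #42
  assert((V42 & ~VIRTUAL_REG_FLAG) == 42u);
  std::printf("vreg 42 encodes as 0x%x\n", V42);   // 0x8000002a

  // index2StackSlot / stackSlot2Index: offset from FIRST_STACK_SLOT.
  unsigned FI3 = 3u + FIRST_STACK_SLOT;            // frame index 3
  assert(int(FI3 - FIRST_STACK_SLOT) == 3);

  // 0 stays outside every range, usable as a "no register" sentinel.
  assert((0u & VIRTUAL_REG_FLAG) == 0 && 0u < FIRST_STACK_SLOT);
  return 0;
}
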
| 146 | |
| 147 | // Provide DenseMapInfo for Register |
| 148 | template<> struct DenseMapInfo<Register> { |
| 149 | static inline unsigned getEmptyKey() { |
| 150 | return DenseMapInfo<unsigned>::getEmptyKey(); |
| 151 | } |
| 152 | static inline unsigned getTombstoneKey() { |
| 153 | return DenseMapInfo<unsigned>::getTombstoneKey(); |
| 154 | } |
| 155 | static unsigned getHashValue(const Register &Val) { |
| 156 | return DenseMapInfo<unsigned>::getHashValue(Val.id()); |
| 157 | } |
| 158 | static bool isEqual(const Register &LHS, const Register &RHS) { |
| 159 | return DenseMapInfo<unsigned>::isEqual(LHS.id(), RHS.id()); |
| 160 | } |
| 161 | }; |
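
A short sketch of what this specialization enables, assuming the LLVM headers are on the include path (`DefCount` and `noteDef` are illustrative names): Register can key a DenseMap directly, with hashing and equality routed through the specialization above.

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/Register.h"

// Count definitions seen per register.
llvm::DenseMap<llvm::Register, unsigned> DefCount;

void noteDef(llvm::Register R) { ++DefCount[R]; }
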
| 162 | |
| 163 | } // end namespace llvm |
| 164 | |
| 165 | #endif // LLVM_CODEGEN_REGISTER_H |