| File: | src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp |
| Warning: | line 738, column 24: Called C++ object pointer is null |
| 1 | //===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===// | ||||
| 2 | // | ||||
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||
| 4 | // See https://llvm.org/LICENSE.txt for license information. | ||||
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||
| 6 | // | ||||
| 7 | //===----------------------------------------------------------------------===// | ||||
| 8 | // | ||||
| 9 | // The code below implements dead store elimination using MemorySSA. It uses | ||||
| 10 | // the following general approach: given a MemoryDef, walk upwards to find | ||||
| 11 | // clobbering MemoryDefs that may be killed by the starting def. Then check | ||||
| 12 | // that there are no uses that may read the location of the original MemoryDef | ||||
| 13 | // in between both MemoryDefs. A bit more concretely: | ||||
| 14 | // | ||||
| 15 | // For all MemoryDefs StartDef: | ||||
| 16 | // 1. Get the next dominating clobbering MemoryDef (EarlierAccess) by walking | ||||
| 17 | // upwards. | ||||
| 18 | // 2. Check that there are no reads between EarlierAccess and the StartDef by | ||||
| 19 | // checking all uses starting at EarlierAccess and walking until we see | ||||
| 20 | // StartDef. | ||||
| 21 | // 3. For each found CurrentDef, check that: | ||||
| 22 | // 1. There are no barrier instructions between CurrentDef and StartDef (like | ||||
| 23 | // throws or stores with ordering constraints). | ||||
| 24 | // 2. StartDef is executed whenever CurrentDef is executed. | ||||
| 25 | // 3. StartDef completely overwrites CurrentDef. | ||||
| 26 | // 4. Erase CurrentDef from the function and MemorySSA. | ||||
| 27 | // | ||||
| 28 | //===----------------------------------------------------------------------===// | ||||
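As a concrete illustration of the four steps above (a hypothetical C++-level example, not taken from the LLVM sources): the first store below is a MemoryDef that the upward walk from the second store finds and kills, because nothing reads *p in between, the killing store executes whenever the first one does, and it completely overwrites it.

    void example(int *p) {
      *p = 1; // CurrentDef: no intervening read, no barrier, and the store
              // below always executes and fully overwrites it -> erased.
      *p = 2; // StartDef: the killing MemoryDef.
    }
    // After DSE, only "*p = 2;" remains.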
| 29 | |||||
| 30 | #include "llvm/Transforms/Scalar/DeadStoreElimination.h" | ||||
| 31 | #include "llvm/ADT/APInt.h" | ||||
| 32 | #include "llvm/ADT/DenseMap.h" | ||||
| 33 | #include "llvm/ADT/MapVector.h" | ||||
| 34 | #include "llvm/ADT/PostOrderIterator.h" | ||||
| 35 | #include "llvm/ADT/SetVector.h" | ||||
| 36 | #include "llvm/ADT/SmallPtrSet.h" | ||||
| 37 | #include "llvm/ADT/SmallVector.h" | ||||
| 38 | #include "llvm/ADT/Statistic.h" | ||||
| 39 | #include "llvm/ADT/StringRef.h" | ||||
| 40 | #include "llvm/Analysis/AliasAnalysis.h" | ||||
| 41 | #include "llvm/Analysis/CaptureTracking.h" | ||||
| 42 | #include "llvm/Analysis/GlobalsModRef.h" | ||||
| 43 | #include "llvm/Analysis/LoopInfo.h" | ||||
| 44 | #include "llvm/Analysis/MemoryBuiltins.h" | ||||
| 45 | #include "llvm/Analysis/MemoryLocation.h" | ||||
| 46 | #include "llvm/Analysis/MemorySSA.h" | ||||
| 47 | #include "llvm/Analysis/MemorySSAUpdater.h" | ||||
| 48 | #include "llvm/Analysis/MustExecute.h" | ||||
| 49 | #include "llvm/Analysis/PostDominators.h" | ||||
| 50 | #include "llvm/Analysis/TargetLibraryInfo.h" | ||||
| 51 | #include "llvm/Analysis/ValueTracking.h" | ||||
| 52 | #include "llvm/IR/Argument.h" | ||||
| 53 | #include "llvm/IR/BasicBlock.h" | ||||
| 54 | #include "llvm/IR/Constant.h" | ||||
| 55 | #include "llvm/IR/Constants.h" | ||||
| 56 | #include "llvm/IR/DataLayout.h" | ||||
| 57 | #include "llvm/IR/Dominators.h" | ||||
| 58 | #include "llvm/IR/Function.h" | ||||
| 59 | #include "llvm/IR/InstIterator.h" | ||||
| 60 | #include "llvm/IR/InstrTypes.h" | ||||
| 61 | #include "llvm/IR/Instruction.h" | ||||
| 62 | #include "llvm/IR/Instructions.h" | ||||
| 63 | #include "llvm/IR/IntrinsicInst.h" | ||||
| 64 | #include "llvm/IR/Intrinsics.h" | ||||
| 65 | #include "llvm/IR/LLVMContext.h" | ||||
| 66 | #include "llvm/IR/Module.h" | ||||
| 67 | #include "llvm/IR/PassManager.h" | ||||
| 68 | #include "llvm/IR/PatternMatch.h" | ||||
| 69 | #include "llvm/IR/Value.h" | ||||
| 70 | #include "llvm/InitializePasses.h" | ||||
| 71 | #include "llvm/Pass.h" | ||||
| 72 | #include "llvm/Support/Casting.h" | ||||
| 73 | #include "llvm/Support/CommandLine.h" | ||||
| 74 | #include "llvm/Support/Debug.h" | ||||
| 75 | #include "llvm/Support/DebugCounter.h" | ||||
| 76 | #include "llvm/Support/ErrorHandling.h" | ||||
| 77 | #include "llvm/Support/MathExtras.h" | ||||
| 78 | #include "llvm/Support/raw_ostream.h" | ||||
| 79 | #include "llvm/Transforms/Scalar.h" | ||||
| 80 | #include "llvm/Transforms/Utils/AssumeBundleBuilder.h" | ||||
| 81 | #include "llvm/Transforms/Utils/Local.h" | ||||
| 82 | #include <algorithm> | ||||
| 83 | #include <cassert> | ||||
| 84 | #include <cstddef> | ||||
| 85 | #include <cstdint> | ||||
| 86 | #include <iterator> | ||||
| 87 | #include <map> | ||||
| 88 | #include <utility> | ||||
| 89 | |||||
| 90 | using namespace llvm; | ||||
| 91 | using namespace PatternMatch; | ||||
| 92 | |||||
| 93 | #define DEBUG_TYPE"dse" "dse" | ||||
| 94 | |||||
| 95 | STATISTIC(NumRemainingStores, "Number of stores remaining after DSE"); | ||||
| 96 | STATISTIC(NumRedundantStores, "Number of redundant stores deleted"); | ||||
| 97 | STATISTIC(NumFastStores, "Number of stores deleted"); | ||||
| 98 | STATISTIC(NumFastOther, "Number of other instrs removed"); | ||||
| 99 | STATISTIC(NumCompletePartials, "Number of stores dead by later partials"); | ||||
| 100 | STATISTIC(NumModifiedStores, "Number of stores modified"); | ||||
| 101 | STATISTIC(NumCFGChecks, "Number of stores modified"); | ||||
| 102 | STATISTIC(NumCFGTries, "Number of stores modified"); | ||||
| 103 | STATISTIC(NumCFGSuccess, "Number of stores modified"); | ||||
| 104 | STATISTIC(NumGetDomMemoryDefPassed, | ||||
| 105 |           "Number of times a valid candidate is returned from getDomMemoryDef"); | ||||
| 106 | STATISTIC(NumDomMemDefChecks, | ||||
| 107 |           "Number of iterations checking for reads in getDomMemoryDef"); | ||||
| 108 | |||||
| 109 | DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa", | ||||
| 110 |               "Controls which MemoryDefs are eliminated."); | ||||
| 111 | |||||
| 112 | static cl::opt<bool> | ||||
| 113 | EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking", | ||||
| 114 | cl::init(true), cl::Hidden, | ||||
| 115 | cl::desc("Enable partial-overwrite tracking in DSE")); | ||||
| 116 | |||||
| 117 | static cl::opt<bool> | ||||
| 118 | EnablePartialStoreMerging("enable-dse-partial-store-merging", | ||||
| 119 | cl::init(true), cl::Hidden, | ||||
| 120 | cl::desc("Enable partial store merging in DSE")); | ||||
| 121 | |||||
| 122 | static cl::opt<unsigned> | ||||
| 123 | MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden, | ||||
| 124 | cl::desc("The number of memory instructions to scan for " | ||||
| 125 | "dead store elimination (default = 100)")); | ||||
| 126 | static cl::opt<unsigned> MemorySSAUpwardsStepLimit( | ||||
| 127 | "dse-memoryssa-walklimit", cl::init(90), cl::Hidden, | ||||
| 128 | cl::desc("The maximum number of steps while walking upwards to find " | ||||
| 129 | "MemoryDefs that may be killed (default = 90)")); | ||||
| 130 | |||||
| 131 | static cl::opt<unsigned> MemorySSAPartialStoreLimit( | ||||
| 132 | "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden, | ||||
| 133 | cl::desc("The maximum number candidates that only partially overwrite the " | ||||
| 134 | "killing MemoryDef to consider" | ||||
| 135 | " (default = 5)")); | ||||
| 136 | |||||
| 137 | static cl::opt<unsigned> MemorySSADefsPerBlockLimit( | ||||
| 138 | "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden, | ||||
| 139 | cl::desc("The number of MemoryDefs we consider as candidates to eliminated " | ||||
| 140 | "other stores per basic block (default = 5000)")); | ||||
| 141 | |||||
| 142 | static cl::opt<unsigned> MemorySSASameBBStepCost( | ||||
| 143 | "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden, | ||||
| 144 | cl::desc( | ||||
| 145 | "The cost of a step in the same basic block as the killing MemoryDef" | ||||
| 146 | "(default = 1)")); | ||||
| 147 | |||||
| 148 | static cl::opt<unsigned> | ||||
| 149 | MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5), | ||||
| 150 | cl::Hidden, | ||||
| 151 | cl::desc("The cost of a step in a different basic " | ||||
| 152 | "block than the killing MemoryDef" | ||||
| 153 | "(default = 5)")); | ||||
| 154 | |||||
| 155 | static cl::opt<unsigned> MemorySSAPathCheckLimit( | ||||
| 156 | "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden, | ||||
| 157 | cl::desc("The maximum number of blocks to check when trying to prove that " | ||||
| 158 | "all paths to an exit go through a killing block (default = 50)")); | ||||
| 159 | |||||
| 160 | //===----------------------------------------------------------------------===// | ||||
| 161 | // Helper functions | ||||
| 162 | //===----------------------------------------------------------------------===// | ||||
| 163 | using OverlapIntervalsTy = std::map<int64_t, int64_t>; | ||||
| 164 | using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>; | ||||
| 165 | |||||
| 166 | /// Does this instruction write some memory? This only returns true for things | ||||
| 167 | /// that we can analyze with other helpers below. | ||||
| 168 | static bool hasAnalyzableMemoryWrite(Instruction *I, | ||||
| 169 | const TargetLibraryInfo &TLI) { | ||||
| 170 | if (isa<StoreInst>(I)) | ||||
| 171 | return true; | ||||
| 172 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { | ||||
| 173 | switch (II->getIntrinsicID()) { | ||||
| 174 | default: | ||||
| 175 | return false; | ||||
| 176 | case Intrinsic::memset: | ||||
| 177 | case Intrinsic::memmove: | ||||
| 178 | case Intrinsic::memcpy: | ||||
| 179 | case Intrinsic::memcpy_inline: | ||||
| 180 | case Intrinsic::memcpy_element_unordered_atomic: | ||||
| 181 | case Intrinsic::memmove_element_unordered_atomic: | ||||
| 182 | case Intrinsic::memset_element_unordered_atomic: | ||||
| 183 | case Intrinsic::init_trampoline: | ||||
| 184 | case Intrinsic::lifetime_end: | ||||
| 185 | case Intrinsic::masked_store: | ||||
| 186 | return true; | ||||
| 187 | } | ||||
| 188 | } | ||||
| 189 | if (auto *CB = dyn_cast<CallBase>(I)) { | ||||
| 190 | LibFunc LF; | ||||
| 191 | if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) { | ||||
| 192 | switch (LF) { | ||||
| 193 | case LibFunc_strcpy: | ||||
| 194 | case LibFunc_strncpy: | ||||
| 195 | case LibFunc_strcat: | ||||
| 196 | case LibFunc_strncat: | ||||
| 197 | return true; | ||||
| 198 | default: | ||||
| 199 | return false; | ||||
| 200 | } | ||||
| 201 | } | ||||
| 202 | } | ||||
| 203 | return false; | ||||
| 204 | } | ||||
| 205 | |||||
| 206 | /// Return a Location stored to by the specified instruction. If isRemovable | ||||
| 207 | /// returns true, this function and getLocForRead completely describe the memory | ||||
| 208 | /// operations for this instruction. | ||||
| 209 | static MemoryLocation getLocForWrite(Instruction *Inst, | ||||
| 210 | const TargetLibraryInfo &TLI) { | ||||
| 211 | if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) | ||||
| 212 | return MemoryLocation::get(SI); | ||||
| 213 | |||||
| 214 | // memcpy/memmove/memset. | ||||
| 215 | if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst)) | ||||
| 216 | return MemoryLocation::getForDest(MI); | ||||
| 217 | |||||
| 218 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { | ||||
| 219 | switch (II->getIntrinsicID()) { | ||||
| 220 | default: | ||||
| 221 | return MemoryLocation(); // Unhandled intrinsic. | ||||
| 222 | case Intrinsic::init_trampoline: | ||||
| 223 | return MemoryLocation::getAfter(II->getArgOperand(0)); | ||||
| 224 | case Intrinsic::masked_store: | ||||
| 225 | return MemoryLocation::getForArgument(II, 1, TLI); | ||||
| 226 | case Intrinsic::lifetime_end: { | ||||
| 227 | uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(); | ||||
| 228 | return MemoryLocation(II->getArgOperand(1), Len); | ||||
| 229 | } | ||||
| 230 | } | ||||
| 231 | } | ||||
| 232 | if (auto *CB = dyn_cast<CallBase>(Inst)) | ||||
| 233 | // All the supported TLI functions so far happen to have dest as their | ||||
| 234 | // first argument. | ||||
| 235 | return MemoryLocation::getAfter(CB->getArgOperand(0)); | ||||
| 236 | return MemoryLocation(); | ||||
| 237 | } | ||||
| 238 | |||||
| 239 | /// If the value of this instruction and the memory it writes to is unused, may | ||||
| 240 | /// we delete this instruction? | ||||
| 241 | static bool isRemovable(Instruction *I) { | ||||
| 242 | // Don't remove volatile/atomic stores. | ||||
| 243 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) | ||||
| 244 | return SI->isUnordered(); | ||||
| 245 | |||||
| 246 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { | ||||
| 247 | switch (II->getIntrinsicID()) { | ||||
| 248 | default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate"); | ||||
| 249 | case Intrinsic::lifetime_end: | ||||
| 250 | // Never remove dead lifetime_end's, e.g. because it is followed by a | ||||
| 251 | // free. | ||||
| 252 | return false; | ||||
| 253 | case Intrinsic::init_trampoline: | ||||
| 254 | // Always safe to remove init_trampoline. | ||||
| 255 | return true; | ||||
| 256 | case Intrinsic::memset: | ||||
| 257 | case Intrinsic::memmove: | ||||
| 258 | case Intrinsic::memcpy: | ||||
| 259 | case Intrinsic::memcpy_inline: | ||||
| 260 | // Don't remove volatile memory intrinsics. | ||||
| 261 | return !cast<MemIntrinsic>(II)->isVolatile(); | ||||
| 262 | case Intrinsic::memcpy_element_unordered_atomic: | ||||
| 263 | case Intrinsic::memmove_element_unordered_atomic: | ||||
| 264 | case Intrinsic::memset_element_unordered_atomic: | ||||
| 265 | case Intrinsic::masked_store: | ||||
| 266 | return true; | ||||
| 267 | } | ||||
| 268 | } | ||||
| 269 | |||||
| 270 | // note: only get here for calls with analyzable writes - i.e. libcalls | ||||
| 271 | if (auto *CB = dyn_cast<CallBase>(I)) | ||||
| 272 | return CB->use_empty(); | ||||
| 273 | |||||
| 274 | return false; | ||||
| 275 | } | ||||
| 276 | |||||
| 277 | /// Returns true if the end of this instruction can be safely shortened in | ||||
| 278 | /// length. | ||||
| 279 | static bool isShortenableAtTheEnd(Instruction *I) { | ||||
| 280 | // Don't shorten stores for now | ||||
| 281 | if (isa<StoreInst>(I)) | ||||
| 282 | return false; | ||||
| 283 | |||||
| 284 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { | ||||
| 285 | switch (II->getIntrinsicID()) { | ||||
| 286 | default: return false; | ||||
| 287 | case Intrinsic::memset: | ||||
| 288 | case Intrinsic::memcpy: | ||||
| 289 | case Intrinsic::memcpy_element_unordered_atomic: | ||||
| 290 | case Intrinsic::memset_element_unordered_atomic: | ||||
| 291 | // Do shorten memory intrinsics. | ||||
| 292 | // FIXME: Add memmove if it's also safe to transform. | ||||
| 293 | return true; | ||||
| 294 | } | ||||
| 295 | } | ||||
| 296 | |||||
| 297 | // Don't shorten libcall calls for now. | ||||
| 298 | |||||
| 299 | return false; | ||||
| 300 | } | ||||
| 301 | |||||
| 302 | /// Returns true if the beginning of this instruction can be safely shortened | ||||
| 303 | /// in length. | ||||
| 304 | static bool isShortenableAtTheBeginning(Instruction *I) { | ||||
| 305 | // FIXME: Handle only memset for now. Supporting memcpy/memmove should be | ||||
| 306 | // easily done by offsetting the source address. | ||||
| 307 | return isa<AnyMemSetInst>(I); | ||||
| 308 | } | ||||
| 309 | |||||
| 310 | static uint64_t getPointerSize(const Value *V, const DataLayout &DL, | ||||
| 311 | const TargetLibraryInfo &TLI, | ||||
| 312 | const Function *F) { | ||||
| 313 | uint64_t Size; | ||||
| 314 | ObjectSizeOpts Opts; | ||||
| 315 | Opts.NullIsUnknownSize = NullPointerIsDefined(F); | ||||
| 316 | |||||
| 317 | if (getObjectSize(V, Size, DL, &TLI, Opts)) | ||||
| 318 | return Size; | ||||
| 319 | return MemoryLocation::UnknownSize; | ||||
| 320 | } | ||||
| 321 | |||||
| 322 | namespace { | ||||
| 323 | |||||
| 324 | enum OverwriteResult { | ||||
| 325 | OW_Begin, | ||||
| 326 | OW_Complete, | ||||
| 327 | OW_End, | ||||
| 328 | OW_PartialEarlierWithFullLater, | ||||
| 329 | OW_MaybePartial, | ||||
| 330 | OW_Unknown | ||||
| 331 | }; | ||||
| 332 | |||||
| 333 | } // end anonymous namespace | ||||
| 334 | |||||
| 335 | /// Check if two instructions are masked stores that completely | ||||
| 336 | /// overwrite one another. More specifically, \p Later has to | ||||
| 337 | /// overwrite \p Earlier. | ||||
| 338 | static OverwriteResult isMaskedStoreOverwrite(const Instruction *Later, | ||||
| 339 | const Instruction *Earlier, | ||||
| 340 | BatchAAResults &AA) { | ||||
| 341 | const auto *IIL = dyn_cast<IntrinsicInst>(Later); | ||||
| 342 | const auto *IIE = dyn_cast<IntrinsicInst>(Earlier); | ||||
| 343 | if (IIL == nullptr || IIE == nullptr) | ||||
| 344 | return OW_Unknown; | ||||
| 345 | if (IIL->getIntrinsicID() != Intrinsic::masked_store || | ||||
| 346 | IIE->getIntrinsicID() != Intrinsic::masked_store) | ||||
| 347 | return OW_Unknown; | ||||
| 348 | // Pointers. | ||||
| 349 | Value *LP = IIL->getArgOperand(1)->stripPointerCasts(); | ||||
| 350 | Value *EP = IIE->getArgOperand(1)->stripPointerCasts(); | ||||
| 351 | if (LP != EP && !AA.isMustAlias(LP, EP)) | ||||
| 352 | return OW_Unknown; | ||||
| 353 | // Masks. | ||||
| 354 | // TODO: check that Later's mask is a superset of the Earlier's mask. | ||||
| 355 | if (IIL->getArgOperand(3) != IIE->getArgOperand(3)) | ||||
| 356 | return OW_Unknown; | ||||
| 357 | return OW_Complete; | ||||
| 358 | } | ||||
| 359 | |||||
| 360 | /// Return 'OW_Complete' if a store to the 'Later' location completely | ||||
| 361 | /// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the | ||||
| 362 | /// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the | ||||
| 363 | /// beginning of the 'Earlier' location is overwritten by 'Later'. | ||||
| 364 | /// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was | ||||
| 365 | /// overwritten by a later (smaller) store which doesn't write outside the big | ||||
| 366 | /// store's memory locations. Returns 'OW_Unknown' if nothing can be determined. | ||||
| 367 | /// NOTE: This function must only be called if both \p Later and \p Earlier | ||||
| 368 | /// write to the same underlying object with valid \p EarlierOff and \p | ||||
| 369 | /// LaterOff. | ||||
| 370 | static OverwriteResult isPartialOverwrite(const MemoryLocation &Later, | ||||
| 371 | const MemoryLocation &Earlier, | ||||
| 372 | int64_t EarlierOff, int64_t LaterOff, | ||||
| 373 | Instruction *DepWrite, | ||||
| 374 | InstOverlapIntervalsTy &IOL) { | ||||
| 375 | const uint64_t LaterSize = Later.Size.getValue(); | ||||
| 376 | const uint64_t EarlierSize = Earlier.Size.getValue(); | ||||
| 377 | // We may now overlap, although the overlap is not complete. There might also | ||||
| 378 | // be other incomplete overlaps, and together, they might cover the complete | ||||
| 379 | // earlier write. | ||||
| 380 | // Note: The correctness of this logic depends on the fact that this function | ||||
| 381 | // is not even called with DepWrite when there are any intervening reads. | ||||
| 382 | if (EnablePartialOverwriteTracking && | ||||
| 383 | LaterOff < int64_t(EarlierOff + EarlierSize) && | ||||
| 384 | int64_t(LaterOff + LaterSize) >= EarlierOff) { | ||||
| 385 | |||||
| 386 | // Insert our part of the overlap into the map. | ||||
| 387 | auto &IM = IOL[DepWrite]; | ||||
| 388 | LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOffdo { } while (false) | ||||
| 389 | << ", " << int64_t(EarlierOff + EarlierSize)do { } while (false) | ||||
| 390 | << ") Later [" << LaterOff << ", "do { } while (false) | ||||
| 391 | << int64_t(LaterOff + LaterSize) << ")\n")do { } while (false); | ||||
| 392 | |||||
| 393 | // Make sure that we only insert non-overlapping intervals and combine | ||||
| 394 | // adjacent intervals. The intervals are stored in the map with the ending | ||||
| 395 | // offset as the key (in the half-open sense) and the starting offset as | ||||
| 396 | // the value. | ||||
| 397 | int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize; | ||||
| 398 | |||||
| 399 | // Find any intervals ending at, or after, LaterIntStart which start | ||||
| 400 | // before LaterIntEnd. | ||||
| 401 | auto ILI = IM.lower_bound(LaterIntStart); | ||||
| 402 | if (ILI != IM.end() && ILI->second <= LaterIntEnd) { | ||||
| 403 | // This existing interval is overlapped with the current store somewhere | ||||
| 404 | // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing | ||||
| 405 | // intervals and adjusting our start and end. | ||||
| 406 | LaterIntStart = std::min(LaterIntStart, ILI->second); | ||||
| 407 | LaterIntEnd = std::max(LaterIntEnd, ILI->first); | ||||
| 408 | ILI = IM.erase(ILI); | ||||
| 409 | |||||
| 410 | // Continue erasing and adjusting our end in case other previous | ||||
| 411 | // intervals are also overlapped with the current store. | ||||
| 412 | // | ||||
| 413 | // |--- earlier 1 ---| |--- earlier 2 ---| | ||||
| 414 | // |------- later---------| | ||||
| 415 | // | ||||
| 416 | while (ILI != IM.end() && ILI->second <= LaterIntEnd) { | ||||
| 417 | assert(ILI->second > LaterIntStart && "Unexpected interval"); | ||||
| 418 | LaterIntEnd = std::max(LaterIntEnd, ILI->first); | ||||
| 419 | ILI = IM.erase(ILI); | ||||
| 420 | } | ||||
| 421 | } | ||||
| 422 | |||||
| 423 | IM[LaterIntEnd] = LaterIntStart; | ||||
| 424 | |||||
| 425 | ILI = IM.begin(); | ||||
| 426 | if (ILI->second <= EarlierOff && | ||||
| 427 | ILI->first >= int64_t(EarlierOff + EarlierSize)) { | ||||
| 428 | LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["do { } while (false) | ||||
| 429 | << EarlierOff << ", "do { } while (false) | ||||
| 430 | << int64_t(EarlierOff + EarlierSize)do { } while (false) | ||||
| 431 | << ") Composite Later [" << ILI->second << ", "do { } while (false) | ||||
| 432 | << ILI->first << ")\n")do { } while (false); | ||||
| 433 | ++NumCompletePartials; | ||||
| 434 | return OW_Complete; | ||||
| 435 | } | ||||
| 436 | } | ||||
| 437 | |||||
| 438 | // Check for an earlier store which writes to all the memory locations that | ||||
| 439 | // the later store writes to. | ||||
| 440 | if (EnablePartialStoreMerging && LaterOff >= EarlierOff && | ||||
| 441 | int64_t(EarlierOff + EarlierSize) > LaterOff && | ||||
| 442 | uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) { | ||||
| 443 | LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["do { } while (false) | ||||
| 444 | << EarlierOff << ", "do { } while (false) | ||||
| 445 | << int64_t(EarlierOff + EarlierSize)do { } while (false) | ||||
| 446 | << ") by a later store [" << LaterOff << ", "do { } while (false) | ||||
| 447 | << int64_t(LaterOff + LaterSize) << ")\n")do { } while (false); | ||||
| 448 | // TODO: Maybe come up with a better name? | ||||
| 449 | return OW_PartialEarlierWithFullLater; | ||||
| 450 | } | ||||
| 451 | |||||
| 452 | // Another interesting case is if the later store overwrites the end of the | ||||
| 453 | // earlier store. | ||||
| 454 | // | ||||
| 455 | // |--earlier--| | ||||
| 456 | // |-- later --| | ||||
| 457 | // | ||||
| 458 | // In this case we may want to trim the size of earlier to avoid generating | ||||
| 459 | // writes to addresses which will definitely be overwritten later | ||||
| 460 | if (!EnablePartialOverwriteTracking && | ||||
| 461 | (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) && | ||||
| 462 | int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize))) | ||||
| 463 | return OW_End; | ||||
| 464 | |||||
| 465 | // Finally, we also need to check if the later store overwrites the beginning | ||||
| 466 | // of the earlier store. | ||||
| 467 | // | ||||
| 468 | // |--earlier--| | ||||
| 469 | // |-- later --| | ||||
| 470 | // | ||||
| 471 | // In this case we may want to move the destination address and trim the size | ||||
| 472 | // of earlier to avoid generating writes to addresses which will definitely | ||||
| 473 | // be overwritten later. | ||||
| 474 | if (!EnablePartialOverwriteTracking && | ||||
| 475 | (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) { | ||||
| 476 | assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) && | ||||
| 477 |        "Expect to be handled as OW_Complete"); | ||||
| 478 | return OW_Begin; | ||||
| 479 | } | ||||
| 480 | // Otherwise, they don't completely overlap. | ||||
| 481 | return OW_Unknown; | ||||
| 482 | } | ||||
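The interval bookkeeping above (ending offset as the map key, starting offset as the value, merging on insert) can be exercised in isolation. A minimal sketch using the same representation as OverlapIntervalsTy; the helper name addInterval is ours, not the pass's:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <map>

    // Same shape as OverlapIntervalsTy: key = ending offset (half-open),
    // value = starting offset.
    using Intervals = std::map<int64_t, int64_t>;

    // Insert [Start, End) and merge overlapping or adjacent intervals,
    // mirroring the erase-and-extend loop in isPartialOverwrite().
    static void addInterval(Intervals &IM, int64_t Start, int64_t End) {
      auto ILI = IM.lower_bound(Start); // first interval ending at/after Start
      while (ILI != IM.end() && ILI->second <= End) {
        Start = std::min(Start, ILI->second);
        End = std::max(End, ILI->first);
        ILI = IM.erase(ILI);
      }
      IM[End] = Start;
    }

    int main() {
      Intervals IM;
      addInterval(IM, 0, 4); // later store covers bytes [0, 4) of the earlier one
      addInterval(IM, 4, 8); // ... and bytes [4, 8): merged into [0, 8)
      // An earlier 8-byte store at offset 0 is now fully covered: OW_Complete.
      assert(IM.begin()->second <= 0 && IM.begin()->first >= 8);
      return 0;
    }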
| 483 | |||||
| 484 | /// Returns true if the memory which is accessed by the second instruction is not | ||||
| 485 | /// modified between the first and the second instruction. | ||||
| 486 | /// Precondition: Second instruction must be dominated by the first | ||||
| 487 | /// instruction. | ||||
| 488 | static bool | ||||
| 489 | memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, | ||||
| 490 | BatchAAResults &AA, const DataLayout &DL, | ||||
| 491 | DominatorTree *DT) { | ||||
| 492 | // Do a backwards scan through the CFG from SecondI to FirstI. Look for | ||||
| 493 | // instructions which can modify the memory location accessed by SecondI. | ||||
| 494 | // | ||||
| 495 | // While doing the walk keep track of the address to check. It might be | ||||
| 496 | // different in different basic blocks due to PHI translation. | ||||
| 497 | using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>; | ||||
| 498 | SmallVector<BlockAddressPair, 16> WorkList; | ||||
| 499 | // Keep track of the address we visited each block with. Bail out if we | ||||
| 500 | // visit a block with different addresses. | ||||
| 501 | DenseMap<BasicBlock *, Value *> Visited; | ||||
| 502 | |||||
| 503 | BasicBlock::iterator FirstBBI(FirstI); | ||||
| 504 | ++FirstBBI; | ||||
| 505 | BasicBlock::iterator SecondBBI(SecondI); | ||||
| 506 | BasicBlock *FirstBB = FirstI->getParent(); | ||||
| 507 | BasicBlock *SecondBB = SecondI->getParent(); | ||||
| 508 | MemoryLocation MemLoc = MemoryLocation::get(SecondI); | ||||
| 509 | auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr); | ||||
| 510 | |||||
| 511 | // Start checking the SecondBB. | ||||
| 512 | WorkList.push_back( | ||||
| 513 | std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr))); | ||||
| 514 | bool isFirstBlock = true; | ||||
| 515 | |||||
| 516 | // Check all blocks going backward until we reach the FirstBB. | ||||
| 517 | while (!WorkList.empty()) { | ||||
| 518 | BlockAddressPair Current = WorkList.pop_back_val(); | ||||
| 519 | BasicBlock *B = Current.first; | ||||
| 520 | PHITransAddr &Addr = Current.second; | ||||
| 521 | Value *Ptr = Addr.getAddr(); | ||||
| 522 | |||||
| 523 | // Ignore instructions before FirstI if this is the FirstBB. | ||||
| 524 | BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin()); | ||||
| 525 | |||||
| 526 | BasicBlock::iterator EI; | ||||
| 527 | if (isFirstBlock) { | ||||
| 528 | // Ignore instructions after SecondI if this is the first visit of SecondBB. | ||||
| 529 | assert(B == SecondBB && "first block is not the store block"); | ||||
| 530 | EI = SecondBBI; | ||||
| 531 | isFirstBlock = false; | ||||
| 532 | } else { | ||||
| 533 | // It's not SecondBB or (in case of a loop) the second visit of SecondBB. | ||||
| 534 | // In this case we also have to look at instructions after SecondI. | ||||
| 535 | EI = B->end(); | ||||
| 536 | } | ||||
| 537 | for (; BI != EI; ++BI) { | ||||
| 538 | Instruction *I = &*BI; | ||||
| 539 | if (I->mayWriteToMemory() && I != SecondI) | ||||
| 540 | if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr)))) | ||||
| 541 | return false; | ||||
| 542 | } | ||||
| 543 | if (B != FirstBB) { | ||||
| 544 | assert(B != &FirstBB->getParent()->getEntryBlock() && | ||||
| 545 |        "Should not hit the entry block because SI must be dominated by LI"); | ||||
| 546 | for (BasicBlock *Pred : predecessors(B)) { | ||||
| 547 | PHITransAddr PredAddr = Addr; | ||||
| 548 | if (PredAddr.NeedsPHITranslationFromBlock(B)) { | ||||
| 549 | if (!PredAddr.IsPotentiallyPHITranslatable()) | ||||
| 550 | return false; | ||||
| 551 | if (PredAddr.PHITranslateValue(B, Pred, DT, false)) | ||||
| 552 | return false; | ||||
| 553 | } | ||||
| 554 | Value *TranslatedPtr = PredAddr.getAddr(); | ||||
| 555 | auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr)); | ||||
| 556 | if (!Inserted.second) { | ||||
| 557 | // We already visited this block before. If it was with a different | ||||
| 558 | // address - bail out! | ||||
| 559 | if (TranslatedPtr != Inserted.first->second) | ||||
| 560 | return false; | ||||
| 561 | // ... otherwise just skip it. | ||||
| 562 | continue; | ||||
| 563 | } | ||||
| 564 | WorkList.push_back(std::make_pair(Pred, PredAddr)); | ||||
| 565 | } | ||||
| 566 | } | ||||
| 567 | } | ||||
| 568 | return true; | ||||
| 569 | } | ||||
| 570 | |||||
| 571 | static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierStart, | ||||
| 572 | uint64_t &EarlierSize, int64_t LaterStart, | ||||
| 573 | uint64_t LaterSize, bool IsOverwriteEnd) { | ||||
| 574 | auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite); | ||||
| 575 | Align PrefAlign = EarlierIntrinsic->getDestAlign().valueOrOne(); | ||||
| 576 | |||||
| 577 | // We assume that memset/memcpy operates in chunks of the "largest" native | ||||
| 578 | // type size, aligned on the same value. That means the optimal start and size | ||||
| 579 | // of memset/memcpy should be a multiple of the preferred alignment of that | ||||
| 580 | // type. That is, there is no sense in trying to reduce the store size any | ||||
| 581 | // further, since any "extra" stores come for free anyway. | ||||
| 582 | // On the other hand, the maximum alignment we can achieve is limited by the | ||||
| 583 | // alignment of the initial store. | ||||
| 584 | |||||
| 585 | // TODO: Limit maximum alignment by preferred (or abi?) alignment of the | ||||
| 586 | // "largest" native type. | ||||
| 587 | // Note: What is the proper way to get that value? | ||||
| 588 | // Should TargetTransformInfo::getRegisterBitWidth be used or anything else? | ||||
| 589 | // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign); | ||||
| 590 | |||||
| 591 | int64_t ToRemoveStart = 0; | ||||
| 592 | uint64_t ToRemoveSize = 0; | ||||
| 593 | // Compute start and size of the region to remove. Make sure 'PrefAlign' is | ||||
| 594 | // maintained on the remaining store. | ||||
| 595 | if (IsOverwriteEnd) { | ||||
| 596 | // Calculate the required adjustment for 'LaterStart' in order to keep the | ||||
| 597 | // remaining store size aligned on 'PrefAlign'. | ||||
| 598 | uint64_t Off = | ||||
| 599 | offsetToAlignment(uint64_t(LaterStart - EarlierStart), PrefAlign); | ||||
| 600 | ToRemoveStart = LaterStart + Off; | ||||
| 601 | if (EarlierSize <= uint64_t(ToRemoveStart - EarlierStart)) | ||||
| 602 | return false; | ||||
| 603 | ToRemoveSize = EarlierSize - uint64_t(ToRemoveStart - EarlierStart); | ||||
| 604 | } else { | ||||
| 605 | ToRemoveStart = EarlierStart; | ||||
| 606 | assert(LaterSize >= uint64_t(EarlierStart - LaterStart) && | ||||
| 607 |        "Not overlapping accesses?"); | ||||
| 608 | ToRemoveSize = LaterSize - uint64_t(EarlierStart - LaterStart); | ||||
| 609 | // Calculate the required adjustment for 'ToRemoveSize' in order to keep the | ||||
| 610 | // start of the remaining store aligned on 'PrefAlign'. | ||||
| 611 | uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign); | ||||
| 612 | if (Off != 0) { | ||||
| 613 | if (ToRemoveSize <= (PrefAlign.value() - Off)) | ||||
| 614 | return false; | ||||
| 615 | ToRemoveSize -= PrefAlign.value() - Off; | ||||
| 616 | } | ||||
| 617 | assert(isAligned(PrefAlign, ToRemoveSize) && | ||||
| 618 |        "Should preserve selected alignment"); | ||||
| 619 | } | ||||
| 620 | |||||
| 621 | assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove"); | ||||
| 622 | assert(EarlierSize > ToRemoveSize && "Can't remove more than original size"); | ||||
| 623 | |||||
| 624 | uint64_t NewSize = EarlierSize - ToRemoveSize; | ||||
| 625 | if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) { | ||||
| 626 | // When shortening an atomic memory intrinsic, the newly shortened | ||||
| 627 | // length must remain an integer multiple of the element size. | ||||
| 628 | const uint32_t ElementSize = AMI->getElementSizeInBytes(); | ||||
| 629 | if (0 != NewSize % ElementSize) | ||||
| 630 | return false; | ||||
| 631 | } | ||||
| 632 | |||||
| 633 | LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW "do { } while (false) | ||||
| 634 | << (IsOverwriteEnd ? "END" : "BEGIN") << ": "do { } while (false) | ||||
| 635 | << *EarlierWrite << "\n KILLER [" << ToRemoveStart << ", "do { } while (false) | ||||
| 636 | << int64_t(ToRemoveStart + ToRemoveSize) << ")\n")do { } while (false); | ||||
| 637 | |||||
| 638 | Value *EarlierWriteLength = EarlierIntrinsic->getLength(); | ||||
| 639 | Value *TrimmedLength = | ||||
| 640 | ConstantInt::get(EarlierWriteLength->getType(), NewSize); | ||||
| 641 | EarlierIntrinsic->setLength(TrimmedLength); | ||||
| 642 | EarlierIntrinsic->setDestAlignment(PrefAlign); | ||||
| 643 | |||||
| 644 | if (!IsOverwriteEnd) { | ||||
| 645 | Value *OrigDest = EarlierIntrinsic->getRawDest(); | ||||
| 646 | Type *Int8PtrTy = | ||||
| 647 | Type::getInt8PtrTy(EarlierIntrinsic->getContext(), | ||||
| 648 | OrigDest->getType()->getPointerAddressSpace()); | ||||
| 649 | Value *Dest = OrigDest; | ||||
| 650 | if (OrigDest->getType() != Int8PtrTy) | ||||
| 651 | Dest = CastInst::CreatePointerCast(OrigDest, Int8PtrTy, "", EarlierWrite); | ||||
| 652 | Value *Indices[1] = { | ||||
| 653 | ConstantInt::get(EarlierWriteLength->getType(), ToRemoveSize)}; | ||||
| 654 | Instruction *NewDestGEP = GetElementPtrInst::CreateInBounds( | ||||
| 655 | Type::getInt8Ty(EarlierIntrinsic->getContext()), | ||||
| 656 | Dest, Indices, "", EarlierWrite); | ||||
| 657 | NewDestGEP->setDebugLoc(EarlierIntrinsic->getDebugLoc()); | ||||
| 658 | if (NewDestGEP->getType() != OrigDest->getType()) | ||||
| 659 | NewDestGEP = CastInst::CreatePointerCast(NewDestGEP, OrigDest->getType(), | ||||
| 660 | "", EarlierWrite); | ||||
| 661 | EarlierIntrinsic->setDest(NewDestGEP); | ||||
| 662 | } | ||||
| 663 | |||||
| 664 | // Finally update start and size of earlier access. | ||||
| 665 | if (!IsOverwriteEnd) | ||||
| 666 | EarlierStart += ToRemoveSize; | ||||
| 667 | EarlierSize = NewSize; | ||||
| 668 | |||||
| 669 | return true; | ||||
| 670 | } | ||||
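A worked instance of the IsOverwriteEnd arithmetic in tryToShorten(), with assumed numbers: an earlier 32-byte memset whose bytes from offset 18 onward are overwritten, and a preferred destination alignment of 8. offsetToAlignment() rounds the split point up to 24, so only 8 bytes are trimmed and the memset keeps length 24; re-writing bytes [18, 24) is fine because, per the comment above, those "extra" stores come for free at that alignment.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t PrefAlign = 8; // assumed dest alignment
      const int64_t EarlierStart = 0, LaterStart = 18;
      const uint64_t EarlierSize = 32;

      // offsetToAlignment(18, 8) == 6: bump the split point to the next
      // multiple of PrefAlign.
      uint64_t Off =
          (PrefAlign - uint64_t(LaterStart - EarlierStart) % PrefAlign) % PrefAlign;
      int64_t ToRemoveStart = LaterStart + int64_t(Off);                            // 24
      uint64_t ToRemoveSize = EarlierSize - uint64_t(ToRemoveStart - EarlierStart); // 8

      assert(ToRemoveStart == 24 && ToRemoveSize == 8);
      // The memset's length is rewritten from 32 to 32 - 8 == 24.
      return 0;
    }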
| 671 | |||||
| 672 | static bool tryToShortenEnd(Instruction *EarlierWrite, | ||||
| 673 | OverlapIntervalsTy &IntervalMap, | ||||
| 674 | int64_t &EarlierStart, uint64_t &EarlierSize) { | ||||
| 675 | if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite)) | ||||
| 676 | return false; | ||||
| 677 | |||||
| 678 | OverlapIntervalsTy::iterator OII = --IntervalMap.end(); | ||||
| 679 | int64_t LaterStart = OII->second; | ||||
| 680 | uint64_t LaterSize = OII->first - LaterStart; | ||||
| 681 | |||||
| 682 | assert(OII->first - LaterStart >= 0 && "Size expected to be positive"); | ||||
| 683 | |||||
| 684 | if (LaterStart > EarlierStart && | ||||
| 685 | // Note: "LaterStart - EarlierStart" is known to be positive due to | ||||
| 686 | // preceding check. | ||||
| 687 | (uint64_t)(LaterStart - EarlierStart) < EarlierSize && | ||||
| 688 | // Note: "EarlierSize - (uint64_t)(LaterStart - EarlierStart)" is known to | ||||
| 689 | // be non negative due to preceding checks. | ||||
| 690 | LaterSize >= EarlierSize - (uint64_t)(LaterStart - EarlierStart)) { | ||||
| 691 | if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart, | ||||
| 692 | LaterSize, true)) { | ||||
| 693 | IntervalMap.erase(OII); | ||||
| 694 | return true; | ||||
| 695 | } | ||||
| 696 | } | ||||
| 697 | return false; | ||||
| 698 | } | ||||
| 699 | |||||
| 700 | static bool tryToShortenBegin(Instruction *EarlierWrite, | ||||
| 701 | OverlapIntervalsTy &IntervalMap, | ||||
| 702 | int64_t &EarlierStart, uint64_t &EarlierSize) { | ||||
| 703 | if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite)) | ||||
| 704 | return false; | ||||
| 705 | |||||
| 706 | OverlapIntervalsTy::iterator OII = IntervalMap.begin(); | ||||
| 707 | int64_t LaterStart = OII->second; | ||||
| 708 | uint64_t LaterSize = OII->first - LaterStart; | ||||
| 709 | |||||
| 710 | assert(OII->first - LaterStart >= 0 && "Size expected to be positive"); | ||||
| 711 | |||||
| 712 | if (LaterStart <= EarlierStart && | ||||
| 713 | // Note: "EarlierStart - LaterStart" is known to be non negative due to | ||||
| 714 | // preceding check. | ||||
| 715 | LaterSize > (uint64_t)(EarlierStart - LaterStart)) { | ||||
| 716 | // Note: "LaterSize - (uint64_t)(EarlierStart - LaterStart)" is known to be | ||||
| 717 | // positive due to preceding checks. | ||||
| 718 | assert(LaterSize - (uint64_t)(EarlierStart - LaterStart) < EarlierSize && | ||||
| 719 |        "Should have been handled as OW_Complete"); | ||||
| 720 | if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart, | ||||
| 721 | LaterSize, false)) { | ||||
| 722 | IntervalMap.erase(OII); | ||||
| 723 | return true; | ||||
| 724 | } | ||||
| 725 | } | ||||
| 726 | return false; | ||||
| 727 | } | ||||
| 728 | |||||
| 729 | static bool removePartiallyOverlappedStores(const DataLayout &DL, | ||||
| 730 | InstOverlapIntervalsTy &IOL, | ||||
| 731 | const TargetLibraryInfo &TLI) { | ||||
| 732 | bool Changed = false; | ||||
| 733 | for (auto OI : IOL) { | ||||
| 734 | Instruction *EarlierWrite = OI.first; | ||||
| 735 | MemoryLocation Loc = getLocForWrite(EarlierWrite, TLI); | ||||
| 736 | assert(isRemovable(EarlierWrite) && "Expect only removable instruction"); | ||||
| 737 | |||||
| 738 | const Value *Ptr = Loc.Ptr->stripPointerCasts(); | ||||
| | ^ Analyzer note (the warning in the header): 'Loc.Ptr' can be null here, so this call dereferences a null pointer (line 738, column 24). | ||||
| 739 | int64_t EarlierStart = 0; | ||||
| 740 | uint64_t EarlierSize = Loc.Size.getValue(); | ||||
| 741 | GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL); | ||||
| 742 | OverlapIntervalsTy &IntervalMap = OI.second; | ||||
| 743 | Changed |= | ||||
| 744 | tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize); | ||||
| 745 | if (IntervalMap.empty()) | ||||
| 746 | continue; | ||||
| 747 | Changed |= | ||||
| 748 | tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize); | ||||
| 749 | } | ||||
| 750 | return Changed; | ||||
| 751 | } | ||||
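Line 738 above is the site of the warning in the report header: getLocForWrite() returns a default-constructed MemoryLocation (with a null Ptr) for writes it does not handle, and the analyzer cannot see the invariant that every instruction recorded in IOL has an analyzable, removable write location. A defensive sketch of the guard the analyzer is asking for (an illustration only, not the upstream fix):

    // Hypothetical hardening inside the loop of removePartiallyOverlappedStores():
    MemoryLocation Loc = getLocForWrite(EarlierWrite, TLI);
    assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
    if (!Loc.Ptr)
      continue; // unanalyzable write; skip rather than dereference null
    const Value *Ptr = Loc.Ptr->stripPointerCasts();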
| 752 | |||||
| 753 | static Constant *tryToMergePartialOverlappingStores( | ||||
| 754 | StoreInst *Earlier, StoreInst *Later, int64_t InstWriteOffset, | ||||
| 755 | int64_t DepWriteOffset, const DataLayout &DL, BatchAAResults &AA, | ||||
| 756 | DominatorTree *DT) { | ||||
| 757 | |||||
| 758 | if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) && | ||||
| 759 | DL.typeSizeEqualsStoreSize(Earlier->getValueOperand()->getType()) && | ||||
| 760 | Later && isa<ConstantInt>(Later->getValueOperand()) && | ||||
| 761 | DL.typeSizeEqualsStoreSize(Later->getValueOperand()->getType()) && | ||||
| 762 | memoryIsNotModifiedBetween(Earlier, Later, AA, DL, DT)) { | ||||
| 763 | // If the store we find is: | ||||
| 764 | // a) partially overwritten by the store to 'Loc' | ||||
| 765 | // b) the later store is fully contained in the earlier one and | ||||
| 766 | // c) they both have a constant value | ||||
| 767 | // d) none of the two stores need padding | ||||
| 768 | // Merge the two stores, replacing the earlier store's value with a | ||||
| 769 | // merge of both values. | ||||
| 770 | // TODO: Deal with other constant types (vectors, etc), and probably | ||||
| 771 | // some mem intrinsics (if needed) | ||||
| 772 | |||||
| 773 | APInt EarlierValue = | ||||
| 774 | cast<ConstantInt>(Earlier->getValueOperand())->getValue(); | ||||
| 775 | APInt LaterValue = cast<ConstantInt>(Later->getValueOperand())->getValue(); | ||||
| 776 | unsigned LaterBits = LaterValue.getBitWidth(); | ||||
| 777 | assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth()); | ||||
| 778 | LaterValue = LaterValue.zext(EarlierValue.getBitWidth()); | ||||
| 779 | |||||
| 780 | // Offset of the smaller store inside the larger store | ||||
| 781 | unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8; | ||||
| 782 | unsigned LShiftAmount = DL.isBigEndian() ? EarlierValue.getBitWidth() - | ||||
| 783 | BitOffsetDiff - LaterBits | ||||
| 784 | : BitOffsetDiff; | ||||
| 785 | APInt Mask = APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount, | ||||
| 786 | LShiftAmount + LaterBits); | ||||
| 787 | // Clear the bits we'll be replacing, then OR with the smaller | ||||
| 788 | // store, shifted appropriately. | ||||
| 789 | APInt Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount); | ||||
| 790 | LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Earlier: " << *Earlierdo { } while (false) | ||||
| 791 | << "\n Later: " << *Laterdo { } while (false) | ||||
| 792 | << "\n Merged Value: " << Merged << '\n')do { } while (false); | ||||
| 793 | return ConstantInt::get(Earlier->getValueOperand()->getType(), Merged); | ||||
| 794 | } | ||||
| 795 | return nullptr; | ||||
| 796 | } | ||||
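The mask-and-shift merge above can be sanity-checked with plain integers instead of APInt. A little-endian sketch with assumed values: a 32-bit earlier store that fully contains a later 8-bit store at byte offset 1, so LShiftAmount is 8.

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t EarlierValue = 0xAABBCCDDu; // earlier 32-bit constant store
      uint32_t LaterValue = 0x11u;         // later 8-bit constant store
      unsigned LaterBits = 8;
      unsigned LShiftAmount = 1 * 8;       // byte offset 1, little-endian

      uint32_t Mask = ((1u << LaterBits) - 1) << LShiftAmount; // 0x0000FF00
      // Clear the bits being replaced, then OR in the shifted smaller value.
      uint32_t Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
      assert(Merged == 0xAABB11DDu);
      return 0;
    }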
| 797 | |||||
| 798 | namespace { | ||||
| 799 | // Returns true if \p I is an intrinsic that does not read or write memory. | ||||
| 800 | bool isNoopIntrinsic(Instruction *I) { | ||||
| 801 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { | ||||
| 802 | switch (II->getIntrinsicID()) { | ||||
| 803 | case Intrinsic::lifetime_start: | ||||
| 804 | case Intrinsic::lifetime_end: | ||||
| 805 | case Intrinsic::invariant_end: | ||||
| 806 | case Intrinsic::launder_invariant_group: | ||||
| 807 | case Intrinsic::assume: | ||||
| 808 | return true; | ||||
| 809 | case Intrinsic::dbg_addr: | ||||
| 810 | case Intrinsic::dbg_declare: | ||||
| 811 | case Intrinsic::dbg_label: | ||||
| 812 | case Intrinsic::dbg_value: | ||||
| 813 | llvm_unreachable("Intrinsic should not be modeled in MemorySSA"); | ||||
| 814 | default: | ||||
| 815 | return false; | ||||
| 816 | } | ||||
| 817 | } | ||||
| 818 | return false; | ||||
| 819 | } | ||||
| 820 | |||||
| 821 | // Check if we can ignore \p D for DSE. | ||||
| 822 | bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) { | ||||
| 823 | Instruction *DI = D->getMemoryInst(); | ||||
| 824 | // Calls that only access inaccessible memory cannot read or write any memory | ||||
| 825 | // locations we consider for elimination. | ||||
| 826 | if (auto *CB = dyn_cast<CallBase>(DI)) | ||||
| 827 | if (CB->onlyAccessesInaccessibleMemory()) | ||||
| 828 | return true; | ||||
| 829 | |||||
| 830 | // We can eliminate stores to locations not visible to the caller across | ||||
| 831 | // throwing instructions. | ||||
| 832 | if (DI->mayThrow() && !DefVisibleToCaller) | ||||
| 833 | return true; | ||||
| 834 | |||||
| 835 | // We can remove the dead stores, irrespective of the fence and its ordering | ||||
| 836 | // (release/acquire/seq_cst). Fences only constrain the ordering of | ||||
| 837 | // already visible stores; they do not make a store visible to other | ||||
| 838 | // threads. So, skipping over a fence does not change a store from being | ||||
| 839 | // dead. | ||||
| 840 | if (isa<FenceInst>(DI)) | ||||
| 841 | return true; | ||||
| 842 | |||||
| 843 | // Skip intrinsics that do not really read or modify memory. | ||||
| 844 | if (isNoopIntrinsic(D->getMemoryInst())) | ||||
| 845 | return true; | ||||
| 846 | |||||
| 847 | return false; | ||||
| 848 | } | ||||
| 849 | |||||
| 850 | struct DSEState { | ||||
| 851 | Function &F; | ||||
| 852 | AliasAnalysis &AA; | ||||
| 853 | |||||
| 854 | /// The single BatchAA instance that is used to cache AA queries. It will | ||||
| 855 | /// not be invalidated over the whole run. This is safe, because: | ||||
| 856 | /// 1. Only memory writes are removed, so the alias cache for memory | ||||
| 857 | /// locations remains valid. | ||||
| 858 | /// 2. No new instructions are added (only instructions removed), so cached | ||||
| 859 | /// information for a deleted value cannot be accessed by a re-used new | ||||
| 860 | /// value pointer. | ||||
| 861 | BatchAAResults BatchAA; | ||||
| 862 | |||||
| 863 | MemorySSA &MSSA; | ||||
| 864 | DominatorTree &DT; | ||||
| 865 | PostDominatorTree &PDT; | ||||
| 866 | const TargetLibraryInfo &TLI; | ||||
| 867 | const DataLayout &DL; | ||||
| 868 | const LoopInfo &LI; | ||||
| 869 | |||||
| 870 | // Whether the function contains any irreducible control flow, useful for | ||||
| 871 | // being accurately able to detect loops. | ||||
| 872 | bool ContainsIrreducibleLoops; | ||||
| 873 | |||||
| 874 | // All MemoryDefs that potentially could kill other MemDefs. | ||||
| 875 | SmallVector<MemoryDef *, 64> MemDefs; | ||||
| 876 | // Any that should be skipped as they are already deleted | ||||
| 877 | SmallPtrSet<MemoryAccess *, 4> SkipStores; | ||||
| 878 | // Keep track of all of the objects that are invisible to the caller before | ||||
| 879 | // the function returns. | ||||
| 880 | // SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet; | ||||
| 881 | DenseMap<const Value *, bool> InvisibleToCallerBeforeRet; | ||||
| 882 | // Keep track of all of the objects that are invisible to the caller after | ||||
| 883 | // the function returns. | ||||
| 884 | DenseMap<const Value *, bool> InvisibleToCallerAfterRet; | ||||
| 885 | // Keep track of blocks with throwing instructions not modeled in MemorySSA. | ||||
| 886 | SmallPtrSet<BasicBlock *, 16> ThrowingBlocks; | ||||
| 887 | // Post-order numbers for each basic block. Used to figure out if memory | ||||
| 888 | // accesses are executed before another access. | ||||
| 889 | DenseMap<BasicBlock *, unsigned> PostOrderNumbers; | ||||
| 890 | |||||
| 891 | /// Keep track of instructions (partly) overlapping with killing MemoryDefs per | ||||
| 892 | /// basic block. | ||||
| 893 | DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs; | ||||
| 894 | |||||
| 895 | DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT, | ||||
| 896 | PostDominatorTree &PDT, const TargetLibraryInfo &TLI, | ||||
| 897 | const LoopInfo &LI) | ||||
| 898 | : F(F), AA(AA), BatchAA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI), | ||||
| 899 | DL(F.getParent()->getDataLayout()), LI(LI) {} | ||||
| 900 | |||||
| 901 | static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, | ||||
| 902 | DominatorTree &DT, PostDominatorTree &PDT, | ||||
| 903 | const TargetLibraryInfo &TLI, const LoopInfo &LI) { | ||||
| 904 | DSEState State(F, AA, MSSA, DT, PDT, TLI, LI); | ||||
| 905 | // Collect blocks with throwing instructions not modeled in MemorySSA and | ||||
| 906 | // alloc-like objects. | ||||
| 907 | unsigned PO = 0; | ||||
| 908 | for (BasicBlock *BB : post_order(&F)) { | ||||
| 909 | State.PostOrderNumbers[BB] = PO++; | ||||
| 910 | for (Instruction &I : *BB) { | ||||
| 911 | MemoryAccess *MA = MSSA.getMemoryAccess(&I); | ||||
| 912 | if (I.mayThrow() && !MA) | ||||
| 913 | State.ThrowingBlocks.insert(I.getParent()); | ||||
| 914 | |||||
| 915 | auto *MD = dyn_cast_or_null<MemoryDef>(MA); | ||||
| 916 | if (MD && State.MemDefs.size() < MemorySSADefsPerBlockLimit && | ||||
| 917 | (State.getLocForWriteEx(&I) || State.isMemTerminatorInst(&I))) | ||||
| 918 | State.MemDefs.push_back(MD); | ||||
| 919 | } | ||||
| 920 | } | ||||
| 921 | |||||
| 922 | // Treat byval or inalloca arguments the same as Allocas; stores to them are | ||||
| 923 | // dead at the end of the function. | ||||
| 924 | for (Argument &AI : F.args()) | ||||
| 925 | if (AI.hasPassPointeeByValueCopyAttr()) { | ||||
| 926 | // For byval, the caller doesn't know the address of the allocation. | ||||
| 927 | if (AI.hasByValAttr()) | ||||
| 928 | State.InvisibleToCallerBeforeRet.insert({&AI, true}); | ||||
| 929 | State.InvisibleToCallerAfterRet.insert({&AI, true}); | ||||
| 930 | } | ||||
| 931 | |||||
| 932 | // Collect whether there is any irreducible control flow in the function. | ||||
| 933 | State.ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI); | ||||
| 934 | |||||
| 935 | return State; | ||||
| 936 | } | ||||
| 937 | |||||
| 938 | /// Return 'OW_Complete' if a store to the 'Later' location (by \p LaterI | ||||
| 939 | /// instruction) completely overwrites a store to the 'Earlier' location | ||||
| 940 | /// (by \p EarlierI instruction). | ||||
| 941 | /// Return OW_MaybePartial if \p Later does not completely overwrite | ||||
| 942 | /// \p Earlier, but they both write to the same underlying object. In that | ||||
| 943 | /// case, use isPartialOverwrite to check if \p Later partially overwrites | ||||
| 944 | /// \p Earlier. Returns 'OW_Unknown' if nothing can be determined. | ||||
| 945 | OverwriteResult | ||||
| 946 | isOverwrite(const Instruction *LaterI, const Instruction *EarlierI, | ||||
| 947 | const MemoryLocation &Later, const MemoryLocation &Earlier, | ||||
| 948 | int64_t &EarlierOff, int64_t &LaterOff) { | ||||
| 949 | // AliasAnalysis does not always account for loops. Limit overwrite checks | ||||
| 950 | // to dependencies for which we can guarantee they are independent of any | ||||
| 951 | // loops they are in. | ||||
| 952 | if (!isGuaranteedLoopIndependent(EarlierI, LaterI, Earlier)) | ||||
| 953 | return OW_Unknown; | ||||
| 954 | |||||
| 955 | // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll | ||||
| 956 | // get imprecise values here, though (except for unknown sizes). | ||||
| 957 | if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise()) { | ||||
| 958 | // In case no constant size is known, try to use the IR values for the | ||||
| 959 | // number of bytes written and check if they match. | ||||
| 960 | const auto *LaterMemI = dyn_cast<MemIntrinsic>(LaterI); | ||||
| 961 | const auto *EarlierMemI = dyn_cast<MemIntrinsic>(EarlierI); | ||||
| 962 | if (LaterMemI && EarlierMemI) { | ||||
| 963 | const Value *LaterV = LaterMemI->getLength(); | ||||
| 964 | const Value *EarlierV = EarlierMemI->getLength(); | ||||
| 965 | if (LaterV == EarlierV && BatchAA.isMustAlias(Earlier, Later)) | ||||
| 966 | return OW_Complete; | ||||
| 967 | } | ||||
| 968 | |||||
| 969 | // Masked stores have imprecise locations, but we can reason about them | ||||
| 970 | // to some extent. | ||||
| 971 | return isMaskedStoreOverwrite(LaterI, EarlierI, BatchAA); | ||||
| 972 | } | ||||
| 973 | |||||
| 974 | const uint64_t LaterSize = Later.Size.getValue(); | ||||
| 975 | const uint64_t EarlierSize = Earlier.Size.getValue(); | ||||
| 976 | |||||
| 977 | // Query the alias information | ||||
| 978 | AliasResult AAR = BatchAA.alias(Later, Earlier); | ||||
| 979 | |||||
| 980 | // If the start pointers are the same, we just have to compare sizes to see if | ||||
| 981 | // the later store was larger than the earlier store. | ||||
| 982 | if (AAR == AliasResult::MustAlias) { | ||||
| 983 | // Make sure that the Later size is >= the Earlier size. | ||||
| 984 | if (LaterSize >= EarlierSize) | ||||
| 985 | return OW_Complete; | ||||
| 986 | } | ||||
| 987 | |||||
| 988 | // If we hit a partial alias we may have a full overwrite | ||||
| 989 | if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) { | ||||
| 990 | int32_t Off = AAR.getOffset(); | ||||
| 991 | if (Off >= 0 && (uint64_t)Off + EarlierSize <= LaterSize) | ||||
| 992 | return OW_Complete; | ||||
| 993 | } | ||||
| 994 | |||||
| 995 | // Check to see if the later store is to the entire object (either a global, | ||||
| 996 | // an alloca, or a byval/inalloca argument). If so, then it clearly | ||||
| 997 | // overwrites any other store to the same object. | ||||
| 998 | const Value *P1 = Earlier.Ptr->stripPointerCasts(); | ||||
| 999 | const Value *P2 = Later.Ptr->stripPointerCasts(); | ||||
| 1000 | const Value *UO1 = getUnderlyingObject(P1), *UO2 = getUnderlyingObject(P2); | ||||
| 1001 | |||||
| 1002 | // If we can't resolve the same pointers to the same object, then we can't | ||||
| 1003 | // analyze them at all. | ||||
| 1004 | if (UO1 != UO2) | ||||
| 1005 | return OW_Unknown; | ||||
| 1006 | |||||
| 1007 | // If the "Later" store is to a recognizable object, get its size. | ||||
| 1008 | uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, &F); | ||||
| 1009 | if (ObjectSize != MemoryLocation::UnknownSize) | ||||
| 1010 | if (ObjectSize == LaterSize && ObjectSize >= EarlierSize) | ||||
| 1011 | return OW_Complete; | ||||
| 1012 | |||||
| 1013 | // Okay, we have stores to two completely different pointers. Try to | ||||
| 1014 | // decompose the pointer into a "base + constant_offset" form. If the base | ||||
| 1015 | // pointers are equal, then we can reason about the two stores. | ||||
| 1016 | EarlierOff = 0; | ||||
| 1017 | LaterOff = 0; | ||||
| 1018 | const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL); | ||||
| 1019 | const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL); | ||||
| 1020 | |||||
| 1021 | // If the base pointers still differ, we have two completely different stores. | ||||
| 1022 | if (BP1 != BP2) | ||||
| 1023 | return OW_Unknown; | ||||
| 1024 | |||||
| 1025 | // The later access completely overlaps the earlier store if and only if | ||||
| 1026 | // both start and end of the earlier one are "inside" the later one: | ||||
| 1027 | // |<->|--earlier--|<->| | ||||
| 1028 | // |-------later-------| | ||||
| 1029 | // Accesses may overlap if and only if start of one of them is "inside" | ||||
| 1030 | // another one: | ||||
| 1031 | // |<->|--earlier--|<----->| | ||||
| 1032 | // |-------later-------| | ||||
| 1033 | // OR | ||||
| 1034 | // |----- earlier -----| | ||||
| 1035 | // |<->|---later---|<----->| | ||||
| 1036 | // | ||||
| 1037 | // We have to be careful here as *Off is signed while *.Size is unsigned. | ||||
| 1038 | |||||
| 1039 | // Check if the earlier access starts "not before" the later one. | ||||
| 1040 | if (EarlierOff >= LaterOff) { | ||||
| 1041 | // If the earlier access ends "not after" the later access then the earlier | ||||
| 1042 | // one is completely overwritten by the later one. | ||||
| 1043 | if (uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize) | ||||
| 1044 | return OW_Complete; | ||||
| 1045 | // If start of the earlier access is "before" end of the later access then | ||||
| 1046 | // accesses overlap. | ||||
| 1047 | else if ((uint64_t)(EarlierOff - LaterOff) < LaterSize) | ||||
| 1048 | return OW_MaybePartial; | ||||
| 1049 | } | ||||
| 1050 | // If start of the later access is "before" end of the earlier access then | ||||
| 1051 | // accesses overlap. | ||||
| 1052 | else if ((uint64_t)(LaterOff - EarlierOff) < EarlierSize) { | ||||
| 1053 | return OW_MaybePartial; | ||||
| 1054 | } | ||||
| 1055 | |||||
| 1056 | // Can reach here only if accesses are known not to overlap. There is no | ||||
| 1057 | // dedicated code to indicate no overlap so signal "unknown". | ||||
| 1058 | return OW_Unknown; | ||||
| 1059 | } | ||||
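// A worked example for the offset check above (editor-added sketch with
// hypothetical values): EarlierOff = 4, LaterOff = 0, EarlierSize = 4 and
// LaterSize = 8 take the EarlierOff >= LaterOff branch, where
// (4 - 0) + 4 <= 8 holds, so the earlier store is OW_Complete:
//          |<-4->|-earlier(4)-|
//          |--------later(8)-------|
// With LaterSize = 6 instead, (4 - 0) + 4 <= 6 fails but (4 - 0) < 6 still
// holds, yielding OW_MaybePartial.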
| 1060 | |||||
| 1061 | bool isInvisibleToCallerAfterRet(const Value *V) { | ||||
| 1062 | if (isa<AllocaInst>(V)) | ||||
| 1063 | return true; | ||||
| 1064 | auto I = InvisibleToCallerAfterRet.insert({V, false}); | ||||
| 1065 | if (I.second) { | ||||
| 1066 | if (!isInvisibleToCallerBeforeRet(V)) { | ||||
| 1067 | I.first->second = false; | ||||
| 1068 | } else { | ||||
| 1069 | auto *Inst = dyn_cast<Instruction>(V); | ||||
| 1070 | if (Inst && isAllocLikeFn(Inst, &TLI)) | ||||
| 1071 | I.first->second = !PointerMayBeCaptured(V, true, false); | ||||
| 1072 | } | ||||
| 1073 | } | ||||
| 1074 | return I.first->second; | ||||
| 1075 | } | ||||
| 1076 | |||||
| 1077 | bool isInvisibleToCallerBeforeRet(const Value *V) { | ||||
| 1078 | if (isa<AllocaInst>(V)) | ||||
| 1079 | return true; | ||||
| 1080 | auto I = InvisibleToCallerBeforeRet.insert({V, false}); | ||||
| 1081 | if (I.second) { | ||||
| 1082 | auto *Inst = dyn_cast<Instruction>(V); | ||||
| 1083 | if (Inst && isAllocLikeFn(Inst, &TLI)) | ||||
| 1084 | // NOTE: This could be made more precise by PointerMayBeCapturedBefore | ||||
| 1085 | // with the killing MemoryDef. But we refrain from doing so for now to | ||||
| 1086 | // limit compile-time and this does not cause any changes to the number | ||||
| 1087 | // of stores removed on a large test set in practice. | ||||
| 1088 | I.first->second = !PointerMayBeCaptured(V, false, true); | ||||
| 1089 | } | ||||
| 1090 | return I.first->second; | ||||
| 1091 | } | ||||
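// Illustrative IR for the capture check above (editor-added; @capture is a
// hypothetical callee):
//   %m = call i8* @malloc(i64 4)
//   call void @capture(i8* %m)
// The call may capture %m, so PointerMayBeCaptured returns true and %m is
// cached as visible before return; an alloca is invisible unconditionally.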
| 1092 | |||||
| 1093 | Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const { | ||||
| 1094 | if (!I->mayWriteToMemory()) | ||||
| 1095 | return None; | ||||
| 1096 | |||||
| 1097 | if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I)) | ||||
| 1098 | return {MemoryLocation::getForDest(MTI)}; | ||||
| 1099 | |||||
| 1100 | if (auto *CB = dyn_cast<CallBase>(I)) { | ||||
| 1101 | // If the functions may write to memory we do not know about, bail out. | ||||
| 1102 | if (!CB->onlyAccessesArgMemory() && | ||||
| 1103 | !CB->onlyAccessesInaccessibleMemOrArgMem()) | ||||
| 1104 | return None; | ||||
| 1105 | |||||
| 1106 | LibFunc LF; | ||||
| 1107 | if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) { | ||||
| 1108 | switch (LF) { | ||||
| 1109 | case LibFunc_strcpy: | ||||
| 1110 | case LibFunc_strncpy: | ||||
| 1111 | case LibFunc_strcat: | ||||
| 1112 | case LibFunc_strncat: | ||||
| 1113 | return {MemoryLocation::getAfter(CB->getArgOperand(0))}; | ||||
| 1114 | default: | ||||
| 1115 | break; | ||||
| 1116 | } | ||||
| 1117 | } | ||||
| 1118 | switch (CB->getIntrinsicID()) { | ||||
| 1119 | case Intrinsic::init_trampoline: | ||||
| 1120 | return {MemoryLocation::getAfter(CB->getArgOperand(0))}; | ||||
| 1121 | case Intrinsic::masked_store: | ||||
| 1122 | return {MemoryLocation::getForArgument(CB, 1, TLI)}; | ||||
| 1123 | default: | ||||
| 1124 | break; | ||||
| 1125 | } | ||||
| 1126 | return None; | ||||
| 1127 | } | ||||
| 1128 | |||||
| 1129 | return MemoryLocation::getOrNone(I); | ||||
| 1130 | } | ||||
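// For example (editor note): a strcpy(dst, src) call recognized via TLI maps
// to MemoryLocation::getAfter(dst), an imprecise location covering all bytes
// from dst onwards, while a plain `store i32 0, i32* %p` falls through to
// MemoryLocation::getOrNone and yields a precise 4-byte location.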
| 1131 | |||||
| 1132 | /// Returns true if \p UseInst completely overwrites \p DefLoc | ||||
| 1133 | /// (stored by \p DefInst). | ||||
| 1134 | bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst, | ||||
| 1135 | Instruction *UseInst) { | ||||
| 1136 | // UseInst has a MemoryDef associated in MemorySSA. It's possible for a | ||||
| 1137 | // MemoryDef to not write to memory, e.g. a volatile load is modeled as a | ||||
| 1138 | // MemoryDef. | ||||
| 1139 | if (!UseInst->mayWriteToMemory()) | ||||
| 1140 | return false; | ||||
| 1141 | |||||
| 1142 | if (auto *CB = dyn_cast<CallBase>(UseInst)) | ||||
| 1143 | if (CB->onlyAccessesInaccessibleMemory()) | ||||
| 1144 | return false; | ||||
| 1145 | |||||
| 1146 | int64_t InstWriteOffset, DepWriteOffset; | ||||
| 1147 | if (auto CC = getLocForWriteEx(UseInst)) | ||||
| 1148 | return isOverwrite(UseInst, DefInst, *CC, DefLoc, DepWriteOffset, | ||||
| 1149 | InstWriteOffset) == OW_Complete; | ||||
| 1150 | return false; | ||||
| 1151 | } | ||||
| 1152 | |||||
| 1153 | /// Returns true if \p Def is not read before returning from the function. | ||||
| 1154 | bool isWriteAtEndOfFunction(MemoryDef *Def) { | ||||
| 1155 | LLVM_DEBUG(dbgs() << " Check if def " << *Def << " (" | ||||
| 1156 | << *Def->getMemoryInst() | ||||
| 1157 | << ") is at the end of the function\n"); | ||||
| 1158 | |||||
| 1159 | auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst()); | ||||
| 1160 | if (!MaybeLoc) { | ||||
| 1161 | LLVM_DEBUG(dbgs() << " ... could not get location for write.\n"); | ||||
| 1162 | return false; | ||||
| 1163 | } | ||||
| 1164 | |||||
| 1165 | SmallVector<MemoryAccess *, 4> WorkList; | ||||
| 1166 | SmallPtrSet<MemoryAccess *, 8> Visited; | ||||
| 1167 | auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) { | ||||
| 1168 | if (!Visited.insert(Acc).second) | ||||
| 1169 | return; | ||||
| 1170 | for (Use &U : Acc->uses()) | ||||
| 1171 | WorkList.push_back(cast<MemoryAccess>(U.getUser())); | ||||
| 1172 | }; | ||||
| 1173 | PushMemUses(Def); | ||||
| 1174 | for (unsigned I = 0; I < WorkList.size(); I++) { | ||||
| 1175 | if (WorkList.size() >= MemorySSAScanLimit) { | ||||
| 1176 | LLVM_DEBUG(dbgs() << " ... hit exploration limit.\n"); | ||||
| 1177 | return false; | ||||
| 1178 | } | ||||
| 1179 | |||||
| 1180 | MemoryAccess *UseAccess = WorkList[I]; | ||||
| 1181 | // Simply adding the users of MemoryPhi to the worklist is not enough, | ||||
| 1182 | // because we might miss read clobbers in different iterations of a loop, | ||||
| 1183 | // for example. | ||||
| 1184 | // TODO: Add support for phi translation to handle the loop case. | ||||
| 1185 | if (isa<MemoryPhi>(UseAccess)) | ||||
| 1186 | return false; | ||||
| 1187 | |||||
| 1188 | // TODO: Checking for aliasing is expensive. Consider reducing the amount | ||||
| 1189 | // of times this is called and/or caching it. | ||||
| 1190 | Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst(); | ||||
| 1191 | if (isReadClobber(*MaybeLoc, UseInst)) { | ||||
| 1192 | LLVM_DEBUG(dbgs() << " ... hit read clobber " << *UseInst << ".\n"); | ||||
| 1193 | return false; | ||||
| 1194 | } | ||||
| 1195 | |||||
| 1196 | if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) | ||||
| 1197 | PushMemUses(UseDef); | ||||
| 1198 | } | ||||
| 1199 | return true; | ||||
| 1200 | } | ||||
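// Minimal case this accepts (editor-added sketch):
//   %a = alloca i32
//   store i32 1, i32* %a   ; MemoryDef with no reads on any path to...
//   ret void               ; ...a function exit
// The use worklist stays empty, the loop never bails, and true is returned.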
| 1201 | |||||
| 1202 | /// If \p I is a memory terminator like llvm.lifetime.end or free, return a | ||||
| 1203 | /// pair with the MemoryLocation terminated by \p I and a boolean flag | ||||
| 1204 | /// indicating whether \p I is a free-like call. | ||||
| 1205 | Optional<std::pair<MemoryLocation, bool>> | ||||
| 1206 | getLocForTerminator(Instruction *I) const { | ||||
| 1207 | uint64_t Len; | ||||
| 1208 | Value *Ptr; | ||||
| 1209 | if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len), | ||||
| 1210 | m_Value(Ptr)))) | ||||
| 1211 | return {std::make_pair(MemoryLocation(Ptr, Len), false)}; | ||||
| 1212 | |||||
| 1213 | if (auto *CB = dyn_cast<CallBase>(I)) { | ||||
| 1214 | if (isFreeCall(I, &TLI)) | ||||
| 1215 | return {std::make_pair(MemoryLocation::getAfter(CB->getArgOperand(0)), | ||||
| 1216 | true)}; | ||||
| 1217 | } | ||||
| 1218 | |||||
| 1219 | return None; | ||||
| 1220 | } | ||||
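// For instance (editor sketch):
//   llvm.lifetime.end(i64 8, i8* %p)  -> {MemoryLocation(%p, 8), false}
//   call void @free(i8* %p)           -> {MemoryLocation::getAfter(%p), true}
// Any other instruction yields None.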
| 1221 | |||||
| 1222 | /// Returns true if \p I is a memory terminator instruction like | ||||
| 1223 | /// llvm.lifetime.end or free. | ||||
| 1224 | bool isMemTerminatorInst(Instruction *I) const { | ||||
| 1225 | IntrinsicInst *II = dyn_cast<IntrinsicInst>(I); | ||||
| 1226 | return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) || | ||||
| 1227 | isFreeCall(I, &TLI); | ||||
| 1228 | } | ||||
| 1229 | |||||
| 1230 | /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from | ||||
| 1231 | /// instruction \p AccessI. | ||||
| 1232 | bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI, | ||||
| 1233 | Instruction *MaybeTerm) { | ||||
| 1234 | Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc = | ||||
| 1235 | getLocForTerminator(MaybeTerm); | ||||
| 1236 | |||||
| 1237 | if (!MaybeTermLoc) | ||||
| 1238 | return false; | ||||
| 1239 | |||||
| 1240 | // If the terminator is a free-like call, all accesses to the underlying | ||||
| 1241 | // object can be considered terminated. | ||||
| 1242 | if (getUnderlyingObject(Loc.Ptr) != | ||||
| 1243 | getUnderlyingObject(MaybeTermLoc->first.Ptr)) | ||||
| 1244 | return false; | ||||
| 1245 | |||||
| 1246 | auto TermLoc = MaybeTermLoc->first; | ||||
| 1247 | if (MaybeTermLoc->second) { | ||||
| 1248 | const Value *LocUO = getUnderlyingObject(Loc.Ptr); | ||||
| 1249 | return BatchAA.isMustAlias(TermLoc.Ptr, LocUO); | ||||
| 1250 | } | ||||
| 1251 | int64_t InstWriteOffset, DepWriteOffset; | ||||
| 1252 | return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, DepWriteOffset, | ||||
| 1253 | InstWriteOffset) == OW_Complete; | ||||
| 1254 | } | ||||
| 1255 | |||||
| 1256 | // Returns true if \p Use may read from \p DefLoc. | ||||
| 1257 | bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) { | ||||
| 1258 | if (isNoopIntrinsic(UseInst)) | ||||
| 1259 | return false; | ||||
| 1260 | |||||
| 1261 | // Monotonic or weaker atomic stores can be re-ordered and do not need to be | ||||
| 1262 | // treated as read clobber. | ||||
| 1263 | if (auto SI = dyn_cast<StoreInst>(UseInst)) | ||||
| 1264 | return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic); | ||||
| 1265 | |||||
| 1266 | if (!UseInst->mayReadFromMemory()) | ||||
| 1267 | return false; | ||||
| 1268 | |||||
| 1269 | if (auto *CB = dyn_cast<CallBase>(UseInst)) | ||||
| 1270 | if (CB->onlyAccessesInaccessibleMemory()) | ||||
| 1271 | return false; | ||||
| 1272 | |||||
| 1273 | // NOTE: For calls, the number of stores removed could be slightly improved | ||||
| 1274 | // by using AA.callCapturesBefore(UseInst, DefLoc, &DT), but that proved | ||||
| 1275 | // too expensive compared to the benefits in practice. For now, avoid more | ||||
| 1276 | // expensive analysis to limit compile-time. | ||||
| 1277 | return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc)); | ||||
| 1278 | } | ||||
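// Example (editor-added): against DefLoc = (%p, 4), `%v = load i32, i32* %p`
// is a read clobber via the final ModRef query, while `store atomic i32 0,
// i32* %p monotonic` is rejected early by the StoreInst check: monotonic or
// weaker stores never count as read clobbers here.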
| 1279 | |||||
| 1280 | /// Returns true if a dependency between \p Current and \p KillingDef is | ||||
| 1281 | /// guaranteed to be loop invariant for the loops that they are in. Either | ||||
| 1282 | /// because they are known to be in the same block, in the same loop level or | ||||
| 1283 | /// by guaranteeing that \p CurrentLoc only references a single MemoryLocation | ||||
| 1284 | /// during execution of the containing function. | ||||
| 1285 | bool isGuaranteedLoopIndependent(const Instruction *Current, | ||||
| 1286 | const Instruction *KillingDef, | ||||
| 1287 | const MemoryLocation &CurrentLoc) { | ||||
| 1288 | // If the dependency is within the same block or loop level (being careful | ||||
| 1289 | // of irreducible loops), we know that AA will return a valid result for the | ||||
| 1290 | // memory dependency. (Both at the function level, outside of any loop, | ||||
| 1291 | // would also be valid but we currently disable that to limit compile time). | ||||
| 1292 | if (Current->getParent() == KillingDef->getParent()) | ||||
| 1293 | return true; | ||||
| 1294 | const Loop *CurrentLI = LI.getLoopFor(Current->getParent()); | ||||
| 1295 | if (!ContainsIrreducibleLoops && CurrentLI && | ||||
| 1296 | CurrentLI == LI.getLoopFor(KillingDef->getParent())) | ||||
| 1297 | return true; | ||||
| 1298 | // Otherwise check the memory location is invariant to any loops. | ||||
| 1299 | return isGuaranteedLoopInvariant(CurrentLoc.Ptr); | ||||
| 1300 | } | ||||
| 1301 | |||||
| 1302 | /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible | ||||
| 1303 | /// loop. In particular, this guarantees that it only references a single | ||||
| 1304 | /// MemoryLocation during execution of the containing function. | ||||
| 1305 | bool isGuaranteedLoopInvariant(const Value *Ptr) { | ||||
| 1306 | auto IsGuaranteedLoopInvariantBase = [this](const Value *Ptr) { | ||||
| 1307 | Ptr = Ptr->stripPointerCasts(); | ||||
| 1308 | if (auto *I = dyn_cast<Instruction>(Ptr)) { | ||||
| 1309 | if (isa<AllocaInst>(Ptr)) | ||||
| 1310 | return true; | ||||
| 1311 | |||||
| 1312 | if (isAllocLikeFn(I, &TLI)) | ||||
| 1313 | return true; | ||||
| 1314 | |||||
| 1315 | return false; | ||||
| 1316 | } | ||||
| 1317 | return true; | ||||
| 1318 | }; | ||||
| 1319 | |||||
| 1320 | Ptr = Ptr->stripPointerCasts(); | ||||
| 1321 | if (auto *I = dyn_cast<Instruction>(Ptr)) { | ||||
| 1322 | if (I->getParent()->isEntryBlock()) | ||||
| 1323 | return true; | ||||
| 1324 | } | ||||
| 1325 | if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) { | ||||
| 1326 | return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) && | ||||
| 1327 | GEP->hasAllConstantIndices(); | ||||
| 1328 | } | ||||
| 1329 | return IsGuaranteedLoopInvariantBase(Ptr); | ||||
| 1330 | } | ||||
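// Example (editor note, hypothetical IR): `getelementptr inbounds i32, i32*
// %base, i64 2` on an alloca or alloc-like %base passes (constant index on
// an invariant base), whereas `getelementptr i32, i32* %base, i64 %iv` with
// a loop-varying %iv can name a different location each iteration and fails
// hasAllConstantIndices().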
| 1331 | |||||
| 1332 | // Find a MemoryDef writing to \p DefLoc and dominating \p StartAccess, with | ||||
| 1333 | // no read access between them or on any other path to a function exit block | ||||
| 1334 | // if \p DefLoc is not accessible after the function returns. If there is no | ||||
| 1335 | // such MemoryDef, return None. The returned value may not (completely) | ||||
| 1336 | // overwrite \p DefLoc. Currently we bail out when we encounter an aliasing | ||||
| 1337 | // MemoryUse (read). | ||||
| 1338 | Optional<MemoryAccess *> | ||||
| 1339 | getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess, | ||||
| 1340 | const MemoryLocation &DefLoc, const Value *DefUO, | ||||
| 1341 | unsigned &ScanLimit, unsigned &WalkerStepLimit, | ||||
| 1342 | bool IsMemTerm, unsigned &PartialLimit) { | ||||
| 1343 | if (ScanLimit == 0 || WalkerStepLimit == 0) { | ||||
| 1344 | LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n"); | ||||
| 1345 | return None; | ||||
| 1346 | } | ||||
| 1347 | |||||
| 1348 | MemoryAccess *Current = StartAccess; | ||||
| 1349 | Instruction *KillingI = KillingDef->getMemoryInst(); | ||||
| 1350 | LLVM_DEBUG(dbgs() << " trying to get dominating access\n"); | ||||
| 1351 | |||||
| 1352 | // Find the next clobbering Mod access for DefLoc, starting at StartAccess. | ||||
| 1353 | Optional<MemoryLocation> CurrentLoc; | ||||
| 1354 | for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) { | ||||
| 1355 | LLVM_DEBUG({ | ||||
| 1356 | dbgs() << " visiting " << *Current; | ||||
| 1357 | if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current)) | ||||
| 1358 | dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst() | ||||
| 1359 | << ")"; | ||||
| 1360 | dbgs() << "\n"; | ||||
| 1361 | }); | ||||
| 1362 | |||||
| 1363 | // Reached TOP. | ||||
| 1364 | if (MSSA.isLiveOnEntryDef(Current)) { | ||||
| 1365 | LLVM_DEBUG(dbgs() << " ... found LiveOnEntryDef\n"); | ||||
| 1366 | return None; | ||||
| 1367 | } | ||||
| 1368 | |||||
| 1369 | // Cost of a step. Accesses in the same block are more likely to be valid | ||||
| 1370 | // candidates for elimination, hence consider them cheaper. | ||||
| 1371 | unsigned StepCost = KillingDef->getBlock() == Current->getBlock() | ||||
| 1372 | ? MemorySSASameBBStepCost | ||||
| 1373 | : MemorySSAOtherBBStepCost; | ||||
| 1374 | if (WalkerStepLimit <= StepCost) { | ||||
| 1375 | LLVM_DEBUG(dbgs() << " ... hit walker step limit\n"); | ||||
| 1376 | return None; | ||||
| 1377 | } | ||||
| 1378 | WalkerStepLimit -= StepCost; | ||||
| 1379 | |||||
| 1380 | // Return for MemoryPhis. They cannot be eliminated directly and the | ||||
| 1381 | // caller is responsible for traversing them. | ||||
| 1382 | if (isa<MemoryPhi>(Current)) { | ||||
| 1383 | LLVM_DEBUG(dbgs() << " ... found MemoryPhi\n"); | ||||
| 1384 | return Current; | ||||
| 1385 | } | ||||
| 1386 | |||||
| 1387 | // Below, check if CurrentDef is a valid candidate to be eliminated by | ||||
| 1388 | // KillingDef. If it is not, check the next candidate. | ||||
| 1389 | MemoryDef *CurrentDef = cast<MemoryDef>(Current); | ||||
| 1390 | Instruction *CurrentI = CurrentDef->getMemoryInst(); | ||||
| 1391 | |||||
| 1392 | if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(DefUO))) | ||||
| 1393 | continue; | ||||
| 1394 | |||||
| 1395 | // Before we try to remove anything, check for any extra throwing | ||||
| 1396 | // instructions that block us from DSEing | ||||
| 1397 | if (mayThrowBetween(KillingI, CurrentI, DefUO)) { | ||||
| 1398 | LLVM_DEBUG(dbgs() << " ... skip, may throw!\n"); | ||||
| 1399 | return None; | ||||
| 1400 | } | ||||
| 1401 | |||||
| 1402 | // Check for anything that looks like it will be a barrier to further | ||||
| 1403 | // removal | ||||
| 1404 | if (isDSEBarrier(DefUO, CurrentI)) { | ||||
| 1405 | LLVM_DEBUG(dbgs() << " ... skip, barrier\n"); | ||||
| 1406 | return None; | ||||
| 1407 | } | ||||
| 1408 | |||||
| 1409 | // If Current is known to be on a path that reads DefLoc or is a read | ||||
| 1410 | // clobber, bail out, as the path is not profitable. We skip this check | ||||
| 1411 | // for intrinsic calls, because the code knows how to handle memcpy | ||||
| 1412 | // intrinsics. | ||||
| 1413 | if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(DefLoc, CurrentI)) | ||||
| 1414 | return None; | ||||
| 1415 | |||||
| 1416 | // Quick check if there are direct uses that are read-clobbers. | ||||
| 1417 | if (any_of(Current->uses(), [this, &DefLoc, StartAccess](Use &U) { | ||||
| 1418 | if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser())) | ||||
| 1419 | return !MSSA.dominates(StartAccess, UseOrDef) && | ||||
| 1420 | isReadClobber(DefLoc, UseOrDef->getMemoryInst()); | ||||
| 1421 | return false; | ||||
| 1422 | })) { | ||||
| 1423 | LLVM_DEBUG(dbgs() << " ... found a read clobber\n"); | ||||
| 1424 | return None; | ||||
| 1425 | } | ||||
| 1426 | |||||
| 1427 | // If Current cannot be analyzed or is not removable, check the next | ||||
| 1428 | // candidate. | ||||
| 1429 | if (!hasAnalyzableMemoryWrite(CurrentI, TLI) || !isRemovable(CurrentI)) | ||||
| 1430 | continue; | ||||
| 1431 | |||||
| 1432 | // If Current does not have an analyzable write location, skip it | ||||
| 1433 | CurrentLoc = getLocForWriteEx(CurrentI); | ||||
| 1434 | if (!CurrentLoc) | ||||
| 1435 | continue; | ||||
| 1436 | |||||
| 1437 | // AliasAnalysis does not account for loops. Limit elimination to | ||||
| 1438 | // candidates for which we can guarantee they always store to the same | ||||
| 1439 | // memory location and are not located in different loops. | ||||
| 1440 | if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) { | ||||
| 1441 | LLVM_DEBUG(dbgs() << " ... not guaranteed loop independent\n"); | ||||
| 1442 | WalkerStepLimit -= 1; | ||||
| 1443 | continue; | ||||
| 1444 | } | ||||
| 1445 | |||||
| 1446 | if (IsMemTerm) { | ||||
| 1447 | // If the killing def is a memory terminator (e.g. lifetime.end), check | ||||
| 1448 | // the next candidate if Current does not write the same | ||||
| 1449 | // underlying object as the terminator. | ||||
| 1450 | if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) | ||||
| 1451 | continue; | ||||
| 1452 | } else { | ||||
| 1453 | int64_t InstWriteOffset, DepWriteOffset; | ||||
| 1454 | auto OR = isOverwrite(KillingI, CurrentI, DefLoc, *CurrentLoc, | ||||
| 1455 | DepWriteOffset, InstWriteOffset); | ||||
| 1456 | // If Current does not write to the same object as KillingDef, check | ||||
| 1457 | // the next candidate. | ||||
| 1458 | if (OR == OW_Unknown) | ||||
| 1459 | continue; | ||||
| 1460 | else if (OR == OW_MaybePartial) { | ||||
| 1461 | // If KillingDef only partially overwrites Current, check the next | ||||
| 1462 | // candidate if the partial step limit is exceeded. This aggressively | ||||
| 1463 | // limits the number of candidates for partial store elimination, | ||||
| 1464 | // which are less likely to be removable in the end. | ||||
| 1465 | if (PartialLimit <= 1) { | ||||
| 1466 | WalkerStepLimit -= 1; | ||||
| 1467 | continue; | ||||
| 1468 | } | ||||
| 1469 | PartialLimit -= 1; | ||||
| 1470 | } | ||||
| 1471 | } | ||||
| 1472 | break; | ||||
| 1473 | }; | ||||
| 1474 | |||||
| 1475 | // Accesses to objects accessible after the function returns can only be | ||||
| 1476 | // eliminated if the access is killed along all paths to the exit. Collect | ||||
| 1477 | // the blocks with killing (=completely overwriting MemoryDefs) and check if | ||||
| 1478 | // they cover all paths from EarlierAccess to any function exit. | ||||
| 1479 | SmallPtrSet<Instruction *, 16> KillingDefs; | ||||
| 1480 | KillingDefs.insert(KillingDef->getMemoryInst()); | ||||
| 1481 | MemoryAccess *EarlierAccess = Current; | ||||
| 1482 | Instruction *EarlierMemInst = | ||||
| 1483 | cast<MemoryDef>(EarlierAccess)->getMemoryInst(); | ||||
| 1484 | LLVM_DEBUG(dbgs() << " Checking for reads of " << *EarlierAccess << " (" | ||||
| 1485 | << *EarlierMemInst << ")\n"); | ||||
| 1486 | |||||
| 1487 | SmallSetVector<MemoryAccess *, 32> WorkList; | ||||
| 1488 | auto PushMemUses = [&WorkList](MemoryAccess *Acc) { | ||||
| 1489 | for (Use &U : Acc->uses()) | ||||
| 1490 | WorkList.insert(cast<MemoryAccess>(U.getUser())); | ||||
| 1491 | }; | ||||
| 1492 | PushMemUses(EarlierAccess); | ||||
| 1493 | |||||
| 1494 | // Optimistically collect all accesses for reads. If we do not find any | ||||
| 1495 | // read clobbers, add them to the cache. | ||||
| 1496 | SmallPtrSet<MemoryAccess *, 16> KnownNoReads; | ||||
| 1497 | if (!EarlierMemInst->mayReadFromMemory()) | ||||
| 1498 | KnownNoReads.insert(EarlierAccess); | ||||
| 1499 | // Check if EarlierDef may be read. | ||||
| 1500 | for (unsigned I = 0; I < WorkList.size(); I++) { | ||||
| 1501 | MemoryAccess *UseAccess = WorkList[I]; | ||||
| 1502 | |||||
| 1503 | LLVM_DEBUG(dbgs() << " " << *UseAccess); | ||||
| 1504 | // Bail out if the number of accesses to check exceeds the scan limit. | ||||
| 1505 | if (ScanLimit < (WorkList.size() - I)) { | ||||
| 1506 | LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n"); | ||||
| 1507 | return None; | ||||
| 1508 | } | ||||
| 1509 | --ScanLimit; | ||||
| 1510 | NumDomMemDefChecks++; | ||||
| 1511 | KnownNoReads.insert(UseAccess); | ||||
| 1512 | |||||
| 1513 | if (isa<MemoryPhi>(UseAccess)) { | ||||
| 1514 | if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) { | ||||
| 1515 | return DT.properlyDominates(KI->getParent(), | ||||
| 1516 | UseAccess->getBlock()); | ||||
| 1517 | })) { | ||||
| 1518 | LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n"); | ||||
| 1519 | continue; | ||||
| 1520 | } | ||||
| 1521 | LLVM_DEBUG(dbgs() << "\n ... adding PHI uses\n"); | ||||
| 1522 | PushMemUses(UseAccess); | ||||
| 1523 | continue; | ||||
| 1524 | } | ||||
| 1525 | |||||
| 1526 | Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst(); | ||||
| 1527 | LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n"); | ||||
| 1528 | |||||
| 1529 | if (any_of(KillingDefs, [this, UseInst](Instruction *KI) { | ||||
| 1530 | return DT.dominates(KI, UseInst); | ||||
| 1531 | })) { | ||||
| 1532 | LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n"); | ||||
| 1533 | continue; | ||||
| 1534 | } | ||||
| 1535 | |||||
| 1536 | // A memory terminator kills all preceding MemoryDefs and all succeeding | ||||
| 1537 | // MemoryAccesses. We do not have to check its users. | ||||
| 1538 | if (isMemTerminator(*CurrentLoc, EarlierMemInst, UseInst)) { | ||||
| 1539 | LLVM_DEBUG( | ||||
| 1540 | dbgs() | ||||
| 1541 | << " ... skipping, memterminator invalidates following accesses\n"); | ||||
| 1542 | continue; | ||||
| 1543 | } | ||||
| 1544 | |||||
| 1545 | if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) { | ||||
| 1546 | LLVM_DEBUG(dbgs() << " ... adding uses of intrinsic\n"); | ||||
| 1547 | PushMemUses(UseAccess); | ||||
| 1548 | continue; | ||||
| 1549 | } | ||||
| 1550 | |||||
| 1551 | if (UseInst->mayThrow() && !isInvisibleToCallerBeforeRet(DefUO)) { | ||||
| 1552 | LLVM_DEBUG(dbgs() << " ... found throwing instruction\n"); | ||||
| 1553 | return None; | ||||
| 1554 | } | ||||
| 1555 | |||||
| 1556 | // Uses which may read the original MemoryDef mean we cannot eliminate the | ||||
| 1557 | // original MD. Stop walk. | ||||
| 1558 | if (isReadClobber(*CurrentLoc, UseInst)) { | ||||
| 1559 | LLVM_DEBUG(dbgs() << " ... found read clobber\n"); | ||||
| 1560 | return None; | ||||
| 1561 | } | ||||
| 1562 | |||||
| 1563 | // If this worklist walks back to the original memory access (and the | ||||
| 1564 | // pointer is not guaranteed loop invariant) then we cannot assume that a | ||||
| 1565 | // store kills itself. | ||||
| 1566 | if (EarlierAccess == UseAccess && | ||||
| 1567 | !isGuaranteedLoopInvariant(CurrentLoc->Ptr)) { | ||||
| 1568 | LLVM_DEBUG(dbgs() << " ... found not loop invariant self access\n"); | ||||
| 1569 | return None; | ||||
| 1570 | } | ||||
| 1571 | // Otherwise, for the KillingDef and EarlierAccess we only have to check | ||||
| 1572 | // if it reads the memory location. | ||||
| 1573 | // TODO: It would probably be better to check for self-reads before | ||||
| 1574 | // calling the function. | ||||
| 1575 | if (KillingDef == UseAccess || EarlierAccess == UseAccess) { | ||||
| 1576 | LLVM_DEBUG(dbgs() << " ... skipping killing def/dom access\n"); | ||||
| 1577 | continue; | ||||
| 1578 | } | ||||
| 1579 | |||||
| 1580 | // Check all uses for MemoryDefs, except for defs completely overwriting | ||||
| 1581 | // the original location. Otherwise we have to check uses of *all* | ||||
| 1582 | // MemoryDefs we discover, including non-aliasing ones; without that we | ||||
| 1583 | // might miss cases like the following | ||||
| 1584 | // 1 = Def(LoE) ; <----- EarlierDef stores [0,1] | ||||
| 1585 | // 2 = Def(1) ; (2, 1) = NoAlias, stores [2,3] | ||||
| 1586 | // Use(2) ; MayAlias 2 *and* 1, loads [0, 3]. | ||||
| 1587 | // (The Use points to the *first* Def it may alias) | ||||
| 1588 | // 3 = Def(1) ; <---- Current (3, 2) = NoAlias, (3,1) = MayAlias, | ||||
| 1589 | // stores [0,1] | ||||
| 1590 | if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) { | ||||
| 1591 | if (isCompleteOverwrite(*CurrentLoc, EarlierMemInst, UseInst)) { | ||||
| 1592 | BasicBlock *MaybeKillingBlock = UseInst->getParent(); | ||||
| 1593 | if (PostOrderNumbers.find(MaybeKillingBlock)->second < | ||||
| 1594 | PostOrderNumbers.find(EarlierAccess->getBlock())->second) { | ||||
| 1595 | if (!isInvisibleToCallerAfterRet(DefUO)) { | ||||
| 1596 | LLVM_DEBUG(dbgs() | ||||
| 1597 | << " ... found killing def " << *UseInst << "\n"); | ||||
| 1598 | KillingDefs.insert(UseInst); | ||||
| 1599 | } | ||||
| 1600 | } else { | ||||
| 1601 | LLVM_DEBUG(dbgs() | ||||
| 1602 | << " ... found preceding def " << *UseInst << "\n"); | ||||
| 1603 | return None; | ||||
| 1604 | } | ||||
| 1605 | } else | ||||
| 1606 | PushMemUses(UseDef); | ||||
| 1607 | } | ||||
| 1608 | } | ||||
| 1609 | |||||
| 1610 | // For accesses to locations visible after the function returns, make sure | ||||
| 1611 | // that the location is killed (=overwritten) along all paths from | ||||
| 1612 | // EarlierAccess to the exit. | ||||
| 1613 | if (!isInvisibleToCallerAfterRet(DefUO)) { | ||||
| 1614 | SmallPtrSet<BasicBlock *, 16> KillingBlocks; | ||||
| 1615 | for (Instruction *KD : KillingDefs) | ||||
| 1616 | KillingBlocks.insert(KD->getParent()); | ||||
| 1617 | assert(!KillingBlocks.empty() && | ||||
| 1618 | "Expected at least a single killing block"); | ||||
| 1619 | |||||
| 1620 | // Find the common post-dominator of all killing blocks. | ||||
| 1621 | BasicBlock *CommonPred = *KillingBlocks.begin(); | ||||
| 1622 | for (auto I = std::next(KillingBlocks.begin()), E = KillingBlocks.end(); | ||||
| 1623 | I != E; I++) { | ||||
| 1624 | if (!CommonPred) | ||||
| 1625 | break; | ||||
| 1626 | CommonPred = PDT.findNearestCommonDominator(CommonPred, *I); | ||||
| 1627 | } | ||||
| 1628 | |||||
| 1629 | // If CommonPred is in the set of killing blocks, just check if it | ||||
| 1630 | // post-dominates EarlierAccess. | ||||
| 1631 | if (KillingBlocks.count(CommonPred)) { | ||||
| 1632 | if (PDT.dominates(CommonPred, EarlierAccess->getBlock())) | ||||
| 1633 | return {EarlierAccess}; | ||||
| 1634 | return None; | ||||
| 1635 | } | ||||
| 1636 | |||||
| 1637 | // If the common post-dominator does not post-dominate EarlierAccess, | ||||
| 1638 | // there is a path from EarlierAccess to an exit not going through a | ||||
| 1639 | // killing block. | ||||
| 1640 | if (PDT.dominates(CommonPred, EarlierAccess->getBlock())) { | ||||
| 1641 | SetVector<BasicBlock *> WorkList; | ||||
| 1642 | |||||
| 1643 | // If CommonPred is null, there are multiple exits from the function. | ||||
| 1644 | // They all have to be added to the worklist. | ||||
| 1645 | if (CommonPred) | ||||
| 1646 | WorkList.insert(CommonPred); | ||||
| 1647 | else | ||||
| 1648 | for (BasicBlock *R : PDT.roots()) | ||||
| 1649 | WorkList.insert(R); | ||||
| 1650 | |||||
| 1651 | NumCFGTries++; | ||||
| 1652 | // Check if all paths starting from an exit node go through one of the | ||||
| 1653 | // killing blocks before reaching EarlierAccess. | ||||
| 1654 | for (unsigned I = 0; I < WorkList.size(); I++) { | ||||
| 1655 | NumCFGChecks++; | ||||
| 1656 | BasicBlock *Current = WorkList[I]; | ||||
| 1657 | if (KillingBlocks.count(Current)) | ||||
| 1658 | continue; | ||||
| 1659 | if (Current == EarlierAccess->getBlock()) | ||||
| 1660 | return None; | ||||
| 1661 | |||||
| 1662 | // EarlierAccess is reachable from the entry, so we don't have to | ||||
| 1663 | // explore unreachable blocks further. | ||||
| 1664 | if (!DT.isReachableFromEntry(Current)) | ||||
| 1665 | continue; | ||||
| 1666 | |||||
| 1667 | for (BasicBlock *Pred : predecessors(Current)) | ||||
| 1668 | WorkList.insert(Pred); | ||||
| 1669 | |||||
| 1670 | if (WorkList.size() >= MemorySSAPathCheckLimit) | ||||
| 1671 | return None; | ||||
| 1672 | } | ||||
| 1673 | NumCFGSuccess++; | ||||
| 1674 | return {EarlierAccess}; | ||||
| 1675 | } | ||||
| 1676 | return None; | ||||
| 1677 | } | ||||
| 1678 | |||||
| 1679 | // No aliasing MemoryUses of EarlierAccess found, EarlierAccess is | ||||
| 1680 | // potentially dead. | ||||
| 1681 | return {EarlierAccess}; | ||||
| 1682 | } | ||||
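// Sketch of the exit-path check above (editor-added, hypothetical CFG): for
// an object still visible after the function returns,
//   entry:  store (earlier)
//           br i1 %c, label %kill, label %skip
//   kill:   store (completely overwrites)    skip: no overwrite
// the path entry->skip->exit bypasses every killing block, so the earlier
// store must be kept and None is returned.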
| 1683 | |||||
| 1684 | // Delete dead memory defs | ||||
| 1685 | void deleteDeadInstruction(Instruction *SI) { | ||||
| 1686 | MemorySSAUpdater Updater(&MSSA); | ||||
| 1687 | SmallVector<Instruction *, 32> NowDeadInsts; | ||||
| 1688 | NowDeadInsts.push_back(SI); | ||||
| 1689 | --NumFastOther; | ||||
| 1690 | |||||
| 1691 | while (!NowDeadInsts.empty()) { | ||||
| 1692 | Instruction *DeadInst = NowDeadInsts.pop_back_val(); | ||||
| 1693 | ++NumFastOther; | ||||
| 1694 | |||||
| 1695 | // Try to preserve debug information attached to the dead instruction. | ||||
| 1696 | salvageDebugInfo(*DeadInst); | ||||
| 1697 | salvageKnowledge(DeadInst); | ||||
| 1698 | |||||
| 1699 | // Remove the Instruction from MSSA. | ||||
| 1700 | if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) { | ||||
| 1701 | if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) { | ||||
| 1702 | SkipStores.insert(MD); | ||||
| 1703 | } | ||||
| 1704 | Updater.removeMemoryAccess(MA); | ||||
| 1705 | } | ||||
| 1706 | |||||
| 1707 | auto I = IOLs.find(DeadInst->getParent()); | ||||
| 1708 | if (I != IOLs.end()) | ||||
| 1709 | I->second.erase(DeadInst); | ||||
| 1710 | // Remove its operands | ||||
| 1711 | for (Use &O : DeadInst->operands()) | ||||
| 1712 | if (Instruction *OpI = dyn_cast<Instruction>(O)) { | ||||
| 1713 | O = nullptr; | ||||
| 1714 | if (isInstructionTriviallyDead(OpI, &TLI)) | ||||
| 1715 | NowDeadInsts.push_back(OpI); | ||||
| 1716 | } | ||||
| 1717 | |||||
| 1718 | DeadInst->eraseFromParent(); | ||||
| 1719 | } | ||||
| 1720 | } | ||||
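// Cascade example (editor note): erasing `store i32 0, i32* %gep` nulls its
// operands; if `%gep = getelementptr ...` is left without users, it becomes
// trivially dead and is erased by the same worklist loop.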
| 1721 | |||||
| 1722 | // Check for any extra throws between SI and NI that block DSE. This only | ||||
| 1723 | // checks extra maythrows (those that aren't MemoryDefs). MemoryDefs that may | ||||
| 1724 | // throw are handled during the walk from one def to the next. | ||||
| 1725 | bool mayThrowBetween(Instruction *SI, Instruction *NI, | ||||
| 1726 | const Value *SILocUnd) { | ||||
| 1727 | // First see if we can ignore it by using the fact that SI is an | ||||
| 1728 | // alloca/alloca like object that is not visible to the caller during | ||||
| 1729 | // execution of the function. | ||||
| 1730 | if (SILocUnd && isInvisibleToCallerBeforeRet(SILocUnd)) | ||||
| 1731 | return false; | ||||
| 1732 | |||||
| 1733 | if (SI->getParent() == NI->getParent()) | ||||
| 1734 | return ThrowingBlocks.count(SI->getParent()); | ||||
| 1735 | return !ThrowingBlocks.empty(); | ||||
| 1736 | } | ||||
| 1737 | |||||
| 1738 | // Check if \p NI acts as a DSE barrier for \p SI. The following instructions | ||||
| 1739 | // act as barriers: | ||||
| 1740 | // * A memory instruction that may throw and \p SI accesses a non-stack | ||||
| 1741 | // object. | ||||
| 1742 | // * Atomic stores stronger than monotonic. | ||||
| 1743 | bool isDSEBarrier(const Value *SILocUnd, Instruction *NI) { | ||||
| 1744 | // If NI may throw it acts as a barrier, unless \p SILocUnd is an alloca or | ||||
| 1745 | // alloca-like object that does not escape. | ||||
| 1746 | if (NI->mayThrow() && !isInvisibleToCallerBeforeRet(SILocUnd)) | ||||
| 1747 | return true; | ||||
| 1748 | |||||
| 1749 | // If NI is an atomic load/store stronger than monotonic, do not try to | ||||
| 1750 | // eliminate/reorder it. | ||||
| 1751 | if (NI->isAtomic()) { | ||||
| 1752 | if (auto *LI = dyn_cast<LoadInst>(NI)) | ||||
| 1753 | return isStrongerThanMonotonic(LI->getOrdering()); | ||||
| 1754 | if (auto *SI = dyn_cast<StoreInst>(NI)) | ||||
| 1755 | return isStrongerThanMonotonic(SI->getOrdering()); | ||||
| 1756 | if (auto *ARMW = dyn_cast<AtomicRMWInst>(NI)) | ||||
| 1757 | return isStrongerThanMonotonic(ARMW->getOrdering()); | ||||
| 1758 | if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(NI)) | ||||
| 1759 | return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) || | ||||
| 1760 | isStrongerThanMonotonic(CmpXchg->getFailureOrdering()); | ||||
| 1761 | llvm_unreachable("other instructions should be skipped in MemorySSA"); | ||||
| 1762 | } | ||||
| 1763 | return false; | ||||
| 1764 | } | ||||
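// For instance (editor-added): an acquire load or a seq_cst store sitting
// between the two stores of interest is a barrier, while monotonic atomics
// and ordinary loads/stores are not.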
| 1765 | |||||
| 1766 | /// Eliminate writes to objects that are not visible in the caller and are not | ||||
| 1767 | /// accessed before returning from the function. | ||||
| 1768 | bool eliminateDeadWritesAtEndOfFunction() { | ||||
| 1769 | bool MadeChange = false; | ||||
| 1770 | LLVM_DEBUG( | ||||
| 1771 | dbgs() | ||||
| 1772 | << "Trying to eliminate MemoryDefs at the end of the function\n"); | ||||
| 1773 | for (int I = MemDefs.size() - 1; I >= 0; I--) { | ||||
| 1774 | MemoryDef *Def = MemDefs[I]; | ||||
| 1775 | if (SkipStores.contains(Def) || !isRemovable(Def->getMemoryInst())) | ||||
| 1776 | continue; | ||||
| 1777 | |||||
| 1778 | Instruction *DefI = Def->getMemoryInst(); | ||||
| 1779 | SmallVector<const Value *, 4> Pointers; | ||||
| 1780 | auto DefLoc = getLocForWriteEx(DefI); | ||||
| 1781 | if (!DefLoc) | ||||
| 1782 | continue; | ||||
| 1783 | |||||
| 1784 | // NOTE: Currently eliminating writes at the end of a function is limited | ||||
| 1785 | // to MemoryDefs with a single underlying object, to save compile-time. In | ||||
| 1786 | // practice it appears the case with multiple underlying objects is very | ||||
| 1787 | // uncommon. If it turns out to be important, we can use | ||||
| 1788 | // getUnderlyingObjects here instead. | ||||
| 1789 | const Value *UO = getUnderlyingObject(DefLoc->Ptr); | ||||
| 1790 | if (!UO || !isInvisibleToCallerAfterRet(UO)) | ||||
| 1791 | continue; | ||||
| 1792 | |||||
| 1793 | if (isWriteAtEndOfFunction(Def)) { | ||||
| 1794 | // See through pointer-to-pointer bitcasts | ||||
| 1795 | LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end " | ||||
| 1796 | "of the function\n"); | ||||
| 1797 | deleteDeadInstruction(DefI); | ||||
| 1798 | ++NumFastStores; | ||||
| 1799 | MadeChange = true; | ||||
| 1800 | } | ||||
| 1801 | } | ||||
| 1802 | return MadeChange; | ||||
| 1803 | } | ||||
| 1804 | |||||
| 1805 | /// \returns true if \p Def is a no-op store, either because it | ||||
| 1806 | /// directly stores back a loaded value or stores zero to a calloced object. | ||||
| 1807 | bool storeIsNoop(MemoryDef *Def, const MemoryLocation &DefLoc, | ||||
| 1808 | const Value *DefUO) { | ||||
| 1809 | StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst()); | ||||
| 1810 | MemSetInst *MemSet = dyn_cast<MemSetInst>(Def->getMemoryInst()); | ||||
| 1811 | Constant *StoredConstant = nullptr; | ||||
| 1812 | if (Store) | ||||
| 1813 | StoredConstant = dyn_cast<Constant>(Store->getOperand(0)); | ||||
| 1814 | if (MemSet) | ||||
| 1815 | StoredConstant = dyn_cast<Constant>(MemSet->getValue()); | ||||
| 1816 | |||||
| 1817 | if (StoredConstant && StoredConstant->isNullValue()) { | ||||
| 1818 | auto *DefUOInst = dyn_cast<Instruction>(DefUO); | ||||
| 1819 | if (DefUOInst && isCallocLikeFn(DefUOInst, &TLI)) { | ||||
| 1820 | auto *UnderlyingDef = cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst)); | ||||
| 1821 | // If UnderlyingDef is the clobbering access of Def, no instructions | ||||
| 1822 | // between them can modify the memory location. | ||||
| 1823 | auto *ClobberDef = | ||||
| 1824 | MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def); | ||||
| 1825 | return UnderlyingDef == ClobberDef; | ||||
| 1826 | } | ||||
| 1827 | } | ||||
| 1828 | |||||
| 1829 | if (!Store) | ||||
| 1830 | return false; | ||||
| 1831 | |||||
| 1832 | if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) { | ||||
| 1833 | if (LoadI->getPointerOperand() == Store->getOperand(1)) { | ||||
| 1834 | // Get the defining access for the load. | ||||
| 1835 | auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess(); | ||||
| 1836 | // Fast path: the defining accesses are the same. | ||||
| 1837 | if (LoadAccess == Def->getDefiningAccess()) | ||||
| 1838 | return true; | ||||
| 1839 | |||||
| 1840 | // Look through phi accesses. Recursively scan all phi accesses by | ||||
| 1841 | // adding them to a worklist. Bail when we run into a memory def that | ||||
| 1842 | // does not match LoadAccess. | ||||
| 1843 | SetVector<MemoryAccess *> ToCheck; | ||||
| 1844 | MemoryAccess *Current = | ||||
| 1845 | MSSA.getWalker()->getClobberingMemoryAccess(Def); | ||||
| 1846 | // We don't want to bail when we run into the store memory def. But, | ||||
| 1847 | // the phi access may point to it. So, pretend like we've already | ||||
| 1848 | // checked it. | ||||
| 1849 | ToCheck.insert(Def); | ||||
| 1850 | ToCheck.insert(Current); | ||||
| 1851 | // Start at current (1) to simulate already having checked Def. | ||||
| 1852 | for (unsigned I = 1; I < ToCheck.size(); ++I) { | ||||
| 1853 | Current = ToCheck[I]; | ||||
| 1854 | if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) { | ||||
| 1855 | // Check all the operands. | ||||
| 1856 | for (auto &Use : PhiAccess->incoming_values()) | ||||
| 1857 | ToCheck.insert(cast<MemoryAccess>(&Use)); | ||||
| 1858 | continue; | ||||
| 1859 | } | ||||
| 1860 | |||||
| 1861 | // If we found a memory def, bail. This happens when we have an | ||||
| 1862 | // unrelated write in between an otherwise noop store. | ||||
| 1863 | assert(isa<MemoryDef>(Current) && | ||||
| 1864 | "Only MemoryDefs should reach here."); | ||||
| 1865 | // TODO: Skip no alias MemoryDefs that have no aliasing reads. | ||||
| 1866 | // We are searching for the definition of the store's destination. | ||||
| 1867 | // So, if that is the same definition as the load, then this is a | ||||
| 1868 | // noop. Otherwise, fail. | ||||
| 1869 | if (LoadAccess != Current) | ||||
| 1870 | return false; | ||||
| 1871 | } | ||||
| 1872 | return true; | ||||
| 1873 | } | ||||
| 1874 | } | ||||
| 1875 | |||||
| 1876 | return false; | ||||
| 1877 | } | ||||
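// Two shapes storeIsNoop recognizes (editor sketch, hypothetical IR):
//   %buf = call i8* @calloc(i64 1, i64 4)
//   store i8 0, i8* %buf      ; zero into freshly calloc'ed memory
// and
//   %v = load i32, i32* %p
//   store i32 %v, i32* %p     ; store back the value just loaded
// in both cases provided no intervening MemoryDef clobbers the location.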
| 1878 | }; | ||||
| 1879 | |||||
| 1880 | static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, | ||||
| 1881 | DominatorTree &DT, PostDominatorTree &PDT, | ||||
| 1882 | const TargetLibraryInfo &TLI, | ||||
| 1883 | const LoopInfo &LI) { | ||||
| 1884 | bool MadeChange = false; | ||||
| 1885 | |||||
| 1886 | DSEState State = DSEState::get(F, AA, MSSA, DT, PDT, TLI, LI); | ||||
| 1887 | // For each store: | ||||
| 1888 | for (unsigned I = 0; I < State.MemDefs.size(); I++) { | ||||
| 1889 | MemoryDef *KillingDef = State.MemDefs[I]; | ||||
| 1890 | if (State.SkipStores.count(KillingDef)) | ||||
| 1891 | continue; | ||||
| 1892 | Instruction *SI = KillingDef->getMemoryInst(); | ||||
| 1893 | |||||
| 1894 | Optional<MemoryLocation> MaybeSILoc; | ||||
| 1895 | if (State.isMemTerminatorInst(SI)) | ||||
| 1896 | MaybeSILoc = State.getLocForTerminator(SI).map( | ||||
| 1897 | [](const std::pair<MemoryLocation, bool> &P) { return P.first; }); | ||||
| 1898 | else | ||||
| 1899 | MaybeSILoc = State.getLocForWriteEx(SI); | ||||
| 1900 | |||||
| 1901 | if (!MaybeSILoc) { | ||||
| 1902 | LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for " | ||||
| 1903 | << *SI << "\n"); | ||||
| 1904 | continue; | ||||
| 1905 | } | ||||
| 1906 | MemoryLocation SILoc = *MaybeSILoc; | ||||
| 1907 | assert(SILoc.Ptr && "SILoc should not be null"); | ||||
| 1908 | const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr); | ||||
| 1909 | |||||
| 1910 | MemoryAccess *Current = KillingDef; | ||||
| 1911 | LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by " | ||||
| 1912 | << *Current << " (" << *SI << ")\n"); | ||||
| 1913 | |||||
| 1914 | unsigned ScanLimit = MemorySSAScanLimit; | ||||
| 1915 | unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit; | ||||
| 1916 | unsigned PartialLimit = MemorySSAPartialStoreLimit; | ||||
| 1917 | // Worklist of MemoryAccesses that may be killed by KillingDef. | ||||
| 1918 | SetVector<MemoryAccess *> ToCheck; | ||||
| 1919 | |||||
| 1920 | if (SILocUnd) | ||||
| 1921 | ToCheck.insert(KillingDef->getDefiningAccess()); | ||||
| 1922 | |||||
| 1923 | bool Shortend = false; | ||||
| 1924 | bool IsMemTerm = State.isMemTerminatorInst(SI); | ||||
| 1925 | // Check if MemoryAccesses in the worklist are killed by KillingDef. | ||||
| 1926 | for (unsigned I = 0; I < ToCheck.size(); I++) { | ||||
| 1927 | Current = ToCheck[I]; | ||||
| 1928 | if (State.SkipStores.count(Current)) | ||||
| 1929 | continue; | ||||
| 1930 | |||||
| 1931 | Optional<MemoryAccess *> Next = State.getDomMemoryDef( | ||||
| 1932 | KillingDef, Current, SILoc, SILocUnd, ScanLimit, WalkerStepLimit, | ||||
| 1933 | IsMemTerm, PartialLimit); | ||||
| 1934 | |||||
| 1935 | if (!Next) { | ||||
| 1936 | LLVM_DEBUG(dbgs() << " finished walk\n"); | ||||
| 1937 | continue; | ||||
| 1938 | } | ||||
| 1939 | |||||
| 1940 | MemoryAccess *EarlierAccess = *Next; | ||||
| 1941 | LLVM_DEBUG(dbgs() << " Checking if we can kill " << *EarlierAccess); | ||||
| 1942 | if (isa<MemoryPhi>(EarlierAccess)) { | ||||
| 1943 | LLVM_DEBUG(dbgs() << "\n ... adding incoming values to worklist\n"); | ||||
| 1944 | for (Value *V : cast<MemoryPhi>(EarlierAccess)->incoming_values()) { | ||||
| 1945 | MemoryAccess *IncomingAccess = cast<MemoryAccess>(V); | ||||
| 1946 | BasicBlock *IncomingBlock = IncomingAccess->getBlock(); | ||||
| 1947 | BasicBlock *PhiBlock = EarlierAccess->getBlock(); | ||||
| 1948 | |||||
| 1949 | // We only consider incoming MemoryAccesses that come before the | ||||
| 1950 | // MemoryPhi. Otherwise we could discover candidates that do not | ||||
| 1951 | // strictly dominate our starting def. | ||||
| 1952 | if (State.PostOrderNumbers[IncomingBlock] > | ||||
| 1953 | State.PostOrderNumbers[PhiBlock]) | ||||
| 1954 | ToCheck.insert(IncomingAccess); | ||||
| 1955 | } | ||||
| 1956 | continue; | ||||
| 1957 | } | ||||
| 1958 | auto *NextDef = cast<MemoryDef>(EarlierAccess); | ||||
| 1959 | Instruction *NI = NextDef->getMemoryInst(); | ||||
| 1960 | LLVM_DEBUG(dbgs() << " (" << *NI << ")\n"); | ||||
| 1961 | ToCheck.insert(NextDef->getDefiningAccess()); | ||||
| 1962 | NumGetDomMemoryDefPassed++; | ||||
| 1963 | |||||
| 1964 | if (!DebugCounter::shouldExecute(MemorySSACounter)) | ||||
| 1965 | continue; | ||||
| 1966 | |||||
| 1967 | MemoryLocation NILoc = *State.getLocForWriteEx(NI); | ||||
| 1968 | |||||
| 1969 | if (IsMemTerm) { | ||||
| 1970 | const Value *NIUnd = getUnderlyingObject(NILoc.Ptr); | ||||
| 1971 | if (SILocUnd != NIUnd) | ||||
| 1972 | continue; | ||||
| 1973 | LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI | ||||
| 1974 | << "\n KILLER: " << *SI << '\n'); | ||||
| 1975 | State.deleteDeadInstruction(NI); | ||||
| 1976 | ++NumFastStores; | ||||
| 1977 | MadeChange = true; | ||||
| 1978 | } else { | ||||
| 1979 | // Check if NI overwrites SI. | ||||
| 1980 | int64_t InstWriteOffset, DepWriteOffset; | ||||
| 1981 | OverwriteResult OR = State.isOverwrite(SI, NI, SILoc, NILoc, | ||||
| 1982 | DepWriteOffset, InstWriteOffset); | ||||
| 1983 | if (OR == OW_MaybePartial) { | ||||
| 1984 | auto Iter = State.IOLs.insert( | ||||
| 1985 | std::make_pair<BasicBlock *, InstOverlapIntervalsTy>( | ||||
| 1986 | NI->getParent(), InstOverlapIntervalsTy())); | ||||
| 1987 | auto &IOL = Iter.first->second; | ||||
| 1988 | OR = isPartialOverwrite(SILoc, NILoc, DepWriteOffset, InstWriteOffset, | ||||
| 1989 | NI, IOL); | ||||
| 1990 | } | ||||
| 1991 | |||||
| 1992 | if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) { | ||||
| 1993 | auto *Earlier = dyn_cast<StoreInst>(NI); | ||||
| 1994 | auto *Later = dyn_cast<StoreInst>(SI); | ||||
| 1995 | // We are re-using tryToMergePartialOverlappingStores, which requires | ||||
| 1996 | // Earlier to dominate Later. | ||||
| 1997 | // TODO: implement tryToMergePartialOverlappingStores using MemorySSA. | ||||
| 1998 | if (Earlier && Later && DT.dominates(Earlier, Later)) { | ||||
| 1999 | if (Constant *Merged = tryToMergePartialOverlappingStores( | ||||
| 2000 | Earlier, Later, InstWriteOffset, DepWriteOffset, State.DL, | ||||
| 2001 | State.BatchAA, &DT)) { | ||||
| 2002 | |||||
| 2003 | // Update stored value of earlier store to merged constant. | ||||
| 2004 | Earlier->setOperand(0, Merged); | ||||
| 2005 | ++NumModifiedStores; | ||||
| 2006 | MadeChange = true; | ||||
| 2007 | |||||
| 2008 | Shortend = true; | ||||
| 2009 | // Remove later store and remove any outstanding overlap intervals | ||||
| 2010 | // for the updated store. | ||||
| 2011 | State.deleteDeadInstruction(Later); | ||||
| 2012 | auto I = State.IOLs.find(Earlier->getParent()); | ||||
| 2013 | if (I != State.IOLs.end()) | ||||
| 2014 | I->second.erase(Earlier); | ||||
| 2015 | break; | ||||
| 2016 | } | ||||
| 2017 | } | ||||
| 2018 | } | ||||
| 2019 | |||||
| 2020 | if (OR == OW_Complete) { | ||||
| 2021 | LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI | ||||
| 2022 | << "\n KILLER: " << *SI << '\n'); | ||||
| 2023 | State.deleteDeadInstruction(NI); | ||||
| 2024 | ++NumFastStores; | ||||
| 2025 | MadeChange = true; | ||||
| 2026 | } | ||||
| 2027 | } | ||||
| 2028 | } | ||||
| 2029 | |||||
| 2030 | // Check if the store is a no-op. | ||||
| 2031 | if (!Shortend && isRemovable(SI) && | ||||
| 2032 | State.storeIsNoop(KillingDef, SILoc, SILocUnd)) { | ||||
| 2033 | LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *SI << '\n'); | ||||
| 2034 | State.deleteDeadInstruction(SI); | ||||
| 2035 | NumRedundantStores++; | ||||
| 2036 | MadeChange = true; | ||||
| 2037 | continue; | ||||
| 2038 | } | ||||
| 2039 | } | ||||
| 2040 | |||||
| 2041 | if (EnablePartialOverwriteTracking) | ||||
| 2042 | for (auto &KV : State.IOLs) | ||||
| 2043 | MadeChange |= removePartiallyOverlappedStores(State.DL, KV.second, TLI); | ||||
| 2044 | |||||
| 2045 | MadeChange |= State.eliminateDeadWritesAtEndOfFunction(); | ||||
| 2046 | return MadeChange; | ||||
| 2047 | } | ||||
| 2048 | } // end anonymous namespace | ||||
| 2049 | |||||
| 2050 | //===----------------------------------------------------------------------===// | ||||
| 2051 | // DSE Pass | ||||
| 2052 | //===----------------------------------------------------------------------===// | ||||
| 2053 | PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) { | ||||
| 2054 | AliasAnalysis &AA = AM.getResult<AAManager>(F); | ||||
| 2055 | const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F); | ||||
| 2056 | DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F); | ||||
| 2057 | MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); | ||||
| 2058 | PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F); | ||||
| 2059 | LoopInfo &LI = AM.getResult<LoopAnalysis>(F); | ||||
| 2060 | |||||
| 2061 | bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI); | ||||
| 2062 | |||||
| 2063 | #ifdef LLVM_ENABLE_STATS | ||||
| 2064 | if (AreStatisticsEnabled()) | ||||
| 2065 | for (auto &I : instructions(F)) | ||||
| 2066 | NumRemainingStores += isa<StoreInst>(&I); | ||||
| 2067 | #endif | ||||
| 2068 | |||||
| 2069 | if (!Changed) | ||||
| 2070 | return PreservedAnalyses::all(); | ||||
| 2071 | |||||
| 2072 | PreservedAnalyses PA; | ||||
| 2073 | PA.preserveSet<CFGAnalyses>(); | ||||
| 2074 | PA.preserve<MemorySSAAnalysis>(); | ||||
| 2075 | PA.preserve<LoopAnalysis>(); | ||||
| 2076 | return PA; | ||||
| 2077 | } | ||||
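// Editor note: to exercise just this pass, `opt -passes=dse -S input.ll`
// runs the new-PM entry point above; the legacy wrapper below is reached
// with the legacy pass manager (e.g. `opt -enable-new-pm=0 -dse` on
// releases of this vintage).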
| 2078 | |||||
| 2079 | namespace { | ||||
| 2080 | |||||
| 2081 | /// A legacy pass for the legacy pass manager that wraps \c DSEPass. | ||||
| 2082 | class DSELegacyPass : public FunctionPass { | ||||
| 2083 | public: | ||||
| 2084 | static char ID; // Pass identification, replacement for typeid | ||||
| 2085 | |||||
| 2086 | DSELegacyPass() : FunctionPass(ID) { | ||||
| 2087 | initializeDSELegacyPassPass(*PassRegistry::getPassRegistry()); | ||||
| 2088 | } | ||||
| 2089 | |||||
| 2090 | bool runOnFunction(Function &F) override { | ||||
| 2091 | if (skipFunction(F)) | ||||
| 2092 | return false; | ||||
| 2093 | |||||
| 2094 | AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); | ||||
| 2095 | DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | ||||
| 2096 | const TargetLibraryInfo &TLI = | ||||
| 2097 | getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); | ||||
| 2098 | MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA(); | ||||
| 2099 | PostDominatorTree &PDT = | ||||
| 2100 | getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree(); | ||||
| 2101 | LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); | ||||
| 2102 | |||||
| 2103 | bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI); | ||||
| 2104 | |||||
| 2105 | #ifdef LLVM_ENABLE_STATS | ||||
| 2106 | if (AreStatisticsEnabled()) | ||||
| 2107 | for (auto &I : instructions(F)) | ||||
| 2108 | NumRemainingStores += isa<StoreInst>(&I); | ||||
| 2109 | #endif | ||||
| 2110 | |||||
| 2111 | return Changed; | ||||
| 2112 | } | ||||
| 2113 | |||||
| 2114 | void getAnalysisUsage(AnalysisUsage &AU) const override { | ||||
| 2115 | AU.setPreservesCFG(); | ||||
| 2116 | AU.addRequired<AAResultsWrapperPass>(); | ||||
| 2117 | AU.addRequired<TargetLibraryInfoWrapperPass>(); | ||||
| 2118 | AU.addPreserved<GlobalsAAWrapperPass>(); | ||||
| 2119 | AU.addRequired<DominatorTreeWrapperPass>(); | ||||
| 2120 | AU.addPreserved<DominatorTreeWrapperPass>(); | ||||
| 2121 | AU.addRequired<PostDominatorTreeWrapperPass>(); | ||||
| 2122 | AU.addRequired<MemorySSAWrapperPass>(); | ||||
| 2123 | AU.addPreserved<PostDominatorTreeWrapperPass>(); | ||||
| 2124 | AU.addPreserved<MemorySSAWrapperPass>(); | ||||
| 2125 | AU.addRequired<LoopInfoWrapperPass>(); | ||||
| 2126 | AU.addPreserved<LoopInfoWrapperPass>(); | ||||
| 2127 | } | ||||
| 2128 | }; | ||||
| 2129 | |||||
| 2130 | } // end anonymous namespace | ||||
| 2131 | |||||
| 2132 | char DSELegacyPass::ID = 0; | ||||
| 2133 | |||||
| 2134 | INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false, | ||||
| 2135 | false) | ||||
| 2136 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) | ||||
| 2137 | INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass) | ||||
| 2138 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) | ||||
| 2139 | INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) | ||||
| 2140 | INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) | ||||
| 2141 | INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass) | ||||
| 2142 | INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) | ||||
| 2143 | INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) | ||||
| 2144 | INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false, | ||||
| 2145 | false) | ||||
| 2146 | |||||
| 2147 | FunctionPass *llvm::createDeadStoreEliminationPass() { | ||||
| 2148 | return new DSELegacyPass(); | ||||
| 2149 | } |
| 1 | //===- MemoryLocation.h - Memory location descriptions ----------*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | /// \file |
| 9 | /// This file provides utility analysis objects describing memory locations. |
| 10 | /// These are used both by the Alias Analysis infrastructure and more |
| 11 | /// specialized memory analysis layers. |
| 12 | /// |
| 13 | //===----------------------------------------------------------------------===// |
| 14 | |
| 15 | #ifndef LLVM_ANALYSIS_MEMORYLOCATION_H |
| 16 | #define LLVM_ANALYSIS_MEMORYLOCATION_H |
| 17 | |
| 18 | #include "llvm/ADT/DenseMapInfo.h" |
| 19 | #include "llvm/ADT/Optional.h" |
| 20 | #include "llvm/IR/Metadata.h" |
| 21 | #include "llvm/Support/TypeSize.h" |
| 22 | |
| 23 | namespace llvm { |
| 24 | |
| 25 | class CallBase; |
| 26 | class Instruction; |
| 27 | class LoadInst; |
| 28 | class StoreInst; |
| 29 | class MemTransferInst; |
| 30 | class MemIntrinsic; |
| 31 | class AtomicCmpXchgInst; |
| 32 | class AtomicMemTransferInst; |
| 33 | class AtomicMemIntrinsic; |
| 34 | class AtomicRMWInst; |
| 35 | class AnyMemTransferInst; |
| 36 | class AnyMemIntrinsic; |
| 37 | class TargetLibraryInfo; |
| 38 | class VAArgInst; |
| 39 | |
| 40 | // Represents the size of a MemoryLocation. Logically, it's an |
| 41 | // Optional<uint64_t> that also carries a bit to represent whether the integer |
| 42 | // it contains, N, is 'precise'. Precise, in this context, means that we know |
| 43 | // that the area of storage referenced by the given MemoryLocation must be |
| 44 | // precisely N bytes. An imprecise value is formed as the union of two or more |
| 45 | // precise values, and can conservatively represent all of the values unioned |
| 46 | // into it. Importantly, imprecise values are an *upper-bound* on the size of a |
| 47 | // MemoryLocation. |
| 48 | // |
| 49 | // Concretely, a precise MemoryLocation is (%p, 4) in |
| 50 | // store i32 0, i32* %p |
| 51 | // |
| 52 | // We know that %p must be at least 4 bytes large at this point; |
| 53 | // otherwise, we have UB. An example of an imprecise MemoryLocation is (%p, 4) |
| 54 | // at the memcpy in |
| 55 | // |
| 56 | // %n = select i1 %foo, i64 1, i64 4 |
| 57 | // call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p, i8* %baz, i64 %n, i32 1, |
| 58 | // i1 false) |
| 59 | // |
| 60 | // ...Since we'll copy *up to* 4 bytes into %p, but we can't guarantee that |
| 61 | // we'll ever actually do so. |
| 62 | // |
| 63 | // If asked to represent a pathologically large value, this will degrade to |
| 64 | // None. |
| 65 | class LocationSize { |
| 66 | enum : uint64_t { |
| 67 | BeforeOrAfterPointer = ~uint64_t(0), |
| 68 | AfterPointer = BeforeOrAfterPointer - 1, |
| 69 | MapEmpty = BeforeOrAfterPointer - 2, |
| 70 | MapTombstone = BeforeOrAfterPointer - 3, |
| 71 | ImpreciseBit = uint64_t(1) << 63, |
| 72 | |
| 73 | // The maximum value we can represent without falling back to 'unknown'. |
| 74 | MaxValue = (MapTombstone - 1) & ~ImpreciseBit, |
| 75 | }; |
| 76 | |
| 77 | uint64_t Value; |
| 78 | |
| 79 | // Hack to support implicit construction. This should disappear when the |
| 80 | // public LocationSize ctor goes away. |
| 81 | enum DirectConstruction { Direct }; |
| 82 | |
| 83 | constexpr LocationSize(uint64_t Raw, DirectConstruction): Value(Raw) {} |
| 84 | |
| 85 | static_assert(AfterPointer & ImpreciseBit, |
| 86 | "AfterPointer is imprecise by definition."); |
| 87 | static_assert(BeforeOrAfterPointer & ImpreciseBit, |
| 88 | "BeforeOrAfterPointer is imprecise by definition."); |
| 89 | |
| 90 | public: |
| 91 | // FIXME: Migrate all users to construct via either `precise` or `upperBound`, |
| 92 | // to make it more obvious at the call site what kind of size they're |
| 93 | // providing. |
| 94 | // |
| 95 | // Since the overwhelming majority of users of this provide precise values, |
| 96 | // this assumes the provided value is precise. |
| 97 | constexpr LocationSize(uint64_t Raw) |
| 98 | : Value(Raw > MaxValue ? AfterPointer : Raw) {} |
| 99 | |
| 100 | static LocationSize precise(uint64_t Value) { return LocationSize(Value); } |
| 101 | static LocationSize precise(TypeSize Value) { |
| 102 | if (Value.isScalable()) |
| 103 | return afterPointer(); |
| 104 | return precise(Value.getFixedSize()); |
| 105 | } |
| 106 | |
| 107 | static LocationSize upperBound(uint64_t Value) { |
| 108 | // You can't go lower than 0, so give a precise result. |
| 109 | if (LLVM_UNLIKELY(Value == 0)) |
| 110 | return precise(0); |
| 111 | if (LLVM_UNLIKELY(Value > MaxValue)) |
| 112 | return afterPointer(); |
| 113 | return LocationSize(Value | ImpreciseBit, Direct); |
| 114 | } |
| 115 | static LocationSize upperBound(TypeSize Value) { |
| 116 | if (Value.isScalable()) |
| 117 | return afterPointer(); |
| 118 | return upperBound(Value.getFixedSize()); |
| 119 | } |
| 120 | |
| 121 | /// Any location after the base pointer (but still within the underlying |
| 122 | /// object). |
| 123 | constexpr static LocationSize afterPointer() { |
| 124 | return LocationSize(AfterPointer, Direct); |
| 125 | } |
| 126 | |
| 127 | /// Any location before or after the base pointer (but still within the |
| 128 | /// underlying object). |
| 129 | constexpr static LocationSize beforeOrAfterPointer() { |
| 130 | return LocationSize(BeforeOrAfterPointer, Direct); |
| 131 | } |
| 132 | |
| 133 | // Sentinel values, generally used for maps. |
| 134 | constexpr static LocationSize mapTombstone() { |
| 135 | return LocationSize(MapTombstone, Direct); |
| 136 | } |
| 137 | constexpr static LocationSize mapEmpty() { |
| 138 | return LocationSize(MapEmpty, Direct); |
| 139 | } |
| 140 | |
| 141 | // Returns a LocationSize that can correctly represent either `*this` or |
| 142 | // `Other`. |
| 143 | LocationSize unionWith(LocationSize Other) const { |
| 144 | if (Other == *this) |
| 145 | return *this; |
| 146 | |
| 147 | if (Value == BeforeOrAfterPointer || Other.Value == BeforeOrAfterPointer) |
| 148 | return beforeOrAfterPointer(); |
| 149 | if (Value == AfterPointer || Other.Value == AfterPointer) |
| 150 | return afterPointer(); |
| 151 | |
| 152 | return upperBound(std::max(getValue(), Other.getValue())); |
| 153 | } |
| 154 | |
| 155 | bool hasValue() const { |
| 156 | return Value != AfterPointer && Value != BeforeOrAfterPointer; |
| 157 | } |
| 158 | uint64_t getValue() const { |
| 159 | assert(hasValue() && "Getting value from an unknown LocationSize!"); |
| 160 | return Value & ~ImpreciseBit; |
| 161 | } |
| 162 | |
| 163 | // Returns whether or not this value is precise. Note that if a value is |
| 164 | // precise, it's guaranteed to not be unknown. |
| 165 | bool isPrecise() const { |
| 166 | return (Value & ImpreciseBit) == 0; |
| 167 | } |
| 168 | |
| 169 | // Convenience method to check if this LocationSize's value is 0. |
| 170 | bool isZero() const { return hasValue() && getValue() == 0; } |
| 171 | |
| 172 | /// Whether accesses before the base pointer are possible. |
| 173 | bool mayBeBeforePointer() const { return Value == BeforeOrAfterPointer; } |
| 174 | |
| 175 | bool operator==(const LocationSize &Other) const { |
| 176 | return Value == Other.Value; |
| 177 | } |
| 178 | |
| 179 | bool operator!=(const LocationSize &Other) const { |
| 180 | return !(*this == Other); |
| 181 | } |
| 182 | |
| 183 | // Ordering operators are not provided, since it's unclear if there's only one |
| 184 | // reasonable way to compare: |
| 185 | // - values that don't exist against values that do, and |
| 186 | // - precise values to imprecise values |
| 187 | |
| 188 | void print(raw_ostream &OS) const; |
| 189 | |
| 190 | // Returns an opaque value that represents this LocationSize. Cannot be |
| 191 | // reliably converted back into a LocationSize. |
| 192 | uint64_t toRaw() const { return Value; } |
| 193 | }; |
| 194 | |
| 195 | inline raw_ostream &operator<<(raw_ostream &OS, LocationSize Size) { |
| 196 | Size.print(OS); |
| 197 | return OS; |
| 198 | } |
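To make the precise/upper-bound distinction concrete, here is a minimal sketch that exercises only the API declared above; locationSizeDemo and the specific byte counts are invented for illustration.

    #include "llvm/Analysis/MemoryLocation.h"
    #include <cassert>
    using namespace llvm;

    void locationSizeDemo() {
      LocationSize A = LocationSize::precise(4);    // exactly 4 bytes
      LocationSize B = LocationSize::upperBound(8); // at most 8 bytes
      assert(A.isPrecise() && !B.isPrecise());

      // The union must conservatively cover both values, so it degrades
      // to an imprecise "at most 8 bytes".
      LocationSize U = A.unionWith(B);
      assert(!U.isPrecise() && U.hasValue() && U.getValue() == 8);

      // upperBound(0) canonicalizes to a precise zero, since a size
      // cannot go below zero.
      assert(LocationSize::upperBound(0).isPrecise());
    }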
| 199 | |
| 200 | /// Representation for a specific memory location. |
| 201 | /// |
| 202 | /// This abstraction can be used to represent a specific location in memory. |
| 203 | /// The goal of the location is to represent enough information to describe |
| 204 | /// abstract aliasing, modification, and reference behaviors of whatever |
| 205 | /// value(s) are stored in memory at the particular location. |
| 206 | /// |
| 207 | /// The primary user of this interface is LLVM's Alias Analysis, but other |
| 208 | /// memory analyses such as MemoryDependence can use it as well. |
| 209 | class MemoryLocation { |
| 210 | public: |
| 211 | /// UnknownSize - This is a special value which can be used with the |
| 212 | /// size arguments in alias queries to indicate that the caller does not |
| 213 | /// know the sizes of the potential memory references. |
| 214 | enum : uint64_t { UnknownSize = ~UINT64_C(0) }; |
| 215 | |
| 216 | /// The address of the start of the location. |
| 217 | const Value *Ptr; |
| 218 | |
| 219 | /// The maximum size of the location, in address-units, or |
| 220 | /// UnknownSize if the size is not known. |
| 221 | /// |
| 222 | /// Note that an unknown size does not mean the pointer aliases the entire |
| 223 | /// virtual address space, because there are restrictions on stepping out of |
| 224 | /// one object and into another. See |
| 225 | /// http://llvm.org/docs/LangRef.html#pointeraliasing |
| 226 | LocationSize Size; |
| 227 | |
| 228 | /// The metadata nodes which describe the aliasing of the location (each |
| 229 | /// member is null if that kind of information is unavailable). |
| 230 | AAMDNodes AATags; |
| 231 | |
| 232 | void print(raw_ostream &OS) const { OS << *Ptr << " " << Size << "\n"; } |
| 233 | |
| 234 | /// Return a location with information about the memory referenced by the given |
| 235 | /// instruction. |
| 236 | static MemoryLocation get(const LoadInst *LI); |
| 237 | static MemoryLocation get(const StoreInst *SI); |
| 238 | static MemoryLocation get(const VAArgInst *VI); |
| 239 | static MemoryLocation get(const AtomicCmpXchgInst *CXI); |
| 240 | static MemoryLocation get(const AtomicRMWInst *RMWI); |
| 241 | static MemoryLocation get(const Instruction *Inst) { |
| 242 | return *MemoryLocation::getOrNone(Inst); |
| 243 | } |
| 244 | static Optional<MemoryLocation> getOrNone(const Instruction *Inst); |
| 245 | |
| 246 | /// Return a location representing the source of a memory transfer. |
| 247 | static MemoryLocation getForSource(const MemTransferInst *MTI); |
| 248 | static MemoryLocation getForSource(const AtomicMemTransferInst *MTI); |
| 249 | static MemoryLocation getForSource(const AnyMemTransferInst *MTI); |
| 250 | |
| 251 | /// Return a location representing the destination of a memory set or |
| 252 | /// transfer. |
| 253 | static MemoryLocation getForDest(const MemIntrinsic *MI); |
| 254 | static MemoryLocation getForDest(const AtomicMemIntrinsic *MI); |
| 255 | static MemoryLocation getForDest(const AnyMemIntrinsic *MI); |
| 256 | |
| 257 | /// Return a location representing a particular argument of a call. |
| 258 | static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx, |
| 259 | const TargetLibraryInfo *TLI); |
| 260 | static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx, |
| 261 | const TargetLibraryInfo &TLI) { |
| 262 | return getForArgument(Call, ArgIdx, &TLI); |
| 263 | } |
| 264 | |
| 265 | /// Return a location that may access any location after Ptr, while remaining |
| 266 | /// within the underlying object. |
| 267 | static MemoryLocation getAfter(const Value *Ptr, |
| 268 | const AAMDNodes &AATags = AAMDNodes()) { |
| 269 | return MemoryLocation(Ptr, LocationSize::afterPointer(), AATags); |
| 270 | } |
| 271 | |
| 272 | /// Return a location that may access any location before or after Ptr, while |
| 273 | /// remaining within the underlying object. |
| 274 | static MemoryLocation |
| 275 | getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags = AAMDNodes()) { |
| 276 | return MemoryLocation(Ptr, LocationSize::beforeOrAfterPointer(), AATags); |
| 277 | } |
| 278 | |
| 279 | // Return the exact size if it is known at compile time, |
| 280 | // otherwise return MemoryLocation::UnknownSize. |
| 281 | static uint64_t getSizeOrUnknown(const TypeSize &T) { |
| 282 | return T.isScalable() ? UnknownSize : T.getFixedSize(); |
| 283 | } |
| 284 | |
| 285 | MemoryLocation() |
| 286 | : Ptr(nullptr), Size(LocationSize::beforeOrAfterPointer()), AATags() {} |
| 287 | |
| 288 | explicit MemoryLocation(const Value *Ptr, LocationSize Size, |
| 289 | const AAMDNodes &AATags = AAMDNodes()) |
| 290 | : Ptr(Ptr), Size(Size), AATags(AATags) {} |
| 291 | |
| 292 | MemoryLocation getWithNewPtr(const Value *NewPtr) const { |
| 293 | MemoryLocation Copy(*this); |
| 294 | Copy.Ptr = NewPtr; |
| 295 | return Copy; |
| 296 | } |
| 297 | |
| 298 | MemoryLocation getWithNewSize(LocationSize NewSize) const { |
| 299 | MemoryLocation Copy(*this); |
| 300 | Copy.Size = NewSize; |
| 301 | return Copy; |
| 302 | } |
| 303 | |
| 304 | MemoryLocation getWithoutAATags() const { |
| 305 | MemoryLocation Copy(*this); |
| 306 | Copy.AATags = AAMDNodes(); |
| 307 | return Copy; |
| 308 | } |
| 309 | |
| 310 | bool operator==(const MemoryLocation &Other) const { |
| 311 | return Ptr == Other.Ptr && Size == Other.Size && AATags == Other.AATags; |
| 312 | } |
| 313 | }; |
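As a usage sketch of the factory functions above: storesKnownByteCount and its argument are hypothetical, but the calls are exactly the ones declared in this header.

    #include "llvm/Analysis/MemoryLocation.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Hypothetical helper: does this store write a compile-time-known
    // number of bytes?
    bool storesKnownByteCount(const StoreInst *SI) {
      MemoryLocation Loc = MemoryLocation::get(SI); // pointer + size of the store
      // A conservative view of the same pointer, covering anything at or
      // after it within the underlying object.
      MemoryLocation After = MemoryLocation::getAfter(Loc.Ptr);
      (void)After;
      return Loc.Size.hasValue() && Loc.Size.isPrecise();
    }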
| 314 | |
| 315 | // Specialize DenseMapInfo. |
| 316 | template <> struct DenseMapInfo<LocationSize> { |
| 317 | static inline LocationSize getEmptyKey() { |
| 318 | return LocationSize::mapEmpty(); |
| 319 | } |
| 320 | static inline LocationSize getTombstoneKey() { |
| 321 | return LocationSize::mapTombstone(); |
| 322 | } |
| 323 | static unsigned getHashValue(const LocationSize &Val) { |
| 324 | return DenseMapInfo<uint64_t>::getHashValue(Val.toRaw()); |
| 325 | } |
| 326 | static bool isEqual(const LocationSize &LHS, const LocationSize &RHS) { |
| 327 | return LHS == RHS; |
| 328 | } |
| 329 | }; |
| 330 | |
| 331 | template <> struct DenseMapInfo<MemoryLocation> { |
| 332 | static inline MemoryLocation getEmptyKey() { |
| 333 | return MemoryLocation(DenseMapInfo<const Value *>::getEmptyKey(), |
| 334 | DenseMapInfo<LocationSize>::getEmptyKey()); |
| 335 | } |
| 336 | static inline MemoryLocation getTombstoneKey() { |
| 337 | return MemoryLocation(DenseMapInfo<const Value *>::getTombstoneKey(), |
| 338 | DenseMapInfo<LocationSize>::getTombstoneKey()); |
| 339 | } |
| 340 | static unsigned getHashValue(const MemoryLocation &Val) { |
| 341 | return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^ |
| 342 | DenseMapInfo<LocationSize>::getHashValue(Val.Size) ^ |
| 343 | DenseMapInfo<AAMDNodes>::getHashValue(Val.AATags); |
| 344 | } |
| 345 | static bool isEqual(const MemoryLocation &LHS, const MemoryLocation &RHS) { |
| 346 | return LHS == RHS; |
| 347 | } |
| 348 | }; |
| 349 | } // namespace llvm |
| 350 | |
| 351 | #endif |
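With these DenseMapInfo specializations in place, MemoryLocation (and LocationSize) can be used directly as DenseMap keys. A minimal sketch follows; denseMapDemo and the visit counter are invented for illustration.

    #include "llvm/ADT/DenseMap.h"
    #include "llvm/Analysis/MemoryLocation.h"
    using namespace llvm;

    // Hypothetical cache keyed by memory location.
    void denseMapDemo(const Value *Ptr) {
      DenseMap<MemoryLocation, unsigned> VisitCount;
      MemoryLocation Loc(Ptr, LocationSize::precise(4));
      ++VisitCount[Loc]; // hashes Ptr, Size, and AATags as defined above
    }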