| File: | src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp |
| Warning: | line 1427, column 26: Called C++ object pointer is null |
| 1 | //===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===// | ||||||||
| 2 | // | ||||||||
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||||
| 4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||||
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||||
| 6 | // | ||||||||
| 7 | //===----------------------------------------------------------------------===// | ||||||||
| 8 | // | ||||||||
| 9 | // This pass performs various transformations related to eliminating memcpy | ||||||||
| 10 | // calls, or transforming sets of stores into memsets. | ||||||||
| 11 | // | ||||||||
| 12 | //===----------------------------------------------------------------------===// | ||||||||
| 13 | |||||||||
| 14 | #include "llvm/Transforms/Scalar/MemCpyOptimizer.h" | ||||||||
| 15 | #include "llvm/ADT/DenseSet.h" | ||||||||
| 16 | #include "llvm/ADT/None.h" | ||||||||
| 17 | #include "llvm/ADT/STLExtras.h" | ||||||||
| 18 | #include "llvm/ADT/SmallVector.h" | ||||||||
| 19 | #include "llvm/ADT/Statistic.h" | ||||||||
| 20 | #include "llvm/ADT/iterator_range.h" | ||||||||
| 21 | #include "llvm/Analysis/AliasAnalysis.h" | ||||||||
| 22 | #include "llvm/Analysis/AssumptionCache.h" | ||||||||
| 23 | #include "llvm/Analysis/GlobalsModRef.h" | ||||||||
| 24 | #include "llvm/Analysis/Loads.h" | ||||||||
| 25 | #include "llvm/Analysis/MemoryDependenceAnalysis.h" | ||||||||
| 26 | #include "llvm/Analysis/MemoryLocation.h" | ||||||||
| 27 | #include "llvm/Analysis/MemorySSA.h" | ||||||||
| 28 | #include "llvm/Analysis/MemorySSAUpdater.h" | ||||||||
| 29 | #include "llvm/Analysis/TargetLibraryInfo.h" | ||||||||
| 30 | #include "llvm/Analysis/ValueTracking.h" | ||||||||
| 31 | #include "llvm/IR/Argument.h" | ||||||||
| 32 | #include "llvm/IR/BasicBlock.h" | ||||||||
| 33 | #include "llvm/IR/Constants.h" | ||||||||
| 34 | #include "llvm/IR/DataLayout.h" | ||||||||
| 35 | #include "llvm/IR/DerivedTypes.h" | ||||||||
| 36 | #include "llvm/IR/Dominators.h" | ||||||||
| 37 | #include "llvm/IR/Function.h" | ||||||||
| 38 | #include "llvm/IR/GetElementPtrTypeIterator.h" | ||||||||
| 39 | #include "llvm/IR/GlobalVariable.h" | ||||||||
| 40 | #include "llvm/IR/IRBuilder.h" | ||||||||
| 41 | #include "llvm/IR/InstrTypes.h" | ||||||||
| 42 | #include "llvm/IR/Instruction.h" | ||||||||
| 43 | #include "llvm/IR/Instructions.h" | ||||||||
| 44 | #include "llvm/IR/IntrinsicInst.h" | ||||||||
| 45 | #include "llvm/IR/Intrinsics.h" | ||||||||
| 46 | #include "llvm/IR/LLVMContext.h" | ||||||||
| 47 | #include "llvm/IR/Module.h" | ||||||||
| 48 | #include "llvm/IR/Operator.h" | ||||||||
| 49 | #include "llvm/IR/PassManager.h" | ||||||||
| 50 | #include "llvm/IR/Type.h" | ||||||||
| 51 | #include "llvm/IR/User.h" | ||||||||
| 52 | #include "llvm/IR/Value.h" | ||||||||
| 53 | #include "llvm/InitializePasses.h" | ||||||||
| 54 | #include "llvm/Pass.h" | ||||||||
| 55 | #include "llvm/Support/Casting.h" | ||||||||
| 56 | #include "llvm/Support/Debug.h" | ||||||||
| 57 | #include "llvm/Support/MathExtras.h" | ||||||||
| 58 | #include "llvm/Support/raw_ostream.h" | ||||||||
| 59 | #include "llvm/Transforms/Scalar.h" | ||||||||
| 60 | #include "llvm/Transforms/Utils/Local.h" | ||||||||
| 61 | #include <algorithm> | ||||||||
| 62 | #include <cassert> | ||||||||
| 63 | #include <cstdint> | ||||||||
| 64 | #include <utility> | ||||||||
| 65 | |||||||||
| 66 | using namespace llvm; | ||||||||
| 67 | |||||||||
| 68 | #define DEBUG_TYPE "memcpyopt" | ||||||||
| 69 | |||||||||
| 70 | static cl::opt<bool> | ||||||||
| 71 | EnableMemorySSA("enable-memcpyopt-memoryssa", cl::init(true), cl::Hidden, | ||||||||
| 72 | cl::desc("Use MemorySSA-backed MemCpyOpt.")); | ||||||||
| 73 | |||||||||
| 74 | STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted"); | ||||||||
| 75 | STATISTIC(NumMemSetInfer, "Number of memsets inferred"); | ||||||||
| 76 | STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy"); | ||||||||
| 77 | STATISTIC(NumCpyToSet, "Number of memcpys converted to memset"); | ||||||||
| 78 | STATISTIC(NumCallSlot, "Number of call slot optimizations performed"); | ||||||||
| 79 | |||||||||
| 80 | namespace { | ||||||||
| 81 | |||||||||
| 82 | /// Represents a range of memset'd bytes with the ByteVal value. | ||||||||
| 83 | /// This allows us to analyze stores like: | ||||||||
| 84 | /// store 0 -> P+1 | ||||||||
| 85 | /// store 0 -> P+0 | ||||||||
| 86 | /// store 0 -> P+3 | ||||||||
| 87 | /// store 0 -> P+2 | ||||||||
| 88 | /// which sometimes happens with stores to arrays of structs etc. When we see | ||||||||
| 89 | /// the first store, we make a range [1, 2). The second store extends the range | ||||||||
| 90 | /// to [0, 2). The third makes a new range [2, 3). The fourth store joins the | ||||||||
| 91 | /// two ranges into [0, 3) which is memset'able. | ||||||||
| 92 | struct MemsetRange { | ||||||||
| 93 | // Start/End - A semi range that describes the span that this range covers. | ||||||||
| 94 | // The range is closed at the start and open at the end: [Start, End). | ||||||||
| 95 | int64_t Start, End; | ||||||||
| 96 | |||||||||
| 97 | /// StartPtr - The getelementptr instruction that points to the start of the | ||||||||
| 98 | /// range. | ||||||||
| 99 | Value *StartPtr; | ||||||||
| 100 | |||||||||
| 101 | /// Alignment - The known alignment of the first store. | ||||||||
| 102 | unsigned Alignment; | ||||||||
| 103 | |||||||||
| 104 | /// TheStores - The actual stores that make up this range. | ||||||||
| 105 | SmallVector<Instruction*, 16> TheStores; | ||||||||
| 106 | |||||||||
| 107 | bool isProfitableToUseMemset(const DataLayout &DL) const; | ||||||||
| 108 | }; | ||||||||
| 109 | |||||||||
| 110 | } // end anonymous namespace | ||||||||
| 111 | |||||||||
| 112 | bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const { | ||||||||
| 113 | // If we found more than 4 stores to merge or 16 bytes, use memset. | ||||||||
| 114 | if (TheStores.size() >= 4 || End-Start >= 16) return true; | ||||||||
| 115 | |||||||||
| 116 | // If there is nothing to merge, don't do anything. | ||||||||
| 117 | if (TheStores.size() < 2) return false; | ||||||||
| 118 | |||||||||
| 119 | // If any of the stores are a memset, then it is always good to extend the | ||||||||
| 120 | // memset. | ||||||||
| 121 | for (Instruction *SI : TheStores) | ||||||||
| 122 | if (!isa<StoreInst>(SI)) | ||||||||
| 123 | return true; | ||||||||
| 124 | |||||||||
| 125 | // Assume that the code generator is capable of merging pairs of stores | ||||||||
| 126 | // together if it wants to. | ||||||||
| 127 | if (TheStores.size() == 2) return false; | ||||||||
| 128 | |||||||||
| 129 | // If we have fewer than 8 stores, it can still be worthwhile to do this. | ||||||||
| 130 | // For example, merging 4 i8 stores into an i32 store is useful almost always. | ||||||||
| 131 | // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the | ||||||||
| 132 | // memset will be split into 2 32-bit stores anyway) and doing so can | ||||||||
| 133 | // pessimize the llvm optimizer. | ||||||||
| 134 | // | ||||||||
| 135 | // Since we don't have perfect knowledge here, make some assumptions: assume | ||||||||
| 136 | // the maximum GPR width is the same size as the largest legal integer | ||||||||
| 137 | // size. If so, check to see whether we will end up actually reducing the | ||||||||
| 138 | // number of stores used. | ||||||||
| 139 | unsigned Bytes = unsigned(End-Start); | ||||||||
| 140 | unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8; | ||||||||
| 141 | if (MaxIntSize == 0) | ||||||||
| 142 | MaxIntSize = 1; | ||||||||
| 143 | unsigned NumPointerStores = Bytes / MaxIntSize; | ||||||||
| 144 | |||||||||
| 145 | // Assume the remaining bytes, if any, are done a byte at a time. | ||||||||
| 146 | unsigned NumByteStores = Bytes % MaxIntSize; | ||||||||
| 147 | |||||||||
| 148 | // If we will reduce the # stores (according to this heuristic), do the | ||||||||
| 149 | // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32 | ||||||||
| 150 | // etc. | ||||||||
| 151 | return TheStores.size() > NumPointerStores+NumByteStores; | ||||||||
| 152 | } | ||||||||
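| | // Worked example for the heuristic above (illustrative, not part of the | ||||||||
| | // upstream source): reaching this point means exactly three plain stores | ||||||||
| | // (four or more return true early, two return false, and a memset among | ||||||||
| | // them also returns true early). For three stores covering 8 bytes on a | ||||||||
| | // target whose largest legal integer is i64: Bytes = 8, MaxIntSize = 8, so | ||||||||
| | // NumPointerStores = 1 and NumByteStores = 0; since 3 > 1, the range is | ||||||||
| | // lowered to a memset that codegen can emit as a single wide store. | ||||||||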
| 153 | |||||||||
| 154 | namespace { | ||||||||
| 155 | |||||||||
| 156 | class MemsetRanges { | ||||||||
| 157 | using range_iterator = SmallVectorImpl<MemsetRange>::iterator; | ||||||||
| 158 | |||||||||
| 159 | /// A sorted list of the memset ranges. | ||||||||
| 160 | SmallVector<MemsetRange, 8> Ranges; | ||||||||
| 161 | |||||||||
| 162 | const DataLayout &DL; | ||||||||
| 163 | |||||||||
| 164 | public: | ||||||||
| 165 | MemsetRanges(const DataLayout &DL) : DL(DL) {} | ||||||||
| 166 | |||||||||
| 167 | using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator; | ||||||||
| 168 | |||||||||
| 169 | const_iterator begin() const { return Ranges.begin(); } | ||||||||
| 170 | const_iterator end() const { return Ranges.end(); } | ||||||||
| 171 | bool empty() const { return Ranges.empty(); } | ||||||||
| 172 | |||||||||
| 173 | void addInst(int64_t OffsetFromFirst, Instruction *Inst) { | ||||||||
| 174 | if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) | ||||||||
| 175 | addStore(OffsetFromFirst, SI); | ||||||||
| 176 | else | ||||||||
| 177 | addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst)); | ||||||||
| 178 | } | ||||||||
| 179 | |||||||||
| 180 | void addStore(int64_t OffsetFromFirst, StoreInst *SI) { | ||||||||
| 181 | TypeSize StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType()); | ||||||||
| 182 | assert(!StoreSize.isScalable() && "Can't track scalable-typed stores"); | ||||||||
| 183 | addRange(OffsetFromFirst, StoreSize.getFixedSize(), SI->getPointerOperand(), | ||||||||
| 184 | SI->getAlign().value(), SI); | ||||||||
| 185 | } | ||||||||
| 186 | |||||||||
| 187 | void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) { | ||||||||
| 188 | int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue(); | ||||||||
| 189 | addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(), MSI); | ||||||||
| 190 | } | ||||||||
| 191 | |||||||||
| 192 | void addRange(int64_t Start, int64_t Size, Value *Ptr, | ||||||||
| 193 | unsigned Alignment, Instruction *Inst); | ||||||||
| 194 | }; | ||||||||
| 195 | |||||||||
| 196 | } // end anonymous namespace | ||||||||
| 197 | |||||||||
| 198 | /// Add a new store to the MemsetRanges data structure. This adds a | ||||||||
| 199 | /// new range for the specified store at the specified offset, merging into | ||||||||
| 200 | /// existing ranges as appropriate. | ||||||||
| 201 | void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr, | ||||||||
| 202 | unsigned Alignment, Instruction *Inst) { | ||||||||
| 203 | int64_t End = Start+Size; | ||||||||
| 204 | |||||||||
| 205 | range_iterator I = partition_point( | ||||||||
| 206 | Ranges, [=](const MemsetRange &O) { return O.End < Start; }); | ||||||||
| 207 | |||||||||
| 208 | // We now know that I == E, in which case we didn't find anything to merge | ||||||||
| 209 | // with, or that Start <= I->End. If End < I->Start or I == E, then we need | ||||||||
| 210 | // to insert a new range. Handle this now. | ||||||||
| 211 | if (I == Ranges.end() || End < I->Start) { | ||||||||
| 212 | MemsetRange &R = *Ranges.insert(I, MemsetRange()); | ||||||||
| 213 | R.Start = Start; | ||||||||
| 214 | R.End = End; | ||||||||
| 215 | R.StartPtr = Ptr; | ||||||||
| 216 | R.Alignment = Alignment; | ||||||||
| 217 | R.TheStores.push_back(Inst); | ||||||||
| 218 | return; | ||||||||
| 219 | } | ||||||||
| 220 | |||||||||
| 221 | // This store overlaps with I, add it. | ||||||||
| 222 | I->TheStores.push_back(Inst); | ||||||||
| 223 | |||||||||
| 224 | // At this point, we may have an interval that completely contains our store. | ||||||||
| 225 | // If so, just add it to the interval and return. | ||||||||
| 226 | if (I->Start <= Start && I->End >= End) | ||||||||
| 227 | return; | ||||||||
| 228 | |||||||||
| 229 | // Now we know that Start <= I->End and End >= I->Start so the range overlaps | ||||||||
| 230 | // but is not entirely contained within the range. | ||||||||
| 231 | |||||||||
| 232 | // See if this store extends the start of the range. In this case, it | ||||||||
| 233 | // couldn't possibly cause the range to join the prior one, because otherwise | ||||||||
| 234 | // we would have stopped on *it*. | ||||||||
| 235 | if (Start < I->Start) { | ||||||||
| 236 | I->Start = Start; | ||||||||
| 237 | I->StartPtr = Ptr; | ||||||||
| 238 | I->Alignment = Alignment; | ||||||||
| 239 | } | ||||||||
| 240 | |||||||||
| 241 | // Now we know that Start <= I->End and Start >= I->Start (so the startpoint | ||||||||
| 242 | // is in or right at the end of I), and that End >= I->Start. Extend I out to | ||||||||
| 243 | // End. | ||||||||
| 244 | if (End > I->End) { | ||||||||
| 245 | I->End = End; | ||||||||
| 246 | range_iterator NextI = I; | ||||||||
| 247 | while (++NextI != Ranges.end() && End >= NextI->Start) { | ||||||||
| 248 | // Merge the range in. | ||||||||
| 249 | I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end()); | ||||||||
| 250 | if (NextI->End > I->End) | ||||||||
| 251 | I->End = NextI->End; | ||||||||
| 252 | Ranges.erase(NextI); | ||||||||
| 253 | NextI = I; | ||||||||
| 254 | } | ||||||||
| 255 | } | ||||||||
| 256 | } | ||||||||
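| | // Illustrative trace of the merging logic above (not from the upstream | ||||||||
| | // source): with existing ranges [0,2) and [4,6), adding [1,5) lands on the | ||||||||
| | // first range, extends its End to 5, and then the NextI loop folds the | ||||||||
| | // second range in, leaving a single [0,6) range holding all the stores. | ||||||||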
| 257 | |||||||||
| 258 | //===----------------------------------------------------------------------===// | ||||||||
| 259 | // MemCpyOptLegacyPass Pass | ||||||||
| 260 | //===----------------------------------------------------------------------===// | ||||||||
| 261 | |||||||||
| 262 | namespace { | ||||||||
| 263 | |||||||||
| 264 | class MemCpyOptLegacyPass : public FunctionPass { | ||||||||
| 265 | MemCpyOptPass Impl; | ||||||||
| 266 | |||||||||
| 267 | public: | ||||||||
| 268 | static char ID; // Pass identification, replacement for typeid | ||||||||
| 269 | |||||||||
| 270 | MemCpyOptLegacyPass() : FunctionPass(ID) { | ||||||||
| 271 | initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry()); | ||||||||
| 272 | } | ||||||||
| 273 | |||||||||
| 274 | bool runOnFunction(Function &F) override; | ||||||||
| 275 | |||||||||
| 276 | private: | ||||||||
| 277 | // This transformation requires dominator and postdominator info. | ||||||||
| 278 | void getAnalysisUsage(AnalysisUsage &AU) const override { | ||||||||
| 279 | AU.setPreservesCFG(); | ||||||||
| 280 | AU.addRequired<AssumptionCacheTracker>(); | ||||||||
| 281 | AU.addRequired<DominatorTreeWrapperPass>(); | ||||||||
| 282 | AU.addPreserved<DominatorTreeWrapperPass>(); | ||||||||
| 283 | AU.addPreserved<GlobalsAAWrapperPass>(); | ||||||||
| 284 | AU.addRequired<TargetLibraryInfoWrapperPass>(); | ||||||||
| 285 | if (!EnableMemorySSA) | ||||||||
| 286 | AU.addRequired<MemoryDependenceWrapperPass>(); | ||||||||
| 287 | AU.addPreserved<MemoryDependenceWrapperPass>(); | ||||||||
| 288 | AU.addRequired<AAResultsWrapperPass>(); | ||||||||
| 289 | AU.addPreserved<AAResultsWrapperPass>(); | ||||||||
| 290 | if (EnableMemorySSA) | ||||||||
| 291 | AU.addRequired<MemorySSAWrapperPass>(); | ||||||||
| 292 | AU.addPreserved<MemorySSAWrapperPass>(); | ||||||||
| 293 | } | ||||||||
| 294 | }; | ||||||||
| 295 | |||||||||
| 296 | } // end anonymous namespace | ||||||||
| 297 | |||||||||
| 298 | char MemCpyOptLegacyPass::ID = 0; | ||||||||
| 299 | |||||||||
| 300 | /// The public interface to this file... | ||||||||
| 301 | FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); } | ||||||||
| 302 | |||||||||
| 303 | INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization", | ||||||||
| 304 | false, false) | ||||||||
| 305 | INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) | ||||||||
| 306 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) | ||||||||
| 307 | INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass) | ||||||||
| 308 | INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) | ||||||||
| 309 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) | ||||||||
| 310 | INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) | ||||||||
| 311 | INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) | ||||||||
| 312 | INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization", | ||||||||
| 313 | false, false) | ||||||||
| 314 | |||||||||
| 315 | // Check that V is either not accessible by the caller, or unwinding cannot | ||||||||
| 316 | // occur between Start and End. | ||||||||
| 317 | static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start, | ||||||||
| 318 | Instruction *End) { | ||||||||
| 319 | assert(Start->getParent() == End->getParent() && "Must be in same block"); | ||||||||
| 320 | if (!Start->getFunction()->doesNotThrow() && | ||||||||
| 321 | !isa<AllocaInst>(getUnderlyingObject(V))) { | ||||||||
| 322 | for (const Instruction &I : | ||||||||
| 323 | make_range(Start->getIterator(), End->getIterator())) { | ||||||||
| 324 | if (I.mayThrow()) | ||||||||
| 325 | return true; | ||||||||
| 326 | } | ||||||||
| 327 | } | ||||||||
| 328 | return false; | ||||||||
| 329 | } | ||||||||
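| | // Illustrative hazard this check guards against (assumed scenario): if V | ||||||||
| | // escapes the function and an instruction between Start and End may throw, | ||||||||
| | // moving a write to V above that instruction would let an exception | ||||||||
| | // handler observe the write earlier than the original program allowed. | ||||||||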
| 330 | |||||||||
| 331 | void MemCpyOptPass::eraseInstruction(Instruction *I) { | ||||||||
| 332 | if (MSSAU) | ||||||||
| 333 | MSSAU->removeMemoryAccess(I); | ||||||||
| 334 | if (MD) | ||||||||
| 335 | MD->removeInstruction(I); | ||||||||
| 336 | I->eraseFromParent(); | ||||||||
| 337 | } | ||||||||
| 338 | |||||||||
| 339 | // Check for mod or ref of Loc between Start and End, excluding both boundaries. | ||||||||
| 340 | // Start and End must be in the same block. | ||||||||
| 341 | static bool accessedBetween(AliasAnalysis &AA, MemoryLocation Loc, | ||||||||
| 342 | const MemoryUseOrDef *Start, | ||||||||
| 343 | const MemoryUseOrDef *End) { | ||||||||
| 344 | assert(Start->getBlock() == End->getBlock() && "Only local supported"); | ||||||||
| 345 | for (const MemoryAccess &MA : | ||||||||
| 346 | make_range(++Start->getIterator(), End->getIterator())) { | ||||||||
| 347 | if (isModOrRefSet(AA.getModRefInfo(cast<MemoryUseOrDef>(MA).getMemoryInst(), | ||||||||
| 348 | Loc))) | ||||||||
| 349 | return true; | ||||||||
| 350 | } | ||||||||
| 351 | return false; | ||||||||
| 352 | } | ||||||||
| 353 | |||||||||
| 354 | // Check for mod of Loc between Start and End, excluding both boundaries. | ||||||||
| 355 | // Start and End can be in different blocks. | ||||||||
| 356 | static bool writtenBetween(MemorySSA *MSSA, MemoryLocation Loc, | ||||||||
| 357 | const MemoryUseOrDef *Start, | ||||||||
| 358 | const MemoryUseOrDef *End) { | ||||||||
| 359 | // TODO: Only walk until we hit Start. | ||||||||
| 360 | MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess( | ||||||||
| 361 | End->getDefiningAccess(), Loc); | ||||||||
| 362 | return !MSSA->dominates(Clobber, Start); | ||||||||
| 363 | } | ||||||||
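| | // Reasoning sketch (illustrative): if the clobbering access for Loc seen | ||||||||
| | // from End dominates Start, the nearest write sits at or above Start, so | ||||||||
| | // nothing strictly between the two wrote Loc; otherwise some write may | ||||||||
| | // lie between them and we conservatively report a modification. | ||||||||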
| 364 | |||||||||
| 365 | /// When scanning forward over instructions, we look for some other patterns to | ||||||||
| 366 | /// fold away. In particular, this looks for stores to neighboring locations of | ||||||||
| 367 | /// memory. If it sees enough consecutive ones, it attempts to merge them | ||||||||
| 368 | /// together into a memcpy/memset. | ||||||||
| 369 | Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst, | ||||||||
| 370 | Value *StartPtr, | ||||||||
| 371 | Value *ByteVal) { | ||||||||
| 372 | const DataLayout &DL = StartInst->getModule()->getDataLayout(); | ||||||||
| 373 | |||||||||
| 374 | // We can't track scalable types | ||||||||
| 375 | if (StoreInst *SI = dyn_cast<StoreInst>(StartInst)) | ||||||||
| 376 | if (DL.getTypeStoreSize(SI->getOperand(0)->getType()).isScalable()) | ||||||||
| 377 | return nullptr; | ||||||||
| 378 | |||||||||
| 379 | // Okay, so we now have a single splattable store. Scan to find all | ||||||||
| 380 | // subsequent stores of the same value at offsets from the same pointer. | ||||||||
| 381 | // Join these together into ranges, so we can decide whether contiguous blocks | ||||||||
| 382 | // are stored. | ||||||||
| 383 | MemsetRanges Ranges(DL); | ||||||||
| 384 | |||||||||
| 385 | BasicBlock::iterator BI(StartInst); | ||||||||
| 386 | |||||||||
| 387 | // Keeps track of the last memory use or def before the insertion point for | ||||||||
| 388 | // the new memset. The new MemoryDef for the inserted memsets will be inserted | ||||||||
| 389 | // after MemInsertPoint. It points to either LastMemDef or to the last user | ||||||||
| 390 | // before the insertion point of the memset, if there are any such users. | ||||||||
| 391 | MemoryUseOrDef *MemInsertPoint = nullptr; | ||||||||
| 392 | // Keeps track of the last MemoryDef between StartInst and the insertion point | ||||||||
| 393 | // for the new memset. This will become the defining access of the inserted | ||||||||
| 394 | // memsets. | ||||||||
| 395 | MemoryDef *LastMemDef = nullptr; | ||||||||
| 396 | for (++BI; !BI->isTerminator(); ++BI) { | ||||||||
| 397 | if (MSSAU) { | ||||||||
| 398 | auto *CurrentAcc = cast_or_null<MemoryUseOrDef>( | ||||||||
| 399 | MSSAU->getMemorySSA()->getMemoryAccess(&*BI)); | ||||||||
| 400 | if (CurrentAcc) { | ||||||||
| 401 | MemInsertPoint = CurrentAcc; | ||||||||
| 402 | if (auto *CurrentDef = dyn_cast<MemoryDef>(CurrentAcc)) | ||||||||
| 403 | LastMemDef = CurrentDef; | ||||||||
| 404 | } | ||||||||
| 405 | } | ||||||||
| 406 | |||||||||
| 407 | // Calls that only access inaccessible memory do not block merging | ||||||||
| 408 | // accessible stores. | ||||||||
| 409 | if (auto *CB = dyn_cast<CallBase>(BI)) { | ||||||||
| 410 | if (CB->onlyAccessesInaccessibleMemory()) | ||||||||
| 411 | continue; | ||||||||
| 412 | } | ||||||||
| 413 | |||||||||
| 414 | if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) { | ||||||||
| 415 | // If the instruction is readnone, ignore it, otherwise bail out. We | ||||||||
| 416 | // don't even allow readonly here because we don't want something like: | ||||||||
| 417 | // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A). | ||||||||
| 418 | if (BI->mayWriteToMemory() || BI->mayReadFromMemory()) | ||||||||
| 419 | break; | ||||||||
| 420 | continue; | ||||||||
| 421 | } | ||||||||
| 422 | |||||||||
| 423 | if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) { | ||||||||
| 424 | // If this is a store, see if we can merge it in. | ||||||||
| 425 | if (!NextStore->isSimple()) break; | ||||||||
| 426 | |||||||||
| 427 | Value *StoredVal = NextStore->getValueOperand(); | ||||||||
| 428 | |||||||||
| 429 | // Don't convert stores of non-integral pointer types to memsets (which | ||||||||
| 430 | // stores integers). | ||||||||
| 431 | if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType())) | ||||||||
| 432 | break; | ||||||||
| 433 | |||||||||
| 434 | // We can't track ranges involving scalable types. | ||||||||
| 435 | if (DL.getTypeStoreSize(StoredVal->getType()).isScalable()) | ||||||||
| 436 | break; | ||||||||
| 437 | |||||||||
| 438 | // Check to see if this stored value splats to the same byte value. | ||||||||
| 439 | Value *StoredByte = isBytewiseValue(StoredVal, DL); | ||||||||
| 440 | if (isa<UndefValue>(ByteVal) && StoredByte) | ||||||||
| 441 | ByteVal = StoredByte; | ||||||||
| 442 | if (ByteVal != StoredByte) | ||||||||
| 443 | break; | ||||||||
| 444 | |||||||||
| 445 | // Check to see if this store is to a constant offset from the start ptr. | ||||||||
| 446 | Optional<int64_t> Offset = | ||||||||
| 447 | isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL); | ||||||||
| 448 | if (!Offset) | ||||||||
| 449 | break; | ||||||||
| 450 | |||||||||
| 451 | Ranges.addStore(*Offset, NextStore); | ||||||||
| 452 | } else { | ||||||||
| 453 | MemSetInst *MSI = cast<MemSetInst>(BI); | ||||||||
| 454 | |||||||||
| 455 | if (MSI->isVolatile() || ByteVal != MSI->getValue() || | ||||||||
| 456 | !isa<ConstantInt>(MSI->getLength())) | ||||||||
| 457 | break; | ||||||||
| 458 | |||||||||
| 459 | // Check to see if this store is to a constant offset from the start ptr. | ||||||||
| 460 | Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL); | ||||||||
| 461 | if (!Offset) | ||||||||
| 462 | break; | ||||||||
| 463 | |||||||||
| 464 | Ranges.addMemSet(*Offset, MSI); | ||||||||
| 465 | } | ||||||||
| 466 | } | ||||||||
| 467 | |||||||||
| 468 | // If we have no ranges, then we just had a single store with nothing that | ||||||||
| 469 | // could be merged in. This is a very common case of course. | ||||||||
| 470 | if (Ranges.empty()) | ||||||||
| 471 | return nullptr; | ||||||||
| 472 | |||||||||
| 473 | // If we had at least one store that could be merged in, add the starting | ||||||||
| 474 | // store as well. We try to avoid this unless there is at least something | ||||||||
| 475 | // interesting as a small compile-time optimization. | ||||||||
| 476 | Ranges.addInst(0, StartInst); | ||||||||
| 477 | |||||||||
| 478 | // If we create any memsets, we put them right before the first instruction | ||||||||
| 479 | // that isn't part of the memset block. This ensures the memset is dominated | ||||||||
| 480 | // by any addressing instruction needed by the start of the block. | ||||||||
| 481 | IRBuilder<> Builder(&*BI); | ||||||||
| 482 | |||||||||
| 483 | // Now that we have full information about ranges, loop over the ranges and | ||||||||
| 484 | // emit memset's for anything big enough to be worthwhile. | ||||||||
| 485 | Instruction *AMemSet = nullptr; | ||||||||
| 486 | for (const MemsetRange &Range : Ranges) { | ||||||||
| 487 | if (Range.TheStores.size() == 1) continue; | ||||||||
| 488 | |||||||||
| 489 | // If it is profitable to lower this range to memset, do so now. | ||||||||
| 490 | if (!Range.isProfitableToUseMemset(DL)) | ||||||||
| 491 | continue; | ||||||||
| 492 | |||||||||
| 493 | // Otherwise, we do want to transform this! Create a new memset. | ||||||||
| 494 | // Get the starting pointer of the block. | ||||||||
| 495 | StartPtr = Range.StartPtr; | ||||||||
| 496 | |||||||||
| 497 | AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start, | ||||||||
| 498 | MaybeAlign(Range.Alignment)); | ||||||||
| 499 | LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SIdo { } while (false) | ||||||||
| 500 | : Range.TheStores) dbgs()do { } while (false) | ||||||||
| 501 | << *SI << '\n';do { } while (false) | ||||||||
| 502 | dbgs() << "With: " << *AMemSet << '\n')do { } while (false); | ||||||||
| 503 | if (!Range.TheStores.empty()) | ||||||||
| 504 | AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc()); | ||||||||
| 505 | |||||||||
| 506 | if (MSSAU) { | ||||||||
| 507 | assert(LastMemDef && MemInsertPoint && | ||||||||
| 508 | "Both LastMemDef and MemInsertPoint need to be set"); | ||||||||
| 509 | auto *NewDef = | ||||||||
| 510 | cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI | ||||||||
| 511 | ? MSSAU->createMemoryAccessBefore( | ||||||||
| 512 | AMemSet, LastMemDef, MemInsertPoint) | ||||||||
| 513 | : MSSAU->createMemoryAccessAfter( | ||||||||
| 514 | AMemSet, LastMemDef, MemInsertPoint)); | ||||||||
| 515 | MSSAU->insertDef(NewDef, /*RenameUses=*/true); | ||||||||
| 516 | LastMemDef = NewDef; | ||||||||
| 517 | MemInsertPoint = NewDef; | ||||||||
| 518 | } | ||||||||
| 519 | |||||||||
| 520 | // Zap all the stores. | ||||||||
| 521 | for (Instruction *SI : Range.TheStores) | ||||||||
| 522 | eraseInstruction(SI); | ||||||||
| 523 | |||||||||
| 524 | ++NumMemSetInfer; | ||||||||
| 525 | } | ||||||||
| 526 | |||||||||
| 527 | return AMemSet; | ||||||||
| 528 | } | ||||||||
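| | // Illustrative before/after for tryMergingIntoMemset (simplified IR; the | ||||||||
| | // real output carries alignment and the exact pointer types): | ||||||||
| | //   store i8 0, i8* %p0   ; %p0 = p+0 | ||||||||
| | //   store i8 0, i8* %p1   ; %p1 = p+1 | ||||||||
| | //   store i8 0, i8* %p2   ; %p2 = p+2 | ||||||||
| | //   store i8 0, i8* %p3   ; %p3 = p+3 | ||||||||
| | // becomes | ||||||||
| | //   call void @llvm.memset.p0i8.i64(i8* %p0, i8 0, i64 4, i1 false) | ||||||||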
| 529 | |||||||||
| 530 | // This method tries to lift a store instruction before position P. | ||||||||
| 531 | // It will lift the store and its argument, plus anything that | ||||||||
| 532 | // may alias with these. | ||||||||
| 533 | // The method returns true if it was successful. | ||||||||
| 534 | bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) { | ||||||||
| 535 | // If the store aliases this position, bail out early. | ||||||||
| 536 | MemoryLocation StoreLoc = MemoryLocation::get(SI); | ||||||||
| 537 | if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc))) | ||||||||
| 538 | return false; | ||||||||
| 539 | |||||||||
| 540 | // Keep track of the arguments of all instructions we plan to lift | ||||||||
| 541 | // so we can make sure to lift them as well if appropriate. | ||||||||
| 542 | DenseSet<Instruction*> Args; | ||||||||
| 543 | if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand())) | ||||||||
| 544 | if (Ptr->getParent() == SI->getParent()) | ||||||||
| 545 | Args.insert(Ptr); | ||||||||
| 546 | |||||||||
| 547 | // Instructions to lift before P. | ||||||||
| 548 | SmallVector<Instruction *, 8> ToLift{SI}; | ||||||||
| 549 | |||||||||
| 550 | // Memory locations of lifted instructions. | ||||||||
| 551 | SmallVector<MemoryLocation, 8> MemLocs{StoreLoc}; | ||||||||
| 552 | |||||||||
| 553 | // Lifted calls. | ||||||||
| 554 | SmallVector<const CallBase *, 8> Calls; | ||||||||
| 555 | |||||||||
| 556 | const MemoryLocation LoadLoc = MemoryLocation::get(LI); | ||||||||
| 557 | |||||||||
| 558 | for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) { | ||||||||
| 559 | auto *C = &*I; | ||||||||
| 560 | |||||||||
| 561 | // Make sure hoisting does not perform a store that was not guaranteed to | ||||||||
| 562 | // happen. | ||||||||
| 563 | if (!isGuaranteedToTransferExecutionToSuccessor(C)) | ||||||||
| 564 | return false; | ||||||||
| 565 | |||||||||
| 566 | bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, None)); | ||||||||
| 567 | |||||||||
| 568 | bool NeedLift = false; | ||||||||
| 569 | if (Args.erase(C)) | ||||||||
| 570 | NeedLift = true; | ||||||||
| 571 | else if (MayAlias) { | ||||||||
| 572 | NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) { | ||||||||
| 573 | return isModOrRefSet(AA->getModRefInfo(C, ML)); | ||||||||
| 574 | }); | ||||||||
| 575 | |||||||||
| 576 | if (!NeedLift) | ||||||||
| 577 | NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) { | ||||||||
| 578 | return isModOrRefSet(AA->getModRefInfo(C, Call)); | ||||||||
| 579 | }); | ||||||||
| 580 | } | ||||||||
| 581 | |||||||||
| 582 | if (!NeedLift) | ||||||||
| 583 | continue; | ||||||||
| 584 | |||||||||
| 585 | if (MayAlias) { | ||||||||
| 586 | // Since LI is implicitly moved downwards past the lifted instructions, | ||||||||
| 587 | // none of them may modify its source. | ||||||||
| 588 | if (isModSet(AA->getModRefInfo(C, LoadLoc))) | ||||||||
| 589 | return false; | ||||||||
| 590 | else if (const auto *Call = dyn_cast<CallBase>(C)) { | ||||||||
| 591 | // If we can't lift this before P, it's game over. | ||||||||
| 592 | if (isModOrRefSet(AA->getModRefInfo(P, Call))) | ||||||||
| 593 | return false; | ||||||||
| 594 | |||||||||
| 595 | Calls.push_back(Call); | ||||||||
| 596 | } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) { | ||||||||
| 597 | // If we can't lift this before P, it's game over. | ||||||||
| 598 | auto ML = MemoryLocation::get(C); | ||||||||
| 599 | if (isModOrRefSet(AA->getModRefInfo(P, ML))) | ||||||||
| 600 | return false; | ||||||||
| 601 | |||||||||
| 602 | MemLocs.push_back(ML); | ||||||||
| 603 | } else | ||||||||
| 604 | // We don't know how to lift this instruction. | ||||||||
| 605 | return false; | ||||||||
| 606 | } | ||||||||
| 607 | |||||||||
| 608 | ToLift.push_back(C); | ||||||||
| 609 | for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k) | ||||||||
| 610 | if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) { | ||||||||
| 611 | if (A->getParent() == SI->getParent()) { | ||||||||
| 612 | // Cannot hoist user of P above P | ||||||||
| 613 | if (A == P) return false; | ||||||||
| 614 | Args.insert(A); | ||||||||
| 615 | } | ||||||||
| 616 | } | ||||||||
| 617 | } | ||||||||
| 618 | |||||||||
| 619 | // Find MSSA insertion point. Normally P will always have a corresponding | ||||||||
| 620 | // memory access before which we can insert. However, with non-standard AA | ||||||||
| 621 | // pipelines, there may be a mismatch between AA and MSSA, in which case we | ||||||||
| 622 | // will scan for a memory access before P. In either case, we know for sure | ||||||||
| 623 | // that at least the load will have a memory access. | ||||||||
| 624 | // TODO: Simplify this once P will be determined by MSSA, in which case the | ||||||||
| 625 | // discrepancy can no longer occur. | ||||||||
| 626 | MemoryUseOrDef *MemInsertPoint = nullptr; | ||||||||
| 627 | if (MSSAU) { | ||||||||
| 628 | if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(P)) { | ||||||||
| 629 | MemInsertPoint = cast<MemoryUseOrDef>(--MA->getIterator()); | ||||||||
| 630 | } else { | ||||||||
| 631 | const Instruction *ConstP = P; | ||||||||
| 632 | for (const Instruction &I : make_range(++ConstP->getReverseIterator(), | ||||||||
| 633 | ++LI->getReverseIterator())) { | ||||||||
| 634 | if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(&I)) { | ||||||||
| 635 | MemInsertPoint = MA; | ||||||||
| 636 | break; | ||||||||
| 637 | } | ||||||||
| 638 | } | ||||||||
| 639 | } | ||||||||
| 640 | } | ||||||||
| 641 | |||||||||
| 642 | // We made it, we need to lift. | ||||||||
| 643 | for (auto *I : llvm::reverse(ToLift)) { | ||||||||
| 644 | LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n")do { } while (false); | ||||||||
| 645 | I->moveBefore(P); | ||||||||
| 646 | if (MSSAU) { | ||||||||
| 647 | assert(MemInsertPoint && "Must have found insert point"); | ||||||||
| 648 | if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(I)) { | ||||||||
| 649 | MSSAU->moveAfter(MA, MemInsertPoint); | ||||||||
| 650 | MemInsertPoint = MA; | ||||||||
| 651 | } | ||||||||
| 652 | } | ||||||||
| 653 | } | ||||||||
| 654 | |||||||||
| 655 | return true; | ||||||||
| 656 | } | ||||||||
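| | // Sketch of moveUp's effect (illustrative): given | ||||||||
| | //   %v = load %src ; <instruction P that may write %src> ; store %v, %dest | ||||||||
| | // the store (plus any address computations it depends on) is lifted to | ||||||||
| | // just before P, so the caller can promote the load/store pair to a | ||||||||
| | // memcpy/memmove at P, ahead of the clobber of the source. | ||||||||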
| 657 | |||||||||
| 658 | bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) { | ||||||||
| 659 | if (!SI->isSimple()) return false; | ||||||||
| 660 | |||||||||
| 661 | // Avoid merging nontemporal stores since the resulting | ||||||||
| 662 | // memcpy/memset would not be able to preserve the nontemporal hint. | ||||||||
| 663 | // In theory we could teach the pass to propagate the !nontemporal metadata to | ||||||||
| 664 | // memset calls. However, that change would force the backend to | ||||||||
| 665 | // conservatively expand !nontemporal memset calls back to sequences of | ||||||||
| 666 | // store instructions (effectively undoing the merging). | ||||||||
| 667 | if (SI->getMetadata(LLVMContext::MD_nontemporal)) | ||||||||
| 668 | return false; | ||||||||
| 669 | |||||||||
| 670 | const DataLayout &DL = SI->getModule()->getDataLayout(); | ||||||||
| 671 | |||||||||
| 672 | Value *StoredVal = SI->getValueOperand(); | ||||||||
| 673 | |||||||||
| 674 | // Not all the transforms below are correct for non-integral pointers, bail | ||||||||
| 675 | // until we've audited the individual pieces. | ||||||||
| 676 | if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType())) | ||||||||
| 677 | return false; | ||||||||
| 678 | |||||||||
| 679 | // Load to store forwarding can be interpreted as memcpy. | ||||||||
| 680 | if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) { | ||||||||
| 681 | if (LI->isSimple() && LI->hasOneUse() && | ||||||||
| 682 | LI->getParent() == SI->getParent()) { | ||||||||
| 683 | |||||||||
| 684 | auto *T = LI->getType(); | ||||||||
| 685 | if (T->isAggregateType()) { | ||||||||
| 686 | MemoryLocation LoadLoc = MemoryLocation::get(LI); | ||||||||
| 687 | |||||||||
| 688 | // We use alias analysis to check if an instruction may store to | ||||||||
| 689 | // the memory we load from in between the load and the store. If | ||||||||
| 690 | // such an instruction is found, we try to promote there instead | ||||||||
| 691 | // of at the store position. | ||||||||
| 692 | // TODO: Can use MSSA for this. | ||||||||
| 693 | Instruction *P = SI; | ||||||||
| 694 | for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) { | ||||||||
| 695 | if (isModSet(AA->getModRefInfo(&I, LoadLoc))) { | ||||||||
| 696 | P = &I; | ||||||||
| 697 | break; | ||||||||
| 698 | } | ||||||||
| 699 | } | ||||||||
| 700 | |||||||||
| 701 | // We found an instruction that may write to the loaded memory. | ||||||||
| 702 | // We can try to promote at this position instead of the store | ||||||||
| 703 | // position if nothing aliases the store memory after this and the store | ||||||||
| 704 | // destination is not in the range. | ||||||||
| 705 | if (P && P != SI) { | ||||||||
| 706 | if (!moveUp(SI, P, LI)) | ||||||||
| 707 | P = nullptr; | ||||||||
| 708 | } | ||||||||
| 709 | |||||||||
| 710 | // If a valid insertion position is found, then we can promote | ||||||||
| 711 | // the load/store pair to a memcpy. | ||||||||
| 712 | if (P) { | ||||||||
| 713 | // If we load from memory that may alias the memory we store to, | ||||||||
| 714 | // memmove must be used to preserve semantics. If not, memcpy can | ||||||||
| 715 | // be used. | ||||||||
| 716 | bool UseMemMove = false; | ||||||||
| 717 | if (!AA->isNoAlias(MemoryLocation::get(SI), LoadLoc)) | ||||||||
| 718 | UseMemMove = true; | ||||||||
| 719 | |||||||||
| 720 | uint64_t Size = DL.getTypeStoreSize(T); | ||||||||
| 721 | |||||||||
| 722 | IRBuilder<> Builder(P); | ||||||||
| 723 | Instruction *M; | ||||||||
| 724 | if (UseMemMove) | ||||||||
| 725 | M = Builder.CreateMemMove( | ||||||||
| 726 | SI->getPointerOperand(), SI->getAlign(), | ||||||||
| 727 | LI->getPointerOperand(), LI->getAlign(), Size); | ||||||||
| 728 | else | ||||||||
| 729 | M = Builder.CreateMemCpy( | ||||||||
| 730 | SI->getPointerOperand(), SI->getAlign(), | ||||||||
| 731 | LI->getPointerOperand(), LI->getAlign(), Size); | ||||||||
| 732 | |||||||||
| 733 | LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "do { } while (false) | ||||||||
| 734 | << *M << "\n")do { } while (false); | ||||||||
| 735 | |||||||||
| 736 | if (MSSAU) { | ||||||||
| 737 | auto *LastDef = | ||||||||
| 738 | cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI)); | ||||||||
| 739 | auto *NewAccess = | ||||||||
| 740 | MSSAU->createMemoryAccessAfter(M, LastDef, LastDef); | ||||||||
| 741 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||||||
| 742 | } | ||||||||
| 743 | |||||||||
| 744 | eraseInstruction(SI); | ||||||||
| 745 | eraseInstruction(LI); | ||||||||
| 746 | ++NumMemCpyInstr; | ||||||||
| 747 | |||||||||
| 748 | // Make sure we do not invalidate the iterator. | ||||||||
| 749 | BBI = M->getIterator(); | ||||||||
| 750 | return true; | ||||||||
| 751 | } | ||||||||
| 752 | } | ||||||||
| 753 | |||||||||
| 754 | // Detect cases where we're performing call slot forwarding, but | ||||||||
| 755 | // happen to be using a load-store pair to implement it, rather than | ||||||||
| 756 | // a memcpy. | ||||||||
| 757 | CallInst *C = nullptr; | ||||||||
| 758 | if (EnableMemorySSA) { | ||||||||
| 759 | if (auto *LoadClobber = dyn_cast<MemoryUseOrDef>( | ||||||||
| 760 | MSSA->getWalker()->getClobberingMemoryAccess(LI))) { | ||||||||
| 761 | // The load must post-dominate the call. Limit to the same block for now. | ||||||||
| 762 | // TODO: Support non-local call-slot optimization? | ||||||||
| 763 | if (LoadClobber->getBlock() == SI->getParent()) | ||||||||
| 764 | C = dyn_cast_or_null<CallInst>(LoadClobber->getMemoryInst()); | ||||||||
| 765 | } | ||||||||
| 766 | } else { | ||||||||
| 767 | MemDepResult ldep = MD->getDependency(LI); | ||||||||
| 768 | if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst())) | ||||||||
| 769 | C = dyn_cast<CallInst>(ldep.getInst()); | ||||||||
| 770 | } | ||||||||
| 771 | |||||||||
| 772 | if (C) { | ||||||||
| 773 | // Check that nothing touches the dest of the "copy" between | ||||||||
| 774 | // the call and the store. | ||||||||
| 775 | MemoryLocation StoreLoc = MemoryLocation::get(SI); | ||||||||
| 776 | if (EnableMemorySSA) { | ||||||||
| 777 | if (accessedBetween(*AA, StoreLoc, MSSA->getMemoryAccess(C), | ||||||||
| 778 | MSSA->getMemoryAccess(SI))) | ||||||||
| 779 | C = nullptr; | ||||||||
| 780 | } else { | ||||||||
| 781 | for (BasicBlock::iterator I = --SI->getIterator(), | ||||||||
| 782 | E = C->getIterator(); | ||||||||
| 783 | I != E; --I) { | ||||||||
| 784 | if (isModOrRefSet(AA->getModRefInfo(&*I, StoreLoc))) { | ||||||||
| 785 | C = nullptr; | ||||||||
| 786 | break; | ||||||||
| 787 | } | ||||||||
| 788 | } | ||||||||
| 789 | } | ||||||||
| 790 | } | ||||||||
| 791 | |||||||||
| 792 | if (C) { | ||||||||
| 793 | bool changed = performCallSlotOptzn( | ||||||||
| 794 | LI, SI, SI->getPointerOperand()->stripPointerCasts(), | ||||||||
| 795 | LI->getPointerOperand()->stripPointerCasts(), | ||||||||
| 796 | DL.getTypeStoreSize(SI->getOperand(0)->getType()), | ||||||||
| 797 | commonAlignment(SI->getAlign(), LI->getAlign()), C); | ||||||||
| 798 | if (changed) { | ||||||||
| 799 | eraseInstruction(SI); | ||||||||
| 800 | eraseInstruction(LI); | ||||||||
| 801 | ++NumMemCpyInstr; | ||||||||
| 802 | return true; | ||||||||
| 803 | } | ||||||||
| 804 | } | ||||||||
| 805 | } | ||||||||
| 806 | } | ||||||||
| 807 | |||||||||
| 808 | // There are two cases that are interesting for this code to handle: memcpy | ||||||||
| 809 | // and memset. Right now we only handle memset. | ||||||||
| 810 | |||||||||
| 811 | // Ensure that the value being stored is something that can be memset a | ||||||||
| 812 | // byte at a time, like "0" or "-1" of any width, as well as things like | ||||||||
| 813 | // 0xA0A0A0A0 and 0.0. | ||||||||
| 814 | auto *V = SI->getOperand(0); | ||||||||
| 815 | if (Value *ByteVal = isBytewiseValue(V, DL)) { | ||||||||
| 816 | if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(), | ||||||||
| 817 | ByteVal)) { | ||||||||
| 818 | BBI = I->getIterator(); // Don't invalidate iterator. | ||||||||
| 819 | return true; | ||||||||
| 820 | } | ||||||||
| 821 | |||||||||
| 822 | // If we have an aggregate, we try to promote it to memset regardless | ||||||||
| 823 | // of opportunity for merging as it can expose optimization opportunities | ||||||||
| 824 | // in subsequent passes. | ||||||||
| 825 | auto *T = V->getType(); | ||||||||
| 826 | if (T->isAggregateType()) { | ||||||||
| 827 | uint64_t Size = DL.getTypeStoreSize(T); | ||||||||
| 828 | IRBuilder<> Builder(SI); | ||||||||
| 829 | auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size, | ||||||||
| 830 | SI->getAlign()); | ||||||||
| 831 | |||||||||
| 832 | LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n")do { } while (false); | ||||||||
| 833 | |||||||||
| 834 | if (MSSAU) { | ||||||||
| 835 | assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI))); | ||||||||
| 836 | auto *LastDef = | ||||||||
| 837 | cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI)); | ||||||||
| 838 | auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef); | ||||||||
| 839 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||||||
| 840 | } | ||||||||
| 841 | |||||||||
| 842 | eraseInstruction(SI); | ||||||||
| 843 | NumMemSetInfer++; | ||||||||
| 844 | |||||||||
| 845 | // Make sure we do not invalidate the iterator. | ||||||||
| 846 | BBI = M->getIterator(); | ||||||||
| 847 | return true; | ||||||||
| 848 | } | ||||||||
| 849 | } | ||||||||
| 850 | |||||||||
| 851 | return false; | ||||||||
| 852 | } | ||||||||
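| | // Illustrative example of the aggregate path above (assumed IR, simplified; | ||||||||
| | // %p.cast names the bitcast the builder would insert): | ||||||||
| | //   store {i64, i64} zeroinitializer, {i64, i64}* %p | ||||||||
| | // is promoted to | ||||||||
| | //   call void @llvm.memset.p0i8.i64(i8* %p.cast, i8 0, i64 16, i1 false) | ||||||||
| | // because isBytewiseValue recognizes zeroinitializer as a splat of byte 0. | ||||||||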
| 853 | |||||||||
| 854 | bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) { | ||||||||
| 855 | // See if there is another memset or store neighboring this memset which | ||||||||
| 856 | // allows us to widen out the memset to do a single larger store. | ||||||||
| 857 | if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile()) | ||||||||
| 858 | if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(), | ||||||||
| 859 | MSI->getValue())) { | ||||||||
| 860 | BBI = I->getIterator(); // Don't invalidate iterator. | ||||||||
| 861 | return true; | ||||||||
| 862 | } | ||||||||
| 863 | return false; | ||||||||
| 864 | } | ||||||||
| 865 | |||||||||
| 866 | /// Takes a memcpy and a call that it depends on, | ||||||||
| 867 | /// and checks for the possibility of a call slot optimization by having | ||||||||
| 868 | /// the call write its result directly into the destination of the memcpy. | ||||||||
| 869 | bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad, | ||||||||
| 870 | Instruction *cpyStore, Value *cpyDest, | ||||||||
| 871 | Value *cpySrc, TypeSize cpySize, | ||||||||
| 872 | Align cpyAlign, CallInst *C) { | ||||||||
| 873 | // The general transformation to keep in mind is | ||||||||
| 874 | // | ||||||||
| 875 | // call @func(..., src, ...) | ||||||||
| 876 | // memcpy(dest, src, ...) | ||||||||
| 877 | // | ||||||||
| 878 | // -> | ||||||||
| 879 | // | ||||||||
| 880 | // memcpy(dest, src, ...) | ||||||||
| 881 | // call @func(..., dest, ...) | ||||||||
| 882 | // | ||||||||
| 883 | // Since moving the memcpy is technically awkward, we additionally check that | ||||||||
| 884 | // src only holds uninitialized values at the moment of the call, meaning that | ||||||||
| 885 | // the memcpy can be discarded rather than moved. | ||||||||
| 886 | |||||||||
| 887 | // We can't optimize scalable types. | ||||||||
| 888 | if (cpySize.isScalable()) | ||||||||
| 889 | return false; | ||||||||
| 890 | |||||||||
| 891 | // Lifetime marks shouldn't be operated on. | ||||||||
| 892 | if (Function *F = C->getCalledFunction()) | ||||||||
| 893 | if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start) | ||||||||
| 894 | return false; | ||||||||
| 895 | |||||||||
| 896 | // Require that src be an alloca. This simplifies the reasoning considerably. | ||||||||
| 897 | AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc); | ||||||||
| 898 | if (!srcAlloca) | ||||||||
| 899 | return false; | ||||||||
| 900 | |||||||||
| 901 | ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize()); | ||||||||
| 902 | if (!srcArraySize) | ||||||||
| 903 | return false; | ||||||||
| 904 | |||||||||
| 905 | const DataLayout &DL = cpyLoad->getModule()->getDataLayout(); | ||||||||
| 906 | uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) * | ||||||||
| 907 | srcArraySize->getZExtValue(); | ||||||||
| 908 | |||||||||
| 909 | if (cpySize < srcSize) | ||||||||
| 910 | return false; | ||||||||
| 911 | |||||||||
| 912 | // Check that accessing the first srcSize bytes of dest will not cause a | ||||||||
| 913 | // trap. Otherwise the transform is invalid since it might cause a trap | ||||||||
| 914 | // to occur earlier than it otherwise would. | ||||||||
| 915 | if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpySize), | ||||||||
| 916 | DL, C, DT)) | ||||||||
| 917 | return false; | ||||||||
| 918 | |||||||||
| 919 | // Make sure that nothing can observe cpyDest being written early. There are | ||||||||
| 920 | // a number of cases to consider: | ||||||||
| 921 | // 1. cpyDest cannot be accessed between C and cpyStore as a precondition of | ||||||||
| 922 | // the transform. | ||||||||
| 923 | // 2. C itself may not access cpyDest (prior to the transform). This is | ||||||||
| 924 | // checked further below. | ||||||||
| 925 | // 3. If cpyDest is accessible to the caller of this function (potentially | ||||||||
| 926 | // captured and not based on an alloca), we need to ensure that we cannot | ||||||||
| 927 | // unwind between C and cpyStore. This is checked here. | ||||||||
| 928 | // 4. If cpyDest is potentially captured, there may be accesses to it from | ||||||||
| 929 | // another thread. In this case, we need to check that cpyStore is | ||||||||
| 930 | // guaranteed to be executed if C is. As it is a non-atomic access, it | ||||||||
| 931 | // renders accesses from other threads undefined. | ||||||||
| 932 | // TODO: This is currently not checked. | ||||||||
| 933 | if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore)) | ||||||||
| 934 | return false; | ||||||||
| 935 | |||||||||
| 936 | // Check that dest points to memory that is at least as aligned as src. | ||||||||
| 937 | Align srcAlign = srcAlloca->getAlign(); | ||||||||
| 938 | bool isDestSufficientlyAligned = srcAlign <= cpyAlign; | ||||||||
| 939 | // If dest is not aligned enough and we can't increase its alignment then | ||||||||
| 940 | // bail out. | ||||||||
| 941 | if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest)) | ||||||||
| 942 | return false; | ||||||||
| 943 | |||||||||
| 944 | // Check that src is not accessed except via the call and the memcpy. This | ||||||||
| 945 | // guarantees that it holds only undefined values when passed in (so the final | ||||||||
| 946 | // memcpy can be dropped), that it is not read or written between the call and | ||||||||
| 947 | // the memcpy, and that writing beyond the end of it is undefined. | ||||||||
| 948 | SmallVector<User *, 8> srcUseList(srcAlloca->users()); | ||||||||
| 949 | while (!srcUseList.empty()) { | ||||||||
| 950 | User *U = srcUseList.pop_back_val(); | ||||||||
| 951 | |||||||||
| 952 | if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) { | ||||||||
| 953 | append_range(srcUseList, U->users()); | ||||||||
| 954 | continue; | ||||||||
| 955 | } | ||||||||
| 956 | if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) { | ||||||||
| 957 | if (!G->hasAllZeroIndices()) | ||||||||
| 958 | return false; | ||||||||
| 959 | |||||||||
| 960 | append_range(srcUseList, U->users()); | ||||||||
| 961 | continue; | ||||||||
| 962 | } | ||||||||
| 963 | if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U)) | ||||||||
| 964 | if (IT->isLifetimeStartOrEnd()) | ||||||||
| 965 | continue; | ||||||||
| 966 | |||||||||
| 967 | if (U != C && U != cpyLoad) | ||||||||
| 968 | return false; | ||||||||
| 969 | } | ||||||||
| 970 | |||||||||
| 971 | // Check that src isn't captured by the called function since the | ||||||||
| 972 | // transformation can cause aliasing issues in that case. | ||||||||
| 973 | for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI) | ||||||||
| 974 | if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI)) | ||||||||
| 975 | return false; | ||||||||
| 976 | |||||||||
| 977 | // Since we're changing the parameter to the callsite, we need to make sure | ||||||||
| 978 | // that what would be the new parameter dominates the callsite. | ||||||||
| 979 | if (!DT->dominates(cpyDest, C)) { | ||||||||
| 980 | // Support moving a constant index GEP before the call. | ||||||||
| 981 | auto *GEP = dyn_cast<GetElementPtrInst>(cpyDest); | ||||||||
| 982 | if (GEP && GEP->hasAllConstantIndices() && | ||||||||
| 983 | DT->dominates(GEP->getPointerOperand(), C)) | ||||||||
| 984 | GEP->moveBefore(C); | ||||||||
| 985 | else | ||||||||
| 986 | return false; | ||||||||
| 987 | } | ||||||||
| 988 | |||||||||
| 989 | // In addition to knowing that the call does not access src in some | ||||||||
| 990 | // unexpected manner, for example via a global, which we deduce from | ||||||||
| 991 | // the use analysis, we also need to know that it does not sneakily | ||||||||
| 992 | // access dest. We rely on AA to figure this out for us. | ||||||||
| 993 | ModRefInfo MR = AA->getModRefInfo(C, cpyDest, LocationSize::precise(srcSize)); | ||||||||
| 994 | // If necessary, perform additional analysis. | ||||||||
| 995 | if (isModOrRefSet(MR)) | ||||||||
| 996 | MR = AA->callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), DT); | ||||||||
| 997 | if (isModOrRefSet(MR)) | ||||||||
| 998 | return false; | ||||||||
| 999 | |||||||||
| 1000 | // We can't create address space casts here because we don't know if they're | ||||||||
| 1001 | // safe for the target. | ||||||||
| 1002 | if (cpySrc->getType()->getPointerAddressSpace() != | ||||||||
| 1003 | cpyDest->getType()->getPointerAddressSpace()) | ||||||||
| 1004 | return false; | ||||||||
| 1005 | for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI) | ||||||||
| 1006 | if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc && | ||||||||
| 1007 | cpySrc->getType()->getPointerAddressSpace() != | ||||||||
| 1008 | C->getArgOperand(ArgI)->getType()->getPointerAddressSpace()) | ||||||||
| 1009 | return false; | ||||||||
| 1010 | |||||||||
| 1011 | // All the checks have passed, so do the transformation. | ||||||||
| 1012 | bool changedArgument = false; | ||||||||
| 1013 | for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI) | ||||||||
| 1014 | if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) { | ||||||||
| 1015 | Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest | ||||||||
| 1016 | : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(), | ||||||||
| 1017 | cpyDest->getName(), C); | ||||||||
| 1018 | changedArgument = true; | ||||||||
| 1019 | if (C->getArgOperand(ArgI)->getType() == Dest->getType()) | ||||||||
| 1020 | C->setArgOperand(ArgI, Dest); | ||||||||
| 1021 | else | ||||||||
| 1022 | C->setArgOperand(ArgI, CastInst::CreatePointerCast( | ||||||||
| 1023 | Dest, C->getArgOperand(ArgI)->getType(), | ||||||||
| 1024 | Dest->getName(), C)); | ||||||||
| 1025 | } | ||||||||
| 1026 | |||||||||
| 1027 | if (!changedArgument) | ||||||||
| 1028 | return false; | ||||||||
| 1029 | |||||||||
| 1030 | // If the destination wasn't sufficiently aligned then increase its alignment. | ||||||||
| 1031 | if (!isDestSufficientlyAligned) { | ||||||||
| 1032 | assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!"); | ||||||||
| 1033 | cast<AllocaInst>(cpyDest)->setAlignment(srcAlign); | ||||||||
| 1034 | } | ||||||||
| 1035 | |||||||||
| 1036 | // Drop any cached information about the call, because we may have changed | ||||||||
| 1037 | // its dependence information by changing its parameter. | ||||||||
| 1038 | if (MD) | ||||||||
| 1039 | MD->removeInstruction(C); | ||||||||
| 1040 | |||||||||
| 1041 | // Update AA metadata | ||||||||
| 1042 | // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be | ||||||||
| 1043 | // handled here, but combineMetadata doesn't support them yet | ||||||||
| 1044 | unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, | ||||||||
| 1045 | LLVMContext::MD_noalias, | ||||||||
| 1046 | LLVMContext::MD_invariant_group, | ||||||||
| 1047 | LLVMContext::MD_access_group}; | ||||||||
| 1048 | combineMetadata(C, cpyLoad, KnownIDs, true); | ||||||||
| 1049 | |||||||||
| 1050 | ++NumCallSlot; | ||||||||
| 1051 | return true; | ||||||||
| 1052 | } | ||||||||
| 1053 | |||||||||
| 1054 | /// We've found that the (upward scanning) memory dependence of memcpy 'M' is | ||||||||
| 1055 | /// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can. | ||||||||
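| | /// | ||||||||
| | /// As an illustrative sketch (names a, b, c, n are hypothetical): given | ||||||||
| | /// \code | ||||||||
| | /// memcpy(b <- a, n); // MDep | ||||||||
| | /// memcpy(c <- b, n); // M | ||||||||
| | /// \endcode | ||||||||
| | /// M can be rewritten as memcpy(c <- a, n), which leaves MDep dead if b | ||||||||
| | /// has no other uses. | ||||||||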
| 1056 | bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M, | ||||||||
| 1057 | MemCpyInst *MDep) { | ||||||||
| 1058 | // We can only transform memcpys where the dest of one is the source of the | ||||||||
| 1059 | // other. | ||||||||
| 1060 | if (M->getSource() != MDep->getDest() || MDep->isVolatile()) | ||||||||
| 1061 | return false; | ||||||||
| 1062 | |||||||||
| 1063 | // If dep instruction is reading from our current input, then it is a noop | ||||||||
| 1064 | // transfer and substituting the input won't change this instruction. Just | ||||||||
| 1065 | // ignore the input and let someone else zap MDep. This handles cases like: | ||||||||
| 1066 | // memcpy(a <- a) | ||||||||
| 1067 | // memcpy(b <- a) | ||||||||
| 1068 | if (M->getSource() == MDep->getSource()) | ||||||||
| 1069 | return false; | ||||||||
| 1070 | |||||||||
| 1071 | // Second, the lengths of the memcpys must be the same, or the preceding one | ||||||||
| 1072 | // must be larger than the following one. | ||||||||
| 1073 | if (MDep->getLength() != M->getLength()) { | ||||||||
| 1074 | ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength()); | ||||||||
| 1075 | ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength()); | ||||||||
| 1076 | if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue()) | ||||||||
| 1077 | return false; | ||||||||
| 1078 | } | ||||||||
| 1079 | |||||||||
| 1080 | // Verify that the copied-from memory doesn't change in between the two | ||||||||
| 1081 | // transfers. For example, in: | ||||||||
| 1082 | // memcpy(a <- b) | ||||||||
| 1083 | // *b = 42; | ||||||||
| 1084 | // memcpy(c <- a) | ||||||||
| 1085 | // It would be invalid to transform the second memcpy into memcpy(c <- b). | ||||||||
| 1086 | // | ||||||||
| 1087 | // TODO: If the code between M and MDep is transparent to the destination "c", | ||||||||
| 1088 | // then we could still perform the xform by moving M up to the first memcpy. | ||||||||
| 1089 | if (EnableMemorySSA) { | ||||||||
| 1090 | // TODO: It would be sufficient to check the MDep source up to the memcpy | ||||||||
| 1091 | // size of M, rather than MDep. | ||||||||
| 1092 | if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep), | ||||||||
| 1093 | MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(M))) | ||||||||
| 1094 | return false; | ||||||||
| 1095 | } else { | ||||||||
| 1096 | // NOTE: This is conservative; it will stop on any read from the source loc, | ||||||||
| 1097 | // not just the defining memcpy. | ||||||||
| 1098 | MemDepResult SourceDep = | ||||||||
| 1099 | MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false, | ||||||||
| 1100 | M->getIterator(), M->getParent()); | ||||||||
| 1101 | if (!SourceDep.isClobber() || SourceDep.getInst() != MDep) | ||||||||
| 1102 | return false; | ||||||||
| 1103 | } | ||||||||
| 1104 | |||||||||
| 1105 | // If the dest of the second might alias the source of the first, then the | ||||||||
| 1106 | // source and dest might overlap. We still want to eliminate the intermediate | ||||||||
| 1107 | // value, but we have to generate a memmove instead of memcpy. | ||||||||
| 1108 | bool UseMemMove = false; | ||||||||
| 1109 | if (!AA->isNoAlias(MemoryLocation::getForDest(M), | ||||||||
| 1110 | MemoryLocation::getForSource(MDep))) | ||||||||
| 1111 | UseMemMove = true; | ||||||||
| 1112 | |||||||||
| 1113 | // If all checks passed, then we can transform M. | ||||||||
| 1114 | LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n" | ||||||||
| 1115 | << *MDep << '\n' << *M << '\n'); | ||||||||
| 1116 | |||||||||
| 1117 | // TODO: Is this worth it if we're creating a less aligned memcpy? For | ||||||||
| 1118 | // example we could be moving from movaps -> movq on x86. | ||||||||
| 1119 | IRBuilder<> Builder(M); | ||||||||
| 1120 | Instruction *NewM; | ||||||||
| 1121 | if (UseMemMove) | ||||||||
| 1122 | NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(), | ||||||||
| 1123 | MDep->getRawSource(), MDep->getSourceAlign(), | ||||||||
| 1124 | M->getLength(), M->isVolatile()); | ||||||||
| 1125 | else if (isa<MemCpyInlineInst>(M)) { | ||||||||
| 1126 | // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is | ||||||||
| 1127 | // never allowed since that would allow the latter to be lowered as a call | ||||||||
| 1128 | // to an external function. | ||||||||
| 1129 | NewM = Builder.CreateMemCpyInline( | ||||||||
| 1130 | M->getRawDest(), M->getDestAlign(), MDep->getRawSource(), | ||||||||
| 1131 | MDep->getSourceAlign(), M->getLength(), M->isVolatile()); | ||||||||
| 1132 | } else | ||||||||
| 1133 | NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(), | ||||||||
| 1134 | MDep->getRawSource(), MDep->getSourceAlign(), | ||||||||
| 1135 | M->getLength(), M->isVolatile()); | ||||||||
| 1136 | |||||||||
| 1137 | if (MSSAU) { | ||||||||
| 1138 | assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M))); | ||||||||
| 1139 | auto *LastDef = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)); | ||||||||
| 1140 | auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef); | ||||||||
| 1141 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||||||
| 1142 | } | ||||||||
| 1143 | |||||||||
| 1144 | // Remove the instruction we're replacing. | ||||||||
| 1145 | eraseInstruction(M); | ||||||||
| 1146 | ++NumMemCpyInstr; | ||||||||
| 1147 | return true; | ||||||||
| 1148 | } | ||||||||
| 1149 | |||||||||
| 1150 | /// We've found that the (upward scanning) memory dependence of \p MemCpy is | ||||||||
| 1151 | /// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that | ||||||||
| 1152 | /// weren't copied over by \p MemCpy. | ||||||||
| 1153 | /// | ||||||||
| 1154 | /// In other words, transform: | ||||||||
| 1155 | /// \code | ||||||||
| 1156 | /// memset(dst, c, dst_size); | ||||||||
| 1157 | /// memcpy(dst, src, src_size); | ||||||||
| 1158 | /// \endcode | ||||||||
| 1159 | /// into: | ||||||||
| 1160 | /// \code | ||||||||
| 1161 | /// memcpy(dst, src, src_size); | ||||||||
| 1162 | /// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size); | ||||||||
| 1163 | /// \endcode | ||||||||
| 1164 | bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy, | ||||||||
| 1165 | MemSetInst *MemSet) { | ||||||||
| 1166 | // We can only transform memset/memcpy with the same destination. | ||||||||
| 1167 | if (!AA->isMustAlias(MemSet->getDest(), MemCpy->getDest())) | ||||||||
| 1168 | return false; | ||||||||
| 1169 | |||||||||
| 1170 | // Check that src and dst of the memcpy aren't the same. While memcpy | ||||||||
| 1171 | // operands cannot partially overlap, exact equality is allowed. | ||||||||
| 1172 | if (!AA->isNoAlias(MemoryLocation(MemCpy->getSource(), | ||||||||
| 1173 | LocationSize::precise(1)), | ||||||||
| 1174 | MemoryLocation(MemCpy->getDest(), | ||||||||
| 1175 | LocationSize::precise(1)))) | ||||||||
| 1176 | return false; | ||||||||
| 1177 | |||||||||
| 1178 | if (EnableMemorySSA) { | ||||||||
| 1179 | // We know that dst up to src_size is not written. We now need to make sure | ||||||||
| 1180 | // that dst up to dst_size is not accessed. (If we did not move the memset, | ||||||||
| 1181 | // checking for reads would be sufficient.) | ||||||||
| 1182 | if (accessedBetween(*AA, MemoryLocation::getForDest(MemSet), | ||||||||
| 1183 | MSSA->getMemoryAccess(MemSet), | ||||||||
| 1184 | MSSA->getMemoryAccess(MemCpy))) { | ||||||||
| 1185 | return false; | ||||||||
| 1186 | } | ||||||||
| 1187 | } else { | ||||||||
| 1188 | // We have already checked that dst up to src_size is not accessed. We | ||||||||
| 1189 | // need to make sure that there are no accesses up to dst_size either. | ||||||||
| 1190 | MemDepResult DstDepInfo = MD->getPointerDependencyFrom( | ||||||||
| 1191 | MemoryLocation::getForDest(MemSet), false, MemCpy->getIterator(), | ||||||||
| 1192 | MemCpy->getParent()); | ||||||||
| 1193 | if (DstDepInfo.getInst() != MemSet) | ||||||||
| 1194 | return false; | ||||||||
| 1195 | } | ||||||||
| 1196 | |||||||||
| 1197 | // Use the same i8* dest as the memcpy, killing the memset dest if different. | ||||||||
| 1198 | Value *Dest = MemCpy->getRawDest(); | ||||||||
| 1199 | Value *DestSize = MemSet->getLength(); | ||||||||
| 1200 | Value *SrcSize = MemCpy->getLength(); | ||||||||
| 1201 | |||||||||
| 1202 | if (mayBeVisibleThroughUnwinding(Dest, MemSet, MemCpy)) | ||||||||
| 1203 | return false; | ||||||||
| 1204 | |||||||||
| 1205 | // If the sizes are the same, simply drop the memset instead of generating | ||||||||
| 1206 | // a replacement with zero size. | ||||||||
| 1207 | if (DestSize == SrcSize) { | ||||||||
| 1208 | eraseInstruction(MemSet); | ||||||||
| 1209 | return true; | ||||||||
| 1210 | } | ||||||||
| 1211 | |||||||||
| 1212 | // By default, create an unaligned memset. | ||||||||
| 1213 | unsigned Align = 1; | ||||||||
| 1214 | // If Dest is aligned, and SrcSize is constant, use the minimum alignment | ||||||||
| 1215 | // of the sum. | ||||||||
| 1216 | const unsigned DestAlign = | ||||||||
| 1217 | std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment()); | ||||||||
| 1218 | if (DestAlign > 1) | ||||||||
| 1219 | if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize)) | ||||||||
| 1220 | Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign); | ||||||||
| 1221 | |||||||||
| 1222 | IRBuilder<> Builder(MemCpy); | ||||||||
| 1223 | |||||||||
| 1224 | // If the sizes have different types, zext the smaller one. | ||||||||
| 1225 | if (DestSize->getType() != SrcSize->getType()) { | ||||||||
| 1226 | if (DestSize->getType()->getIntegerBitWidth() > | ||||||||
| 1227 | SrcSize->getType()->getIntegerBitWidth()) | ||||||||
| 1228 | SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType()); | ||||||||
| 1229 | else | ||||||||
| 1230 | DestSize = Builder.CreateZExt(DestSize, SrcSize->getType()); | ||||||||
| 1231 | } | ||||||||
| 1232 | |||||||||
| 1233 | Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize); | ||||||||
| 1234 | Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize); | ||||||||
| 1235 | Value *MemsetLen = Builder.CreateSelect( | ||||||||
| 1236 | Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff); | ||||||||
| 1237 | unsigned DestAS = Dest->getType()->getPointerAddressSpace(); | ||||||||
| 1238 | Instruction *NewMemSet = Builder.CreateMemSet( | ||||||||
| 1239 | Builder.CreateGEP(Builder.getInt8Ty(), | ||||||||
| 1240 | Builder.CreatePointerCast(Dest, | ||||||||
| 1241 | Builder.getInt8PtrTy(DestAS)), | ||||||||
| 1242 | SrcSize), | ||||||||
| 1243 | MemSet->getOperand(1), MemsetLen, MaybeAlign(Align)); | ||||||||
| 1244 | |||||||||
| 1245 | if (MSSAU) { | ||||||||
| 1246 | assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) && | ||||||||
| 1247 | "MemCpy must be a MemoryDef"); | ||||||||
| 1248 | // The new memset is inserted after the memcpy, but it is known that its | ||||||||
| 1249 | // defining access is the memset about to be removed which immediately | ||||||||
| 1250 | // precedes the memcpy. | ||||||||
| 1251 | auto *LastDef = | ||||||||
| 1252 | cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)); | ||||||||
| 1253 | auto *NewAccess = MSSAU->createMemoryAccessBefore( | ||||||||
| 1254 | NewMemSet, LastDef->getDefiningAccess(), LastDef); | ||||||||
| 1255 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||||||
| 1256 | } | ||||||||
| 1257 | |||||||||
| 1258 | eraseInstruction(MemSet); | ||||||||
| 1259 | return true; | ||||||||
| 1260 | } | ||||||||
| 1261 | |||||||||
| 1262 | /// Determine whether the instruction has undefined content for the given Size, | ||||||||
| 1263 | /// either because it was freshly alloca'd or started its lifetime. | ||||||||
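| | /// | ||||||||
| | /// Sketch (illustrative only): a fresh alloca always qualifies, and | ||||||||
| | /// \code | ||||||||
| | /// lifetime.start(32, p) | ||||||||
| | /// \endcode | ||||||||
| | /// qualifies for any constant Size <= 32, since those bytes are undef | ||||||||
| | /// immediately after the lifetime begins. | ||||||||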
| 1264 | static bool hasUndefContents(Instruction *I, Value *Size) { | ||||||||
| 1265 | if (isa<AllocaInst>(I)) | ||||||||
| 1266 | return true; | ||||||||
| 1267 | |||||||||
| 1268 | if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) { | ||||||||
| 1269 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) | ||||||||
| 1270 | if (II->getIntrinsicID() == Intrinsic::lifetime_start) | ||||||||
| 1271 | if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0))) | ||||||||
| 1272 | if (LTSize->getZExtValue() >= CSize->getZExtValue()) | ||||||||
| 1273 | return true; | ||||||||
| 1274 | } | ||||||||
| 1275 | |||||||||
| 1276 | return false; | ||||||||
| 1277 | } | ||||||||
| 1278 | |||||||||
| 1279 | static bool hasUndefContentsMSSA(MemorySSA *MSSA, AliasAnalysis *AA, Value *V, | ||||||||
| 1280 | MemoryDef *Def, Value *Size) { | ||||||||
| 1281 | if (MSSA->isLiveOnEntryDef(Def)) | ||||||||
| 1282 | return isa<AllocaInst>(getUnderlyingObject(V)); | ||||||||
| 1283 | |||||||||
| 1284 | if (IntrinsicInst *II = | ||||||||
| 1285 | dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) { | ||||||||
| 1286 | if (II->getIntrinsicID() == Intrinsic::lifetime_start) { | ||||||||
| 1287 | ConstantInt *LTSize = cast<ConstantInt>(II->getArgOperand(0)); | ||||||||
| 1288 | |||||||||
| 1289 | if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) { | ||||||||
| 1290 | if (AA->isMustAlias(V, II->getArgOperand(1)) && | ||||||||
| 1291 | LTSize->getZExtValue() >= CSize->getZExtValue()) | ||||||||
| 1292 | return true; | ||||||||
| 1293 | } | ||||||||
| 1294 | |||||||||
| 1295 | // If the lifetime.start covers a whole alloca (as it almost always | ||||||||
| 1296 | // does) and we're querying a pointer based on that alloca, then we know | ||||||||
| 1297 | // the memory is definitely undef, regardless of how exactly we alias. | ||||||||
| 1298 | // The size also doesn't matter, as an out-of-bounds access would be UB. | ||||||||
| 1299 | AllocaInst *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V)); | ||||||||
| 1300 | if (getUnderlyingObject(II->getArgOperand(1)) == Alloca) { | ||||||||
| 1301 | const DataLayout &DL = Alloca->getModule()->getDataLayout(); | ||||||||
| 1302 | if (Optional<TypeSize> AllocaSize = Alloca->getAllocationSizeInBits(DL)) | ||||||||
| 1303 | if (*AllocaSize == LTSize->getValue() * 8) | ||||||||
| 1304 | return true; | ||||||||
| 1305 | } | ||||||||
| 1306 | } | ||||||||
| 1307 | } | ||||||||
| 1308 | |||||||||
| 1309 | return false; | ||||||||
| 1310 | } | ||||||||
| 1311 | |||||||||
| 1312 | /// Transform memcpy to memset when its source was just memset. | ||||||||
| 1313 | /// In other words, turn: | ||||||||
| 1314 | /// \code | ||||||||
| 1315 | /// memset(dst1, c, dst1_size); | ||||||||
| 1316 | /// memcpy(dst2, dst1, dst2_size); | ||||||||
| 1317 | /// \endcode | ||||||||
| 1318 | /// into: | ||||||||
| 1319 | /// \code | ||||||||
| 1320 | /// memset(dst1, c, dst1_size); | ||||||||
| 1321 | /// memset(dst2, c, dst2_size); | ||||||||
| 1322 | /// \endcode | ||||||||
| 1323 | /// When dst2_size <= dst1_size. | ||||||||
| 1324 | bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy, | ||||||||
| 1325 | MemSetInst *MemSet) { | ||||||||
| 1326 | // Make sure this is memcpy(..., memset(...), ...); i.e., we are memsetting and | ||||||||
| 1327 | // memcpying from the same address. Otherwise it is hard to reason about. | ||||||||
| 1328 | if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource())) | ||||||||
| 1329 | return false; | ||||||||
| 1330 | |||||||||
| 1331 | Value *MemSetSize = MemSet->getLength(); | ||||||||
| 1332 | Value *CopySize = MemCpy->getLength(); | ||||||||
| 1333 | |||||||||
| 1334 | if (MemSetSize != CopySize) { | ||||||||
| 1335 | // Make sure the memcpy doesn't read any more than what the memset wrote. | ||||||||
| 1336 | // Don't worry about sizes larger than i64. | ||||||||
| 1337 | |||||||||
| 1338 | // A known memset size is required. | ||||||||
| 1339 | ConstantInt *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize); | ||||||||
| 1340 | if (!CMemSetSize) | ||||||||
| 1341 | return false; | ||||||||
| 1342 | |||||||||
| 1343 | // A known memcpy size is also required. | ||||||||
| 1344 | ConstantInt *CCopySize = dyn_cast<ConstantInt>(CopySize); | ||||||||
| 1345 | if (!CCopySize) | ||||||||
| 1346 | return false; | ||||||||
| 1347 | if (CCopySize->getZExtValue() > CMemSetSize->getZExtValue()) { | ||||||||
| 1348 | // If the memcpy is larger than the memset, but the memory was undef prior | ||||||||
| 1349 | // to the memset, we can just ignore the tail. Technically we're only | ||||||||
| 1350 | // interested in the bytes from MemSetSize..CopySize here, but as we can't | ||||||||
| 1351 | // easily represent this location, we use the full 0..CopySize range. | ||||||||
| 1352 | MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy); | ||||||||
| 1353 | bool CanReduceSize = false; | ||||||||
| 1354 | if (EnableMemorySSA) { | ||||||||
| 1355 | MemoryUseOrDef *MemSetAccess = MSSA->getMemoryAccess(MemSet); | ||||||||
| 1356 | MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess( | ||||||||
| 1357 | MemSetAccess->getDefiningAccess(), MemCpyLoc); | ||||||||
| 1358 | if (auto *MD = dyn_cast<MemoryDef>(Clobber)) | ||||||||
| 1359 | if (hasUndefContentsMSSA(MSSA, AA, MemCpy->getSource(), MD, CopySize)) | ||||||||
| 1360 | CanReduceSize = true; | ||||||||
| 1361 | } else { | ||||||||
| 1362 | MemDepResult DepInfo = MD->getPointerDependencyFrom( | ||||||||
| 1363 | MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent()); | ||||||||
| 1364 | if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize)) | ||||||||
| 1365 | CanReduceSize = true; | ||||||||
| 1366 | } | ||||||||
| 1367 | |||||||||
| 1368 | if (!CanReduceSize) | ||||||||
| 1369 | return false; | ||||||||
| 1370 | CopySize = MemSetSize; | ||||||||
| 1371 | } | ||||||||
| 1372 | } | ||||||||
| 1373 | |||||||||
| 1374 | IRBuilder<> Builder(MemCpy); | ||||||||
| 1375 | Instruction *NewM = | ||||||||
| 1376 | Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1), | ||||||||
| 1377 | CopySize, MaybeAlign(MemCpy->getDestAlignment())); | ||||||||
| 1378 | if (MSSAU) { | ||||||||
| 1379 | auto *LastDef = | ||||||||
| 1380 | cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)); | ||||||||
| 1381 | auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef); | ||||||||
| 1382 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||||||
| 1383 | } | ||||||||
| 1384 | |||||||||
| 1385 | return true; | ||||||||
| 1386 | } | ||||||||
| 1387 | |||||||||
| 1388 | /// Perform simplification of memcpys. If we have memcpy A | ||||||||
| 1389 | /// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite | ||||||||
| 1390 | /// B to be a memcpy from X to Z (or potentially a memmove, depending on | ||||||||
| 1391 | /// circumstances). This allows later passes to remove the first memcpy | ||||||||
| 1392 | /// altogether. | ||||||||
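| | /// | ||||||||
| | /// The simplest case handled here, as a sketch (names hypothetical): | ||||||||
| | /// \code | ||||||||
| | /// memcpy(p <- p, n); // self-copy, erased outright | ||||||||
| | /// \endcode | ||||||||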
| 1393 | bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) { | ||||||||
| 1394 | // We can only optimize non-volatile memcpy's. | ||||||||
| 1395 | if (M->isVolatile()) return false; | ||||||||
| 1396 | |||||||||
| 1397 | // If the source and destination of the memcpy are the same, then zap it. | ||||||||
| 1398 | if (M->getSource() == M->getDest()) { | ||||||||
| 1399 | ++BBI; | ||||||||
| 1400 | eraseInstruction(M); | ||||||||
| 1401 | return true; | ||||||||
| 1402 | } | ||||||||
| 1403 | |||||||||
| 1404 | // If copying from a constant, try to turn the memcpy into a memset. | ||||||||
| 1405 | if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource())) | ||||||||
| 1406 | if (GV->isConstant() && GV->hasDefinitiveInitializer()) | ||||||||
| 1407 | if (Value *ByteVal = isBytewiseValue(GV->getInitializer(), | ||||||||
| 1408 | M->getModule()->getDataLayout())) { | ||||||||
| 1409 | IRBuilder<> Builder(M); | ||||||||
| 1410 | Instruction *NewM = | ||||||||
| 1411 | Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(), | ||||||||
| 1412 | MaybeAlign(M->getDestAlignment()), false); | ||||||||
| 1413 | if (MSSAU) { | ||||||||
| 1414 | auto *LastDef = | ||||||||
| 1415 | cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)); | ||||||||
| 1416 | auto *NewAccess = | ||||||||
| 1417 | MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef); | ||||||||
| 1418 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||||||
| 1419 | } | ||||||||
| 1420 | |||||||||
| 1421 | eraseInstruction(M); | ||||||||
| 1422 | ++NumCpyToSet; | ||||||||
| 1423 | return true; | ||||||||
| 1424 | } | ||||||||
| 1425 | |||||||||
| 1426 | if (EnableMemorySSA) { | ||||||||
| 1427 | MemoryUseOrDef *MA = MSSA->getMemoryAccess(M); | ||||||||
| 1428 | MemoryAccess *AnyClobber = MSSA->getWalker()->getClobberingMemoryAccess(MA); | ||||||||
| 1429 | MemoryLocation DestLoc = MemoryLocation::getForDest(M); | ||||||||
| 1430 | const MemoryAccess *DestClobber = | ||||||||
| 1431 | MSSA->getWalker()->getClobberingMemoryAccess(AnyClobber, DestLoc); | ||||||||
| 1432 | |||||||||
| 1433 | // Try to turn a partially redundant memset + memcpy into | ||||||||
| 1434 | // memcpy + smaller memset. We don't need the memcpy size for this. | ||||||||
| 1435 | // The memcpy must post-dominate the memset, so limit this to the same basic | ||||||||
| 1436 | // block. A non-local generalization is likely not worthwhile. | ||||||||
| 1437 | if (auto *MD = dyn_cast<MemoryDef>(DestClobber)) | ||||||||
| 1438 | if (auto *MDep = dyn_cast_or_null<MemSetInst>(MD->getMemoryInst())) | ||||||||
| 1439 | if (DestClobber->getBlock() == M->getParent()) | ||||||||
| 1440 | if (processMemSetMemCpyDependence(M, MDep)) | ||||||||
| 1441 | return true; | ||||||||
| 1442 | |||||||||
| 1443 | MemoryAccess *SrcClobber = MSSA->getWalker()->getClobberingMemoryAccess( | ||||||||
| 1444 | AnyClobber, MemoryLocation::getForSource(M)); | ||||||||
| 1445 | |||||||||
| 1446 | // There are four possible optimizations we can do for memcpy: | ||||||||
| 1447 | // a) memcpy-memcpy xform which exposes redundancy for DSE. | ||||||||
| 1448 | // b) call-memcpy xform for return slot optimization. | ||||||||
| 1449 | // c) memcpy from freshly alloca'd space or space that has just started | ||||||||
| 1450 | // its lifetime copies undefined data, and we can therefore eliminate | ||||||||
| 1451 | // the memcpy in favor of the data that was already at the destination. | ||||||||
| 1452 | // d) memcpy from a just-memset'd source can be turned into memset. | ||||||||
| 1453 | if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) { | ||||||||
| 1454 | if (Instruction *MI = MD->getMemoryInst()) { | ||||||||
| 1455 | if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) { | ||||||||
| 1456 | if (auto *C = dyn_cast<CallInst>(MI)) { | ||||||||
| 1457 | // The memcpy must post-dom the call. Limit to the same block for | ||||||||
| 1458 | // now. Additionally, we need to ensure that there are no accesses | ||||||||
| 1459 | // to dest between the call and the memcpy. Accesses to src will be | ||||||||
| 1460 | // checked by performCallSlotOptzn(). | ||||||||
| 1461 | // TODO: Support non-local call-slot optimization? | ||||||||
| 1462 | if (C->getParent() == M->getParent() && | ||||||||
| 1463 | !accessedBetween(*AA, DestLoc, MD, MA)) { | ||||||||
| 1464 | // FIXME: Can we pass in either of dest/src alignment here instead | ||||||||
| 1465 | // of conservatively taking the minimum? | ||||||||
| 1466 | Align Alignment = std::min(M->getDestAlign().valueOrOne(), | ||||||||
| 1467 | M->getSourceAlign().valueOrOne()); | ||||||||
| 1468 | if (performCallSlotOptzn( | ||||||||
| 1469 | M, M, M->getDest(), M->getSource(), | ||||||||
| 1470 | TypeSize::getFixed(CopySize->getZExtValue()), Alignment, | ||||||||
| 1471 | C)) { | ||||||||
| 1472 | LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n" | ||||||||
| 1473 | << " call: " << *C << "\n" | ||||||||
| 1474 | << " memcpy: " << *M << "\n"); | ||||||||
| 1475 | eraseInstruction(M); | ||||||||
| 1476 | ++NumMemCpyInstr; | ||||||||
| 1477 | return true; | ||||||||
| 1478 | } | ||||||||
| 1479 | } | ||||||||
| 1480 | } | ||||||||
| 1481 | } | ||||||||
| 1482 | if (auto *MDep = dyn_cast<MemCpyInst>(MI)) | ||||||||
| 1483 | return processMemCpyMemCpyDependence(M, MDep); | ||||||||
| 1484 | if (auto *MDep = dyn_cast<MemSetInst>(MI)) { | ||||||||
| 1485 | if (performMemCpyToMemSetOptzn(M, MDep)) { | ||||||||
| 1486 | LLVM_DEBUG(dbgs() << "Converted memcpy to memset\n"); | ||||||||
| 1487 | eraseInstruction(M); | ||||||||
| 1488 | ++NumCpyToSet; | ||||||||
| 1489 | return true; | ||||||||
| 1490 | } | ||||||||
| 1491 | } | ||||||||
| 1492 | } | ||||||||
| 1493 | |||||||||
| 1494 | if (hasUndefContentsMSSA(MSSA, AA, M->getSource(), MD, M->getLength())) { | ||||||||
| 1495 | LLVM_DEBUG(dbgs() << "Removed memcpy from undef\n"); | ||||||||
| 1496 | eraseInstruction(M); | ||||||||
| 1497 | ++NumMemCpyInstr; | ||||||||
| 1498 | return true; | ||||||||
| 1499 | } | ||||||||
| 1500 | } | ||||||||
| 1501 | } else { | ||||||||
| 1502 | MemDepResult DepInfo = MD->getDependency(M); | ||||||||
| 1503 | |||||||||
| 1504 | // Try to turn a partially redundant memset + memcpy into | ||||||||
| 1505 | // memcpy + smaller memset. We don't need the memcpy size for this. | ||||||||
| 1506 | if (DepInfo.isClobber()) | ||||||||
| 1507 | if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst())) | ||||||||
| 1508 | if (processMemSetMemCpyDependence(M, MDep)) | ||||||||
| 1509 | return true; | ||||||||
| 1510 | |||||||||
| 1511 | // There are four possible optimizations we can do for memcpy: | ||||||||
| 1512 | // a) memcpy-memcpy xform which exposes redundancy for DSE. | ||||||||
| 1513 | // b) call-memcpy xform for return slot optimization. | ||||||||
| 1514 | // c) memcpy from freshly alloca'd space or space that has just started | ||||||||
| 1515 | // its lifetime copies undefined data, and we can therefore eliminate | ||||||||
| 1516 | // the memcpy in favor of the data that was already at the destination. | ||||||||
| 1517 | // d) memcpy from a just-memset'd source can be turned into memset. | ||||||||
| 1518 | if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) { | ||||||||
| 1519 | if (DepInfo.isClobber()) { | ||||||||
| 1520 | if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) { | ||||||||
| 1521 | // FIXME: Can we pass in either of dest/src alignment here instead | ||||||||
| 1522 | // of conservatively taking the minimum? | ||||||||
| 1523 | Align Alignment = std::min(M->getDestAlign().valueOrOne(), | ||||||||
| 1524 | M->getSourceAlign().valueOrOne()); | ||||||||
| 1525 | if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(), | ||||||||
| 1526 | TypeSize::getFixed(CopySize->getZExtValue()), | ||||||||
| 1527 | Alignment, C)) { | ||||||||
| 1528 | eraseInstruction(M); | ||||||||
| 1529 | ++NumMemCpyInstr; | ||||||||
| 1530 | return true; | ||||||||
| 1531 | } | ||||||||
| 1532 | } | ||||||||
| 1533 | } | ||||||||
| 1534 | } | ||||||||
| 1535 | |||||||||
| 1536 | MemoryLocation SrcLoc = MemoryLocation::getForSource(M); | ||||||||
| 1537 | MemDepResult SrcDepInfo = MD->getPointerDependencyFrom( | ||||||||
| 1538 | SrcLoc, true, M->getIterator(), M->getParent()); | ||||||||
| 1539 | |||||||||
| 1540 | if (SrcDepInfo.isClobber()) { | ||||||||
| 1541 | if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst())) | ||||||||
| 1542 | return processMemCpyMemCpyDependence(M, MDep); | ||||||||
| 1543 | } else if (SrcDepInfo.isDef()) { | ||||||||
| 1544 | if (hasUndefContents(SrcDepInfo.getInst(), M->getLength())) { | ||||||||
| 1545 | eraseInstruction(M); | ||||||||
| 1546 | ++NumMemCpyInstr; | ||||||||
| 1547 | return true; | ||||||||
| 1548 | } | ||||||||
| 1549 | } | ||||||||
| 1550 | |||||||||
| 1551 | if (SrcDepInfo.isClobber()) | ||||||||
| 1552 | if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst())) | ||||||||
| 1553 | if (performMemCpyToMemSetOptzn(M, MDep)) { | ||||||||
| 1554 | eraseInstruction(M); | ||||||||
| 1555 | ++NumCpyToSet; | ||||||||
| 1556 | return true; | ||||||||
| 1557 | } | ||||||||
| 1558 | } | ||||||||
| 1559 | |||||||||
| 1560 | return false; | ||||||||
| 1561 | } | ||||||||
| 1562 | |||||||||
| 1563 | /// Transforms memmove calls to memcpy calls when the src/dst are guaranteed | ||||||||
| 1564 | /// not to alias. | ||||||||
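| | /// | ||||||||
| | /// Sketch (illustrative names): | ||||||||
| | /// \code | ||||||||
| | /// memmove(d <- s, n); // d and s proven NoAlias | ||||||||
| | /// \endcode | ||||||||
| | /// is rewritten in place into the equivalent memcpy intrinsic. | ||||||||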
| 1565 | bool MemCpyOptPass::processMemMove(MemMoveInst *M) { | ||||||||
| 1566 | if (!TLI->has(LibFunc_memmove)) | ||||||||
| 1567 | return false; | ||||||||
| 1568 | |||||||||
| 1569 | // See if the pointers alias. | ||||||||
| 1570 | if (!AA->isNoAlias(MemoryLocation::getForDest(M), | ||||||||
| 1571 | MemoryLocation::getForSource(M))) | ||||||||
| 1572 | return false; | ||||||||
| 1573 | |||||||||
| 1574 | LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M | ||||||||
| 1575 | << "\n"); | ||||||||
| 1576 | |||||||||
| 1577 | // If not, then we know we can transform this. | ||||||||
| 1578 | Type *ArgTys[3] = { M->getRawDest()->getType(), | ||||||||
| 1579 | M->getRawSource()->getType(), | ||||||||
| 1580 | M->getLength()->getType() }; | ||||||||
| 1581 | M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(), | ||||||||
| 1582 | Intrinsic::memcpy, ArgTys)); | ||||||||
| 1583 | |||||||||
| 1584 | // For MemorySSA nothing really changes (except that memcpy may imply stricter | ||||||||
| 1585 | // aliasing guarantees). | ||||||||
| 1586 | |||||||||
| 1587 | // MemDep may have overly conservative information about this instruction; just | ||||||||
| 1588 | // conservatively flush it from the cache. | ||||||||
| 1589 | if (MD) | ||||||||
| 1590 | MD->removeInstruction(M); | ||||||||
| 1591 | |||||||||
| 1592 | ++NumMoveToCpy; | ||||||||
| 1593 | return true; | ||||||||
| 1594 | } | ||||||||
| 1595 | |||||||||
| 1596 | /// This is called on every byval argument in call sites. | ||||||||
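| | /// | ||||||||
| | /// In sketch form (names hypothetical): given | ||||||||
| | /// \code | ||||||||
| | /// memcpy(tmp <- src, n); | ||||||||
| | /// foo(byval tmp); | ||||||||
| | /// \endcode | ||||||||
| | /// the call may be rewritten as foo(byval src), provided src is not | ||||||||
| | /// written between the memcpy and the call and the alignment and | ||||||||
| | /// address-space requirements below are met. | ||||||||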
| 1597 | bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) { | ||||||||
| 1598 | const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout(); | ||||||||
| 1599 | // Find out what feeds this byval argument. | ||||||||
| 1600 | Value *ByValArg = CB.getArgOperand(ArgNo); | ||||||||
| 1601 | Type *ByValTy = CB.getParamByValType(ArgNo); | ||||||||
| 1602 | TypeSize ByValSize = DL.getTypeAllocSize(ByValTy); | ||||||||
| 1603 | MemoryLocation Loc(ByValArg, LocationSize::precise(ByValSize)); | ||||||||
| 1604 | MemCpyInst *MDep = nullptr; | ||||||||
| 1605 | if (EnableMemorySSA) { | ||||||||
| 1606 | MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB); | ||||||||
| 1607 | if (!CallAccess) | ||||||||
| 1608 | return false; | ||||||||
| 1609 | MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess( | ||||||||
| 1610 | CallAccess->getDefiningAccess(), Loc); | ||||||||
| 1611 | if (auto *MD = dyn_cast<MemoryDef>(Clobber)) | ||||||||
| 1612 | MDep = dyn_cast_or_null<MemCpyInst>(MD->getMemoryInst()); | ||||||||
| 1613 | } else { | ||||||||
| 1614 | MemDepResult DepInfo = MD->getPointerDependencyFrom( | ||||||||
| 1615 | Loc, true, CB.getIterator(), CB.getParent()); | ||||||||
| 1616 | if (!DepInfo.isClobber()) | ||||||||
| 1617 | return false; | ||||||||
| 1618 | MDep = dyn_cast<MemCpyInst>(DepInfo.getInst()); | ||||||||
| 1619 | } | ||||||||
| 1620 | |||||||||
| 1621 | // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by | ||||||||
| 1622 | // a memcpy, see if we can byval from the source of the memcpy instead of the | ||||||||
| 1623 | // result. | ||||||||
| 1624 | if (!MDep || MDep->isVolatile() || | ||||||||
| 1625 | ByValArg->stripPointerCasts() != MDep->getDest()) | ||||||||
| 1626 | return false; | ||||||||
| 1627 | |||||||||
| 1628 | // The length of the memcpy must be larger than or equal to the byval's size. | ||||||||
| 1629 | ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength()); | ||||||||
| 1630 | if (!C1 || !TypeSize::isKnownGE( | ||||||||
| 1631 | TypeSize::getFixed(C1->getValue().getZExtValue()), ByValSize)) | ||||||||
| 1632 | return false; | ||||||||
| 1633 | |||||||||
| 1634 | // Get the alignment of the byval. If the call doesn't specify the alignment, | ||||||||
| 1635 | // then it is some target specific value that we can't know. | ||||||||
| 1636 | MaybeAlign ByValAlign = CB.getParamAlign(ArgNo); | ||||||||
| 1637 | if (!ByValAlign) return false; | ||||||||
| 1638 | |||||||||
| 1639 | // If it is greater than the memcpy, then we check to see if we can force the | ||||||||
| 1640 | // source of the memcpy to the alignment we need. If we fail, we bail out. | ||||||||
| 1641 | MaybeAlign MemDepAlign = MDep->getSourceAlign(); | ||||||||
| 1642 | if ((!MemDepAlign || *MemDepAlign < *ByValAlign) && | ||||||||
| 1643 | getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC, | ||||||||
| 1644 | DT) < *ByValAlign) | ||||||||
| 1645 | return false; | ||||||||
| 1646 | |||||||||
| 1647 | // The address space of the memcpy source must match the byval argument. | ||||||||
| 1648 | if (MDep->getSource()->getType()->getPointerAddressSpace() != | ||||||||
| 1649 | ByValArg->getType()->getPointerAddressSpace()) | ||||||||
| 1650 | return false; | ||||||||
| 1651 | |||||||||
| 1652 | // Verify that the copied-from memory doesn't change in between the memcpy and | ||||||||
| 1653 | // the byval call. | ||||||||
| 1654 | // memcpy(a <- b) | ||||||||
| 1655 | // *b = 42; | ||||||||
| 1656 | // foo(*a) | ||||||||
| 1657 | // It would be invalid to transform the second memcpy into foo(*b). | ||||||||
| 1658 | if (EnableMemorySSA) { | ||||||||
| 1659 | if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep), | ||||||||
| 1660 | MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(&CB))) | ||||||||
| 1661 | return false; | ||||||||
| 1662 | } else { | ||||||||
| 1663 | // NOTE: This is conservative; it will stop on any read from the source loc, | ||||||||
| 1664 | // not just the defining memcpy. | ||||||||
| 1665 | MemDepResult SourceDep = MD->getPointerDependencyFrom( | ||||||||
| 1666 | MemoryLocation::getForSource(MDep), false, | ||||||||
| 1667 | CB.getIterator(), MDep->getParent()); | ||||||||
| 1668 | if (!SourceDep.isClobber() || SourceDep.getInst() != MDep) | ||||||||
| 1669 | return false; | ||||||||
| 1670 | } | ||||||||
| 1671 | |||||||||
| 1672 | Value *TmpCast = MDep->getSource(); | ||||||||
| 1673 | if (MDep->getSource()->getType() != ByValArg->getType()) { | ||||||||
| 1674 | BitCastInst *TmpBitCast = new BitCastInst(MDep->getSource(), ByValArg->getType(), | ||||||||
| 1675 | "tmpcast", &CB); | ||||||||
| 1676 | // Set the tmpcast's DebugLoc to MDep's | ||||||||
| 1677 | TmpBitCast->setDebugLoc(MDep->getDebugLoc()); | ||||||||
| 1678 | TmpCast = TmpBitCast; | ||||||||
| 1679 | } | ||||||||
| 1680 | |||||||||
| 1681 | LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n" | ||||||||
| 1682 | << " " << *MDep << "\n" | ||||||||
| 1683 | << " " << CB << "\n"); | ||||||||
| 1684 | |||||||||
| 1685 | // Otherwise we're good! Update the byval argument. | ||||||||
| 1686 | CB.setArgOperand(ArgNo, TmpCast); | ||||||||
| 1687 | ++NumMemCpyInstr; | ||||||||
| 1688 | return true; | ||||||||
| 1689 | } | ||||||||
| 1690 | |||||||||
| 1691 | /// Executes one iteration of MemCpyOptPass. | ||||||||
| 1692 | bool MemCpyOptPass::iterateOnFunction(Function &F) { | ||||||||
| 1693 | bool MadeChange = false; | ||||||||
| 1694 | |||||||||
| 1695 | // Walk all instructions in the function. | ||||||||
| 1696 | for (BasicBlock &BB : F) { | ||||||||
| 1697 | // Skip unreachable blocks. For example processStore assumes that an | ||||||||
| 1698 | // instruction in a BB can't be dominated by a later instruction in the | ||||||||
| 1699 | // same BB (which is a scenario that can happen for an unreachable BB that | ||||||||
| 1700 | // has itself as a predecessor). | ||||||||
| 1701 | if (!DT->isReachableFromEntry(&BB)) | ||||||||
| 1702 | continue; | ||||||||
| 1703 | |||||||||
| 1704 | for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) { | ||||||||
| 1705 | // Avoid invalidating the iterator. | ||||||||
| 1706 | Instruction *I = &*BI++; | ||||||||
| 1707 | |||||||||
| 1708 | bool RepeatInstruction = false; | ||||||||
| 1709 | |||||||||
| 1710 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) | ||||||||
| 1711 | MadeChange |= processStore(SI, BI); | ||||||||
| 1712 | else if (MemSetInst *M = dyn_cast<MemSetInst>(I)) | ||||||||
| 1713 | RepeatInstruction = processMemSet(M, BI); | ||||||||
| 1714 | else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I)) | ||||||||
| 1715 | RepeatInstruction = processMemCpy(M, BI); | ||||||||
| 1716 | else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) | ||||||||
| 1717 | RepeatInstruction = processMemMove(M); | ||||||||
| 1718 | else if (auto *CB = dyn_cast<CallBase>(I)) { | ||||||||
| 1719 | for (unsigned i = 0, e = CB->arg_size(); i != e; ++i) | ||||||||
| 1720 | if (CB->isByValArgument(i)) | ||||||||
| 1721 | MadeChange |= processByValArgument(*CB, i); | ||||||||
| 1722 | } | ||||||||
| 1723 | |||||||||
| 1724 | // Reprocess the instruction if desired. | ||||||||
| 1725 | if (RepeatInstruction) { | ||||||||
| 1726 | if (BI != BB.begin()) | ||||||||
| 1727 | --BI; | ||||||||
| 1728 | MadeChange = true; | ||||||||
| 1729 | } | ||||||||
| 1730 | } | ||||||||
| 1731 | } | ||||||||
| 1732 | |||||||||
| 1733 | return MadeChange; | ||||||||
| 1734 | } | ||||||||
| 1735 | |||||||||
| 1736 | PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) { | ||||||||
| 1737 | auto *MD = !EnableMemorySSA ? &AM.getResult<MemoryDependenceAnalysis>(F) | ||||||||
| 1738 | : AM.getCachedResult<MemoryDependenceAnalysis>(F); | ||||||||
| 1739 | auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); | ||||||||
| 1740 | auto *AA = &AM.getResult<AAManager>(F); | ||||||||
| 1741 | auto *AC = &AM.getResult<AssumptionAnalysis>(F); | ||||||||
| 1742 | auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); | ||||||||
| 1743 | auto *MSSA = EnableMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F) | ||||||||
| 1744 | : AM.getCachedResult<MemorySSAAnalysis>(F); | ||||||||
| 1745 | |||||||||
| 1746 | bool MadeChange = | ||||||||
| 1747 | runImpl(F, MD, &TLI, AA, AC, DT, MSSA ? &MSSA->getMSSA() : nullptr); | ||||||||
| 1748 | if (!MadeChange) | ||||||||
| 1749 | return PreservedAnalyses::all(); | ||||||||
| 1750 | |||||||||
| 1751 | PreservedAnalyses PA; | ||||||||
| 1752 | PA.preserveSet<CFGAnalyses>(); | ||||||||
| 1753 | if (MD) | ||||||||
| 1754 | PA.preserve<MemoryDependenceAnalysis>(); | ||||||||
| 1755 | if (MSSA) | ||||||||
| 1756 | PA.preserve<MemorySSAAnalysis>(); | ||||||||
| 1757 | return PA; | ||||||||
| 1758 | } | ||||||||
| 1759 | |||||||||
| 1760 | bool MemCpyOptPass::runImpl(Function &F, MemoryDependenceResults *MD_, | ||||||||
| 1761 | TargetLibraryInfo *TLI_, AliasAnalysis *AA_, | ||||||||
| 1762 | AssumptionCache *AC_, DominatorTree *DT_, | ||||||||
| 1763 | MemorySSA *MSSA_) { | ||||||||
| 1764 | bool MadeChange = false; | ||||||||
| 1765 | MD = MD_; | ||||||||
| 1766 | TLI = TLI_; | ||||||||
| 1767 | AA = AA_; | ||||||||
| 1768 | AC = AC_; | ||||||||
| 1769 | DT = DT_; | ||||||||
| 1770 | MSSA = MSSA_; | ||||||||
| 1771 | MemorySSAUpdater MSSAU_(MSSA_); | ||||||||
| 1772 | MSSAU = MSSA_ ? &MSSAU_ : nullptr; | ||||||||
| 1773 | // If we don't have at least memset and memcpy, there is little point in doing | ||||||||
| 1774 | // anything here. These are required by a freestanding implementation, so if | ||||||||
| 1775 | // even they are disabled, there is no point in trying hard. | ||||||||
| 1776 | if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy)) | ||||||||
| 1777 | return false; | ||||||||
| 1778 | |||||||||
| 1779 | while (true) { | ||||||||
| 1780 | if (!iterateOnFunction(F)) | ||||||||
| 1781 | break; | ||||||||
| 1782 | MadeChange = true; | ||||||||
| 1783 | } | ||||||||
| 1784 | |||||||||
| 1785 | if (MSSA_ && VerifyMemorySSA) | ||||||||
| 1786 | MSSA_->verifyMemorySSA(); | ||||||||
| 1787 | |||||||||
| 1788 | MD = nullptr; | ||||||||
| 1789 | return MadeChange; | ||||||||
| 1790 | } | ||||||||
| 1791 | |||||||||
| 1792 | /// This is the main transformation entry point for a function. | ||||||||
| 1793 | bool MemCpyOptLegacyPass::runOnFunction(Function &F) { | ||||||||
| 1794 | if (skipFunction(F)) | ||||||||
| 1795 | return false; | ||||||||
| 1796 | |||||||||
| 1797 | auto *MDWP = !EnableMemorySSA | ||||||||
| 1798 | ? &getAnalysis<MemoryDependenceWrapperPass>() | ||||||||
| 1799 | : getAnalysisIfAvailable<MemoryDependenceWrapperPass>(); | ||||||||
| 1800 | auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); | ||||||||
| 1801 | auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); | ||||||||
| 1802 | auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); | ||||||||
| 1803 | auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | ||||||||
| 1804 | auto *MSSAWP = EnableMemorySSA | ||||||||
| 1805 | ? &getAnalysis<MemorySSAWrapperPass>() | ||||||||
| 1806 | : getAnalysisIfAvailable<MemorySSAWrapperPass>(); | ||||||||
| 1807 | |||||||||
| 1808 | return Impl.runImpl(F, MDWP ? &MDWP->getMemDep() : nullptr, TLI, AA, AC, DT, | ||||||||
| 1809 | MSSAWP ? &MSSAWP->getMSSA() : nullptr); | ||||||||
| 1810 | } |
| 1 | //===-- llvm/IntrinsicInst.h - Intrinsic Instruction Wrappers ---*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file defines classes that make it really easy to deal with intrinsic |
| 10 | // functions with the isa/dyn_cast family of functions. In particular, this
| 11 | // allows you to do things like: |
| 12 | // |
| 13 | // if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(Inst)) |
| 14 | // ... MCI->getDest() ... MCI->getSource() ... |
| 15 | // |
| 16 | // All intrinsic function calls are instances of the call instruction, so these |
| 17 | // are all subclasses of the CallInst class. Note that none of these classes |
| 18 | // has state or virtual methods, which is an important part of this gross/neat |
| 19 | // hack working. |
| 20 | // |
| 21 | //===----------------------------------------------------------------------===// |
| 22 | |
| 23 | #ifndef LLVM_IR_INTRINSICINST_H |
| 24 | #define LLVM_IR_INTRINSICINST_H |
| 25 | |
| 26 | #include "llvm/IR/Constants.h" |
| 27 | #include "llvm/IR/DebugInfoMetadata.h" |
| 28 | #include "llvm/IR/DerivedTypes.h" |
| 29 | #include "llvm/IR/FPEnv.h" |
| 30 | #include "llvm/IR/Function.h" |
| 31 | #include "llvm/IR/GlobalVariable.h" |
| 32 | #include "llvm/IR/Instructions.h" |
| 33 | #include "llvm/IR/Intrinsics.h" |
| 34 | #include "llvm/IR/Metadata.h" |
| 35 | #include "llvm/IR/Value.h" |
| 36 | #include "llvm/Support/Casting.h" |
| 37 | #include <cassert> |
| 38 | #include <cstdint> |
| 39 | |
| 40 | namespace llvm { |
| 41 | |
| 42 | /// A wrapper class for inspecting calls to intrinsic functions. |
| 43 | /// This allows the standard isa/dyncast/cast functionality to work with calls |
| 44 | /// to intrinsic functions. |
| 45 | class IntrinsicInst : public CallInst { |
| 46 | public: |
| 47 | IntrinsicInst() = delete; |
| 48 | IntrinsicInst(const IntrinsicInst &) = delete; |
| 49 | IntrinsicInst &operator=(const IntrinsicInst &) = delete; |
| 50 | |
| 51 | /// Return the intrinsic ID of this intrinsic. |
| 52 | Intrinsic::ID getIntrinsicID() const { |
| 53 | return getCalledFunction()->getIntrinsicID(); |
| 54 | } |
| 55 | |
| 56 | /// Return true if swapping the first two arguments to the intrinsic produces |
| 57 | /// the same result. |
| 58 | bool isCommutative() const { |
| 59 | switch (getIntrinsicID()) { |
| 60 | case Intrinsic::maxnum: |
| 61 | case Intrinsic::minnum: |
| 62 | case Intrinsic::maximum: |
| 63 | case Intrinsic::minimum: |
| 64 | case Intrinsic::smax: |
| 65 | case Intrinsic::smin: |
| 66 | case Intrinsic::umax: |
| 67 | case Intrinsic::umin: |
| 68 | case Intrinsic::sadd_sat: |
| 69 | case Intrinsic::uadd_sat: |
| 70 | case Intrinsic::sadd_with_overflow: |
| 71 | case Intrinsic::uadd_with_overflow: |
| 72 | case Intrinsic::smul_with_overflow: |
| 73 | case Intrinsic::umul_with_overflow: |
| 74 | case Intrinsic::smul_fix: |
| 75 | case Intrinsic::umul_fix: |
| 76 | case Intrinsic::smul_fix_sat: |
| 77 | case Intrinsic::umul_fix_sat: |
| 78 | case Intrinsic::fma: |
| 79 | case Intrinsic::fmuladd: |
| 80 | return true; |
| 81 | default: |
| 82 | return false; |
| 83 | } |
| 84 | } |
| 85 | |
| 86 | // Checks if the intrinsic is an annotation. |
| 87 | bool isAssumeLikeIntrinsic() const { |
| 88 | switch (getIntrinsicID()) { |
| 89 | default: break; |
| 90 | case Intrinsic::assume: |
| 91 | case Intrinsic::sideeffect: |
| 92 | case Intrinsic::pseudoprobe: |
| 93 | case Intrinsic::dbg_declare: |
| 94 | case Intrinsic::dbg_value: |
| 95 | case Intrinsic::dbg_label: |
| 96 | case Intrinsic::invariant_start: |
| 97 | case Intrinsic::invariant_end: |
| 98 | case Intrinsic::lifetime_start: |
| 99 | case Intrinsic::lifetime_end: |
| 100 | case Intrinsic::experimental_noalias_scope_decl: |
| 101 | case Intrinsic::objectsize: |
| 102 | case Intrinsic::ptr_annotation: |
| 103 | case Intrinsic::var_annotation: |
| 104 | return true; |
| 105 | } |
| 106 | return false; |
| 107 | } |
| 108 | |
| 109 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
| 110 | static bool classof(const CallInst *I) { |
| 111 | if (const Function *CF = I->getCalledFunction()) |
| 112 | return CF->isIntrinsic(); |
| 113 | return false; |
| 114 | } |
| 115 | static bool classof(const Value *V) { |
| 116 | return isa<CallInst>(V) && classof(cast<CallInst>(V)); |
| 117 | } |
| 118 | }; |
| 119 | |
| 120 | /// Check if \p ID corresponds to a debug info intrinsic. |
| 121 | static inline bool isDbgInfoIntrinsic(Intrinsic::ID ID) { |
| 122 | switch (ID) { |
| 123 | case Intrinsic::dbg_declare: |
| 124 | case Intrinsic::dbg_value: |
| 125 | case Intrinsic::dbg_addr: |
| 126 | case Intrinsic::dbg_label: |
| 127 | return true; |
| 128 | default: |
| 129 | return false; |
| 130 | } |
| 131 | } |
| 132 | |
| 133 | /// This is the common base class for debug info intrinsics. |
| 134 | class DbgInfoIntrinsic : public IntrinsicInst { |
| 135 | public: |
| 136 | /// \name Casting methods |
| 137 | /// @{ |
| 138 | static bool classof(const IntrinsicInst *I) { |
| 139 | return isDbgInfoIntrinsic(I->getIntrinsicID()); |
| 140 | } |
| 141 | static bool classof(const Value *V) { |
| 142 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 143 | } |
| 144 | /// @} |
| 145 | }; |
| 146 | |
| 147 | /// This is the common base class for debug info intrinsics for variables. |
| 148 | class DbgVariableIntrinsic : public DbgInfoIntrinsic { |
| 149 | public: |
| 150 | // Iterator for ValueAsMetadata that internally uses direct pointer iteration |
| 151 | // over either a ValueAsMetadata* or a ValueAsMetadata**, dereferencing to the |
| 152 | // ValueAsMetadata.
| 153 | class location_op_iterator |
| 154 | : public iterator_facade_base<location_op_iterator, |
| 155 | std::bidirectional_iterator_tag, Value *> { |
| 156 | PointerUnion<ValueAsMetadata *, ValueAsMetadata **> I; |
| 157 | |
| 158 | public: |
| 159 | location_op_iterator(ValueAsMetadata *SingleIter) : I(SingleIter) {} |
| 160 | location_op_iterator(ValueAsMetadata **MultiIter) : I(MultiIter) {} |
| 161 | |
| 162 | location_op_iterator(const location_op_iterator &R) : I(R.I) {} |
| 163 | location_op_iterator &operator=(const location_op_iterator &R) { |
| 164 | I = R.I; |
| 165 | return *this; |
| 166 | } |
| 167 | bool operator==(const location_op_iterator &RHS) const { |
| 168 | return I == RHS.I; |
| 169 | } |
| 170 | const Value *operator*() const { |
| 171 | ValueAsMetadata *VAM = I.is<ValueAsMetadata *>() |
| 172 | ? I.get<ValueAsMetadata *>() |
| 173 | : *I.get<ValueAsMetadata **>(); |
| 174 | return VAM->getValue(); |
| 175 | }; |
| 176 | Value *operator*() { |
| 177 | ValueAsMetadata *VAM = I.is<ValueAsMetadata *>() |
| 178 | ? I.get<ValueAsMetadata *>() |
| 179 | : *I.get<ValueAsMetadata **>(); |
| 180 | return VAM->getValue(); |
| 181 | } |
| 182 | location_op_iterator &operator++() { |
| 183 | if (I.is<ValueAsMetadata *>()) |
| 184 | I = I.get<ValueAsMetadata *>() + 1; |
| 185 | else |
| 186 | I = I.get<ValueAsMetadata **>() + 1; |
| 187 | return *this; |
| 188 | } |
| 189 | location_op_iterator &operator--() { |
| 190 | if (I.is<ValueAsMetadata *>()) |
| 191 | I = I.get<ValueAsMetadata *>() - 1; |
| 192 | else |
| 193 | I = I.get<ValueAsMetadata **>() - 1; |
| 194 | return *this; |
| 195 | } |
| 196 | }; |
| 197 | |
| 198 | /// Get the locations corresponding to the variable referenced by the debug |
| 199 | /// info intrinsic. Depending on the intrinsic, this could be the |
| 200 | /// variable's value or its address. |
| 201 | iterator_range<location_op_iterator> location_ops() const; |
| 202 | |
| 203 | Value *getVariableLocationOp(unsigned OpIdx) const; |
| 204 | |
| 205 | void replaceVariableLocationOp(Value *OldValue, Value *NewValue); |
| 206 | void replaceVariableLocationOp(unsigned OpIdx, Value *NewValue); |
| 207 | /// Adding a new location operand will always result in this intrinsic using |
| 208 | /// an ArgList, and must always be accompanied by a new expression that uses |
| 209 | /// the new operand. |
| 210 | void addVariableLocationOps(ArrayRef<Value *> NewValues, |
| 211 | DIExpression *NewExpr); |
| 212 | |
| 213 | void setVariable(DILocalVariable *NewVar) { |
| 214 | setArgOperand(1, MetadataAsValue::get(NewVar->getContext(), NewVar)); |
| 215 | } |
| 216 | |
| 217 | void setExpression(DIExpression *NewExpr) { |
| 218 | setArgOperand(2, MetadataAsValue::get(NewExpr->getContext(), NewExpr)); |
| 219 | } |
| 220 | |
| 221 | unsigned getNumVariableLocationOps() const { |
| 222 | if (hasArgList()) |
| 223 | return cast<DIArgList>(getRawLocation())->getArgs().size(); |
| 224 | return 1; |
| 225 | } |
| 226 | |
| 227 | bool hasArgList() const { return isa<DIArgList>(getRawLocation()); } |
| 228 | |
| 229 | /// Does this describe the address of a local variable? True for dbg.addr
| 230 | /// and dbg.declare, but not dbg.value, which describes its value. |
| 231 | bool isAddressOfVariable() const { |
| 232 | return getIntrinsicID() != Intrinsic::dbg_value; |
| 233 | } |
| 234 | |
| 235 | void setUndef() { |
| 236 | // TODO: When/if we remove duplicate values from DIArgLists, we don't need |
| 237 | // this set anymore. |
| 238 | SmallPtrSet<Value *, 4> RemovedValues; |
| 239 | for (Value *OldValue : location_ops()) { |
| 240 | if (!RemovedValues.insert(OldValue).second) |
| 241 | continue; |
| 242 | Value *Undef = UndefValue::get(OldValue->getType()); |
| 243 | replaceVariableLocationOp(OldValue, Undef); |
| 244 | } |
| 245 | } |
| 246 | |
| 247 | bool isUndef() const { |
| 248 | return (getNumVariableLocationOps() == 0 && |
| 249 | !getExpression()->isComplex()) || |
| 250 | any_of(location_ops(), [](Value *V) { return isa<UndefValue>(V); }); |
| 251 | } |
| 252 | |
| 253 | DILocalVariable *getVariable() const { |
| 254 | return cast<DILocalVariable>(getRawVariable()); |
| 255 | } |
| 256 | |
| 257 | DIExpression *getExpression() const { |
| 258 | return cast<DIExpression>(getRawExpression()); |
| 259 | } |
| 260 | |
| 261 | Metadata *getRawLocation() const { |
| 262 | return cast<MetadataAsValue>(getArgOperand(0))->getMetadata(); |
| 263 | } |
| 264 | |
| 265 | Metadata *getRawVariable() const { |
| 266 | return cast<MetadataAsValue>(getArgOperand(1))->getMetadata(); |
| 267 | } |
| 268 | |
| 269 | Metadata *getRawExpression() const { |
| 270 | return cast<MetadataAsValue>(getArgOperand(2))->getMetadata(); |
| 271 | } |
| 272 | |
| 273 | /// Use of this should generally be avoided; instead, |
| 274 | /// replaceVariableLocationOp and addVariableLocationOps should be used where |
| 275 | /// possible to avoid creating invalid state. |
| 276 | void setRawLocation(Metadata *Location) { |
| 277 | return setArgOperand(0, MetadataAsValue::get(getContext(), Location)); |
| 278 | } |
| 279 | |
| 280 | /// Get the size (in bits) of the variable, or fragment of the variable that |
| 281 | /// is described. |
| 282 | Optional<uint64_t> getFragmentSizeInBits() const; |
| 283 | |
| 284 | /// \name Casting methods |
| 285 | /// @{ |
| 286 | static bool classof(const IntrinsicInst *I) { |
| 287 | switch (I->getIntrinsicID()) { |
| 288 | case Intrinsic::dbg_declare: |
| 289 | case Intrinsic::dbg_value: |
| 290 | case Intrinsic::dbg_addr: |
| 291 | return true; |
| 292 | default: |
| 293 | return false; |
| 294 | } |
| 295 | } |
| 296 | static bool classof(const Value *V) { |
| 297 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 298 | } |
| 299 | /// @} |
| 300 | private: |
| 301 | void setArgOperand(unsigned i, Value *v) { |
| 302 | DbgInfoIntrinsic::setArgOperand(i, v); |
| 303 | } |
| 304 | void setOperand(unsigned i, Value *v) { DbgInfoIntrinsic::setOperand(i, v); } |
| 305 | }; |
| 306 | |
| 307 | /// This represents the llvm.dbg.declare instruction. |
| 308 | class DbgDeclareInst : public DbgVariableIntrinsic { |
| 309 | public: |
| 310 | Value *getAddress() const { |
| 311 | assert(getNumVariableLocationOps() == 1 &&
| 312 | "dbg.declare must have exactly 1 location operand.");
| 313 | return getVariableLocationOp(0); |
| 314 | } |
| 315 | |
| 316 | /// \name Casting methods |
| 317 | /// @{ |
| 318 | static bool classof(const IntrinsicInst *I) { |
| 319 | return I->getIntrinsicID() == Intrinsic::dbg_declare; |
| 320 | } |
| 321 | static bool classof(const Value *V) { |
| 322 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 323 | } |
| 324 | /// @} |
| 325 | }; |
| 326 | |
| 327 | /// This represents the llvm.dbg.addr instruction. |
| 328 | class DbgAddrIntrinsic : public DbgVariableIntrinsic { |
| 329 | public: |
| 330 | Value *getAddress() const { |
| 331 | assert(getNumVariableLocationOps() == 1 &&
| 332 | "dbg.addr must have exactly 1 location operand.");
| 333 | return getVariableLocationOp(0); |
| 334 | } |
| 335 | |
| 336 | /// \name Casting methods |
| 337 | /// @{ |
| 338 | static bool classof(const IntrinsicInst *I) { |
| 339 | return I->getIntrinsicID() == Intrinsic::dbg_addr; |
| 340 | } |
| 341 | static bool classof(const Value *V) { |
| 342 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 343 | } |
| 344 | }; |
| 345 | |
| 346 | /// This represents the llvm.dbg.value instruction. |
| 347 | class DbgValueInst : public DbgVariableIntrinsic { |
| 348 | public: |
| 349 | // The default argument should only be used in ISel, and the default option |
| 350 | // should be removed once ISel support for multiple location ops is complete. |
| 351 | Value *getValue(unsigned OpIdx = 0) const { |
| 352 | return getVariableLocationOp(OpIdx); |
| 353 | } |
| 354 | iterator_range<location_op_iterator> getValues() const { |
| 355 | return location_ops(); |
| 356 | } |
| 357 | |
| 358 | /// \name Casting methods |
| 359 | /// @{ |
| 360 | static bool classof(const IntrinsicInst *I) { |
| 361 | return I->getIntrinsicID() == Intrinsic::dbg_value; |
| 362 | } |
| 363 | static bool classof(const Value *V) { |
| 364 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 365 | } |
| 366 | /// @} |
| 367 | }; |
| 368 | |
| 369 | /// This represents the llvm.dbg.label instruction. |
| 370 | class DbgLabelInst : public DbgInfoIntrinsic { |
| 371 | public: |
| 372 | DILabel *getLabel() const { return cast<DILabel>(getRawLabel()); } |
| 373 | |
| 374 | Metadata *getRawLabel() const { |
| 375 | return cast<MetadataAsValue>(getArgOperand(0))->getMetadata(); |
| 376 | } |
| 377 | |
| 378 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 379 | /// @{ |
| 380 | static bool classof(const IntrinsicInst *I) { |
| 381 | return I->getIntrinsicID() == Intrinsic::dbg_label; |
| 382 | } |
| 383 | static bool classof(const Value *V) { |
| 384 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 385 | } |
| 386 | /// @} |
| 387 | }; |
| 388 | |
| 389 | /// This is the common base class for vector predication intrinsics. |
| 390 | class VPIntrinsic : public IntrinsicInst { |
| 391 | public: |
| 392 | /// \brief Declares a llvm.vp.* intrinsic in \p M that matches the parameters |
| 393 | /// \p Params. |
| 394 | static Function *getDeclarationForParams(Module *M, Intrinsic::ID, |
| 395 | ArrayRef<Value *> Params); |
| 396 | |
| 397 | static Optional<unsigned> getMaskParamPos(Intrinsic::ID IntrinsicID); |
| 398 | static Optional<unsigned> getVectorLengthParamPos(Intrinsic::ID IntrinsicID); |
| 399 | |
| 400 | /// The llvm.vp.* intrinsic for this instruction opcode. |
| 401 | static Intrinsic::ID getForOpcode(unsigned OC); |
| 402 | |
| 403 | // Whether \p ID is a VP intrinsic ID. |
| 404 | static bool isVPIntrinsic(Intrinsic::ID); |
| 405 | |
| 406 | /// \return The mask parameter or nullptr. |
| 407 | Value *getMaskParam() const; |
| 408 | void setMaskParam(Value *); |
| 409 | |
| 410 | /// \return The vector length parameter or nullptr. |
| 411 | Value *getVectorLengthParam() const; |
| 412 | void setVectorLengthParam(Value *); |
| 413 | |
| 414 | /// \return Whether the vector length param can be ignored. |
| 415 | bool canIgnoreVectorLengthParam() const; |
| 416 | |
| 417 | /// \return The static element count (vector number of elements) the vector |
| 418 | /// length parameter applies to. |
| 419 | ElementCount getStaticVectorLength() const; |
| 420 | |
| 421 | /// \return The alignment of the pointer used by this load/store/gather or |
| 422 | /// scatter. |
| 423 | MaybeAlign getPointerAlignment() const; |
| 424 | // MaybeAlign setPointerAlignment(Align NewAlign); // TODO |
| 425 | |
| 426 | /// \return The pointer operand of this load, store, gather or scatter. |
| 427 | Value *getMemoryPointerParam() const; |
| 428 | static Optional<unsigned> getMemoryPointerParamPos(Intrinsic::ID); |
| 429 | |
| 430 | /// \return The data (payload) operand of this store or scatter. |
| 431 | Value *getMemoryDataParam() const; |
| 432 | static Optional<unsigned> getMemoryDataParamPos(Intrinsic::ID); |
| 433 | |
| 434 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
| 435 | static bool classof(const IntrinsicInst *I) { |
| 436 | return isVPIntrinsic(I->getIntrinsicID()); |
| 437 | } |
| 438 | static bool classof(const Value *V) { |
| 439 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 440 | } |
| 441 | |
| 442 | // Equivalent non-predicated opcode |
| 443 | Optional<unsigned> getFunctionalOpcode() const { |
| 444 | return getFunctionalOpcodeForVP(getIntrinsicID()); |
| 445 | } |
| 446 | |
| 447 | // Equivalent non-predicated opcode |
| 448 | static Optional<unsigned> getFunctionalOpcodeForVP(Intrinsic::ID ID); |
| 449 | }; |
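| | |
| | // Sketch (illustrative helper, not an LLVM API): a VP call whose mask is |
| | // all-ones and whose explicit vector length can be ignored behaves like the |
| | // plain instruction named by getFunctionalOpcode(). getMaskParam() may |
| | // return nullptr, so it is checked before use. |
| | static bool behavesLikeFunctionalOp(const VPIntrinsic &VPI) { |
| |   Value *Mask = VPI.getMaskParam(); |
| |   bool AllTrue = Mask && isa<Constant>(Mask) && |
| |                  cast<Constant>(Mask)->isAllOnesValue(); |
| |   return AllTrue && VPI.canIgnoreVectorLengthParam(); |
| | } |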
| 450 | |
| 451 | /// This is the common base class for constrained floating point intrinsics. |
| 452 | class ConstrainedFPIntrinsic : public IntrinsicInst { |
| 453 | public: |
| 454 | bool isUnaryOp() const; |
| 455 | bool isTernaryOp() const; |
| 456 | Optional<RoundingMode> getRoundingMode() const; |
| 457 | Optional<fp::ExceptionBehavior> getExceptionBehavior() const; |
| 458 | bool isDefaultFPEnvironment() const; |
| 459 | |
| 460 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
| 461 | static bool classof(const IntrinsicInst *I); |
| 462 | static bool classof(const Value *V) { |
| 463 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 464 | } |
| 465 | }; |
| 466 | |
| 467 | /// Constrained floating point compare intrinsics. |
| 468 | class ConstrainedFPCmpIntrinsic : public ConstrainedFPIntrinsic { |
| 469 | public: |
| 470 | FCmpInst::Predicate getPredicate() const; |
| 471 | |
| 472 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
| 473 | static bool classof(const IntrinsicInst *I) { |
| 474 | switch (I->getIntrinsicID()) { |
| 475 | case Intrinsic::experimental_constrained_fcmp: |
| 476 | case Intrinsic::experimental_constrained_fcmps: |
| 477 | return true; |
| 478 | default: |
| 479 | return false; |
| 480 | } |
| 481 | } |
| 482 | static bool classof(const Value *V) { |
| 483 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 484 | } |
| 485 | }; |
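| | |
| | // Sketch (hypothetical helper): getRoundingMode() returns None for |
| | // constrained intrinsics that carry no rounding-mode operand, so the |
| | // Optional must be unpacked before comparing. |
| | static bool roundsToNearest(const ConstrainedFPIntrinsic &CFPI) { |
| |   if (Optional<RoundingMode> RM = CFPI.getRoundingMode()) |
| |     return *RM == RoundingMode::NearestTiesToEven; |
| |   return false; |
| | } |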
| 486 | |
| 487 | /// This class represents min/max intrinsics. |
| 488 | class MinMaxIntrinsic : public IntrinsicInst { |
| 489 | public: |
| 490 | static bool classof(const IntrinsicInst *I) { |
| 491 | switch (I->getIntrinsicID()) { |
| 492 | case Intrinsic::umin: |
| 493 | case Intrinsic::umax: |
| 494 | case Intrinsic::smin: |
| 495 | case Intrinsic::smax: |
| 496 | return true; |
| 497 | default: |
| 498 | return false; |
| 499 | } |
| 500 | } |
| 501 | static bool classof(const Value *V) { |
| 502 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 503 | } |
| 504 | |
| 505 | Value *getLHS() const { return const_cast<Value *>(getArgOperand(0)); } |
| 506 | Value *getRHS() const { return const_cast<Value *>(getArgOperand(1)); } |
| 507 | |
| 508 | /// Returns the comparison predicate underlying the intrinsic. |
| 509 | ICmpInst::Predicate getPredicate() const { |
| 510 | switch (getIntrinsicID()) { |
| 511 | case Intrinsic::umin: |
| 512 | return ICmpInst::Predicate::ICMP_ULT; |
| 513 | case Intrinsic::umax: |
| 514 | return ICmpInst::Predicate::ICMP_UGT; |
| 515 | case Intrinsic::smin: |
| 516 | return ICmpInst::Predicate::ICMP_SLT; |
| 517 | case Intrinsic::smax: |
| 518 | return ICmpInst::Predicate::ICMP_SGT; |
| 519 | default: |
| 520 | llvm_unreachable("Invalid intrinsic")__builtin_unreachable(); |
| 521 | } |
| 522 | } |
| 523 | |
| 524 | /// Whether the intrinsic is signed or unsigned. |
| 525 | bool isSigned() const { return ICmpInst::isSigned(getPredicate()); } |
| 526 | }; |
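| | |
| | // Sketch: expanding a min/max intrinsic into the icmp+select it |
| | // abbreviates, via the predicate mapping above. Assumes an IRBuilder<> B |
| | // already positioned where the replacement should go; helper name is ours. |
| | static Value *expandMinMax(MinMaxIntrinsic &MMI, IRBuilder<> &B) { |
| |   Value *Cmp = B.CreateICmp(MMI.getPredicate(), MMI.getLHS(), MMI.getRHS()); |
| |   return B.CreateSelect(Cmp, MMI.getLHS(), MMI.getRHS()); |
| | } |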
| 527 | |
| 528 | /// This class represents an intrinsic that is based on a binary operation. |
| 529 | /// This includes op.with.overflow and saturating add/sub intrinsics. |
| 530 | class BinaryOpIntrinsic : public IntrinsicInst { |
| 531 | public: |
| 532 | static bool classof(const IntrinsicInst *I) { |
| 533 | switch (I->getIntrinsicID()) { |
| 534 | case Intrinsic::uadd_with_overflow: |
| 535 | case Intrinsic::sadd_with_overflow: |
| 536 | case Intrinsic::usub_with_overflow: |
| 537 | case Intrinsic::ssub_with_overflow: |
| 538 | case Intrinsic::umul_with_overflow: |
| 539 | case Intrinsic::smul_with_overflow: |
| 540 | case Intrinsic::uadd_sat: |
| 541 | case Intrinsic::sadd_sat: |
| 542 | case Intrinsic::usub_sat: |
| 543 | case Intrinsic::ssub_sat: |
| 544 | return true; |
| 545 | default: |
| 546 | return false; |
| 547 | } |
| 548 | } |
| 549 | static bool classof(const Value *V) { |
| 550 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 551 | } |
| 552 | |
| 553 | Value *getLHS() const { return const_cast<Value *>(getArgOperand(0)); } |
| 554 | Value *getRHS() const { return const_cast<Value *>(getArgOperand(1)); } |
| 555 | |
| 556 | /// Returns the binary operation underlying the intrinsic. |
| 557 | Instruction::BinaryOps getBinaryOp() const; |
| 558 | |
| 559 | /// Whether the intrinsic is signed or unsigned. |
| 560 | bool isSigned() const; |
| 561 | |
| 562 | /// Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap. |
| 563 | unsigned getNoWrapKind() const; |
| 564 | }; |
| 565 | |
| 566 | /// Represents an op.with.overflow intrinsic. |
| 567 | class WithOverflowInst : public BinaryOpIntrinsic { |
| 568 | public: |
| 569 | static bool classof(const IntrinsicInst *I) { |
| 570 | switch (I->getIntrinsicID()) { |
| 571 | case Intrinsic::uadd_with_overflow: |
| 572 | case Intrinsic::sadd_with_overflow: |
| 573 | case Intrinsic::usub_with_overflow: |
| 574 | case Intrinsic::ssub_with_overflow: |
| 575 | case Intrinsic::umul_with_overflow: |
| 576 | case Intrinsic::smul_with_overflow: |
| 577 | return true; |
| 578 | default: |
| 579 | return false; |
| 580 | } |
| 581 | } |
| 582 | static bool classof(const Value *V) { |
| 583 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 584 | } |
| 585 | }; |
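| | |
| | // Sketch: op.with.overflow intrinsics return a {result, i1 overflow} pair, |
| | // so consumers are normally extractvalue instructions. This hypothetical |
| | // helper collects the users of the overflow bit (aggregate index 1). |
| | static void collectOverflowUsers(WithOverflowInst &WO, |
| |                                  SmallVectorImpl<ExtractValueInst *> &Out) { |
| |   for (User *U : WO.users()) |
| |     if (auto *EV = dyn_cast<ExtractValueInst>(U)) |
| |       if (EV->getNumIndices() == 1 && EV->getIndices()[0] == 1) |
| |         Out.push_back(EV); |
| | } |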
| 586 | |
| 587 | /// Represents a saturating add/sub intrinsic. |
| 588 | class SaturatingInst : public BinaryOpIntrinsic { |
| 589 | public: |
| 590 | static bool classof(const IntrinsicInst *I) { |
| 591 | switch (I->getIntrinsicID()) { |
| 592 | case Intrinsic::uadd_sat: |
| 593 | case Intrinsic::sadd_sat: |
| 594 | case Intrinsic::usub_sat: |
| 595 | case Intrinsic::ssub_sat: |
| 596 | return true; |
| 597 | default: |
| 598 | return false; |
| 599 | } |
| 600 | } |
| 601 | static bool classof(const Value *V) { |
| 602 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 603 | } |
| 604 | }; |
| 605 | |
| 606 | /// Common base class for all memory intrinsics. Simply provides |
| 607 | /// common methods. |
| 608 | /// Written as CRTP to avoid a common base class amongst the |
| 609 | /// three atomicity hierarchies. |
| 610 | template <typename Derived> class MemIntrinsicBase : public IntrinsicInst { |
| 611 | private: |
| 612 | enum { ARG_DEST = 0, ARG_LENGTH = 2 }; |
| 613 | |
| 614 | public: |
| 615 | Value *getRawDest() const { |
| 616 | return const_cast<Value *>(getArgOperand(ARG_DEST)); |
| 617 | } |
| 618 | const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); } |
| 619 | Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); } |
| 620 | |
| 621 | Value *getLength() const { |
| 622 | return const_cast<Value *>(getArgOperand(ARG_LENGTH)); |
| 623 | } |
| 624 | const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); } |
| 625 | Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); } |
| 626 | |
| 627 | /// This is just like getRawDest, but it strips off any cast |
| 628 | /// instructions (including addrspacecast) that feed it, giving the |
| 629 | /// original input. The returned value is guaranteed to be a pointer. |
| 630 | Value *getDest() const { return getRawDest()->stripPointerCasts(); } |
| 631 | |
| 632 | unsigned getDestAddressSpace() const { |
| 633 | return cast<PointerType>(getRawDest()->getType())->getAddressSpace(); |
| 634 | } |
| 635 | |
| 636 | /// FIXME: Remove this function once transition to Align is over. |
| 637 | /// Use getDestAlign() instead. |
| 638 | unsigned getDestAlignment() const { |
| 639 | if (auto MA = getParamAlign(ARG_DEST)) |
| 640 | return MA->value(); |
| 641 | return 0; |
| 642 | } |
| 643 | MaybeAlign getDestAlign() const { return getParamAlign(ARG_DEST); } |
| 644 | |
| 645 | /// Set the specified arguments of the instruction. |
| 646 | void setDest(Value *Ptr) { |
| 647 | assert(getRawDest()->getType() == Ptr->getType() && |
| 648 | "setDest called with pointer of wrong type!"); |
| 649 | setArgOperand(ARG_DEST, Ptr); |
| 650 | } |
| 651 | |
| 652 | /// FIXME: Remove this function once transition to Align is over. |
| 653 | /// Use the version that takes MaybeAlign instead of this one. |
| 654 | void setDestAlignment(unsigned Alignment) { |
| 655 | setDestAlignment(MaybeAlign(Alignment)); |
| 656 | } |
| 657 | void setDestAlignment(MaybeAlign Alignment) { |
| 658 | removeParamAttr(ARG_DEST, Attribute::Alignment); |
| 659 | if (Alignment) |
| 660 | addParamAttr(ARG_DEST, |
| 661 | Attribute::getWithAlignment(getContext(), *Alignment)); |
| 662 | } |
| 663 | void setDestAlignment(Align Alignment) { |
| 664 | removeParamAttr(ARG_DEST, Attribute::Alignment); |
| 665 | addParamAttr(ARG_DEST, |
| 666 | Attribute::getWithAlignment(getContext(), Alignment)); |
| 667 | } |
| 668 | |
| 669 | void setLength(Value *L) { |
| 670 | assert(getLength()->getType() == L->getType() && |
| 671 | "setLength called with value of wrong type!"); |
| 672 | setArgOperand(ARG_LENGTH, L); |
| 673 | } |
| 674 | }; |
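| | |
| | // Sketch: because the accessors live in this CRTP base, one template works |
| | // for every instantiation (plain, atomic, or "any" memory intrinsics). |
| | // Threshold and the helper name are illustrative. |
| | template <typename MemInstT> |
| | static bool isSmallConstantMemOp(const MemInstT &MI, uint64_t Threshold) { |
| |   if (auto *Len = dyn_cast<ConstantInt>(MI.getLength())) |
| |     return Len->getZExtValue() <= Threshold; |
| |   return false; // length is not a compile-time constant |
| | } |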
| 675 | |
| 676 | /// Common base class for all memory transfer intrinsics. Simply provides |
| 677 | /// common methods. |
| 678 | template <class BaseCL> class MemTransferBase : public BaseCL { |
| 679 | private: |
| 680 | enum { ARG_SOURCE = 1 }; |
| 681 | |
| 682 | public: |
| 683 | /// Return the arguments to the instruction. |
| 684 | Value *getRawSource() const { |
| 685 | return const_cast<Value *>(BaseCL::getArgOperand(ARG_SOURCE)); |
| 686 | } |
| 687 | const Use &getRawSourceUse() const { |
| 688 | return BaseCL::getArgOperandUse(ARG_SOURCE); |
| 689 | } |
| 690 | Use &getRawSourceUse() { return BaseCL::getArgOperandUse(ARG_SOURCE); } |
| 691 | |
| 692 | /// This is just like getRawSource, but it strips off any cast |
| 693 | /// instructions that feed it, giving the original input. The returned |
| 694 | /// value is guaranteed to be a pointer. |
| 695 | Value *getSource() const { return getRawSource()->stripPointerCasts(); } |
| 696 | |
| 697 | unsigned getSourceAddressSpace() const { |
| 698 | return cast<PointerType>(getRawSource()->getType())->getAddressSpace(); |
| 699 | } |
| 700 | |
| 701 | /// FIXME: Remove this function once transition to Align is over. |
| 702 | /// Use getSourceAlign() instead. |
| 703 | unsigned getSourceAlignment() const { |
| 704 | if (auto MA = BaseCL::getParamAlign(ARG_SOURCE)) |
| 705 | return MA->value(); |
| 706 | return 0; |
| 707 | } |
| 708 | |
| 709 | MaybeAlign getSourceAlign() const { |
| 710 | return BaseCL::getParamAlign(ARG_SOURCE); |
| 711 | } |
| 712 | |
| 713 | void setSource(Value *Ptr) { |
| 714 | assert(getRawSource()->getType() == Ptr->getType() && |
| 715 | "setSource called with pointer of wrong type!"); |
| 716 | BaseCL::setArgOperand(ARG_SOURCE, Ptr); |
| 717 | } |
| 718 | |
| 719 | /// FIXME: Remove this function once transition to Align is over. |
| 720 | /// Use the version that takes MaybeAlign instead of this one. |
| 721 | void setSourceAlignment(unsigned Alignment) { |
| 722 | setSourceAlignment(MaybeAlign(Alignment)); |
| 723 | } |
| 724 | void setSourceAlignment(MaybeAlign Alignment) { |
| 725 | BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment); |
| 726 | if (Alignment) |
| 727 | BaseCL::addParamAttr(ARG_SOURCE, Attribute::getWithAlignment( |
| 728 | BaseCL::getContext(), *Alignment)); |
| 729 | } |
| 730 | void setSourceAlignment(Align Alignment) { |
| 731 | BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment); |
| 732 | BaseCL::addParamAttr(ARG_SOURCE, Attribute::getWithAlignment( |
| 733 | BaseCL::getContext(), Alignment)); |
| 734 | } |
| 735 | }; |
| 736 | |
| 737 | /// Common base class for all memset intrinsics. Simply provides |
| 738 | /// common methods. |
| 739 | template <class BaseCL> class MemSetBase : public BaseCL { |
| 740 | private: |
| 741 | enum { ARG_VALUE = 1 }; |
| 742 | |
| 743 | public: |
| 744 | Value *getValue() const { |
| 745 | return const_cast<Value *>(BaseCL::getArgOperand(ARG_VALUE)); |
| 746 | } |
| 747 | const Use &getValueUse() const { return BaseCL::getArgOperandUse(ARG_VALUE); } |
| 748 | Use &getValueUse() { return BaseCL::getArgOperandUse(ARG_VALUE); } |
| 749 | |
| 750 | void setValue(Value *Val) { |
| 751 | assert(getValue()->getType() == Val->getType() && |
| 752 | "setValue called with value of wrong type!"); |
| 753 | BaseCL::setArgOperand(ARG_VALUE, Val); |
| 754 | } |
| 755 | }; |
| 756 | |
| 757 | // The common base class for the atomic memset/memmove/memcpy intrinsics |
| 758 | // i.e. llvm.element.unordered.atomic.memset/memcpy/memmove |
| 759 | class AtomicMemIntrinsic : public MemIntrinsicBase<AtomicMemIntrinsic> { |
| 760 | private: |
| 761 | enum { ARG_ELEMENTSIZE = 3 }; |
| 762 | |
| 763 | public: |
| 764 | Value *getRawElementSizeInBytes() const { |
| 765 | return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE)); |
| 766 | } |
| 767 | |
| 768 | ConstantInt *getElementSizeInBytesCst() const { |
| 769 | return cast<ConstantInt>(getRawElementSizeInBytes()); |
| 770 | } |
| 771 | |
| 772 | uint32_t getElementSizeInBytes() const { |
| 773 | return getElementSizeInBytesCst()->getZExtValue(); |
| 774 | } |
| 775 | |
| 776 | void setElementSizeInBytes(Constant *V) { |
| 777 | assert(V->getType() == Type::getInt8Ty(getContext()) && |
| 778 | "setElementSizeInBytes called with value of wrong type!"); |
| 779 | setArgOperand(ARG_ELEMENTSIZE, V); |
| 780 | } |
| 781 | |
| 782 | static bool classof(const IntrinsicInst *I) { |
| 783 | switch (I->getIntrinsicID()) { |
| 784 | case Intrinsic::memcpy_element_unordered_atomic: |
| 785 | case Intrinsic::memmove_element_unordered_atomic: |
| 786 | case Intrinsic::memset_element_unordered_atomic: |
| 787 | return true; |
| 788 | default: |
| 789 | return false; |
| 790 | } |
| 791 | } |
| 792 | static bool classof(const Value *V) { |
| 793 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 794 | } |
| 795 | }; |
| 796 | |
| 797 | /// This class represents the atomic memset intrinsic |
| 798 | // i.e. llvm.element.unordered.atomic.memset |
| 799 | class AtomicMemSetInst : public MemSetBase<AtomicMemIntrinsic> { |
| 800 | public: |
| 801 | static bool classof(const IntrinsicInst *I) { |
| 802 | return I->getIntrinsicID() == Intrinsic::memset_element_unordered_atomic; |
| 803 | } |
| 804 | static bool classof(const Value *V) { |
| 805 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 806 | } |
| 807 | }; |
| 808 | |
| 809 | // This class wraps the atomic memcpy/memmove intrinsics |
| 810 | // i.e. llvm.element.unordered.atomic.memcpy/memmove |
| 811 | class AtomicMemTransferInst : public MemTransferBase<AtomicMemIntrinsic> { |
| 812 | public: |
| 813 | static bool classof(const IntrinsicInst *I) { |
| 814 | switch (I->getIntrinsicID()) { |
| 815 | case Intrinsic::memcpy_element_unordered_atomic: |
| 816 | case Intrinsic::memmove_element_unordered_atomic: |
| 817 | return true; |
| 818 | default: |
| 819 | return false; |
| 820 | } |
| 821 | } |
| 822 | static bool classof(const Value *V) { |
| 823 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 824 | } |
| 825 | }; |
| 826 | |
| 827 | /// This class represents the atomic memcpy intrinsic |
| 828 | /// i.e. llvm.element.unordered.atomic.memcpy |
| 829 | class AtomicMemCpyInst : public AtomicMemTransferInst { |
| 830 | public: |
| 831 | static bool classof(const IntrinsicInst *I) { |
| 832 | return I->getIntrinsicID() == Intrinsic::memcpy_element_unordered_atomic; |
| 833 | } |
| 834 | static bool classof(const Value *V) { |
| 835 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 836 | } |
| 837 | }; |
| 838 | |
| 839 | /// This class represents the atomic memmove intrinsic |
| 840 | /// i.e. llvm.element.unordered.atomic.memmove |
| 841 | class AtomicMemMoveInst : public AtomicMemTransferInst { |
| 842 | public: |
| 843 | static bool classof(const IntrinsicInst *I) { |
| 844 | return I->getIntrinsicID() == Intrinsic::memmove_element_unordered_atomic; |
| 845 | } |
| 846 | static bool classof(const Value *V) { |
| 847 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 848 | } |
| 849 | }; |
| 850 | |
| 851 | /// This is the common base class for memset/memcpy/memmove. |
| 852 | class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> { |
| 853 | private: |
| 854 | enum { ARG_VOLATILE = 3 }; |
| 855 | |
| 856 | public: |
| 857 | ConstantInt *getVolatileCst() const { |
| 858 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(ARG_VOLATILE))); |
| 859 | } |
| 860 | |
| 861 | bool isVolatile() const { return !getVolatileCst()->isZero(); } |
| 862 | |
| 863 | void setVolatile(Constant *V) { setArgOperand(ARG_VOLATILE, V); } |
| 864 | |
| 865 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
| 866 | static bool classof(const IntrinsicInst *I) { |
| 867 | switch (I->getIntrinsicID()) { |
| 868 | case Intrinsic::memcpy: |
| 869 | case Intrinsic::memmove: |
| 870 | case Intrinsic::memset: |
| 871 | case Intrinsic::memcpy_inline: |
| 872 | return true; |
| 873 | default: |
| 874 | return false; |
| 875 | } |
| 876 | } |
| 877 | static bool classof(const Value *V) { |
| 878 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 879 | } |
| 880 | }; |
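| | |
| | // Sketch of the guard an optimization pass might apply before rewriting |
| | // one of these calls: volatile transfers must be left untouched, and a |
| | // known destination alignment is often needed to widen accesses. The |
| | // helper name is hypothetical. |
| | static bool isTransformableMemOp(const MemIntrinsic &MI) { |
| |   return !MI.isVolatile() && MI.getDestAlign().hasValue(); |
| | } |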
| 881 | |
| 882 | /// This class wraps the llvm.memset intrinsic. |
| 883 | class MemSetInst : public MemSetBase<MemIntrinsic> { |
| 884 | public: |
| 885 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
| 886 | static bool classof(const IntrinsicInst *I) { |
| 887 | return I->getIntrinsicID() == Intrinsic::memset; |
| 888 | } |
| 889 | static bool classof(const Value *V) { |
| 890 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 891 | } |
| 892 | }; |
| 893 | |
| 894 | /// This class wraps the llvm.memcpy/memmove intrinsics. |
| 895 | class MemTransferInst : public MemTransferBase<MemIntrinsic> { |
| 896 | public: |
| 897 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
| 898 | static bool classof(const IntrinsicInst *I) { |
| 899 | switch (I->getIntrinsicID()) { |
| 900 | case Intrinsic::memcpy: |
| 901 | case Intrinsic::memmove: |
| 902 | case Intrinsic::memcpy_inline: |
| 903 | return true; |
| 904 | default: |
| 905 | return false; |
| 906 | } |
| 907 | } |
| 908 | static bool classof(const Value *V) { |
| 909 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 910 | } |
| 911 | }; |
| 912 | |
| 913 | /// This class wraps the llvm.memcpy intrinsic. |
| 914 | class MemCpyInst : public MemTransferInst { |
| 915 | public: |
| 916 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
| 917 | static bool classof(const IntrinsicInst *I) { |
| 918 | return I->getIntrinsicID() == Intrinsic::memcpy || |
| 919 | I->getIntrinsicID() == Intrinsic::memcpy_inline; |
| 920 | } |
| 921 | static bool classof(const Value *V) { |
| 922 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 923 | } |
| 924 | }; |
| 925 | |
| 926 | /// This class wraps the llvm.memmove intrinsic. |
| 927 | class MemMoveInst : public MemTransferInst { |
| 928 | public: |
| 929 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
| 930 | static bool classof(const IntrinsicInst *I) { |
| 931 | return I->getIntrinsicID() == Intrinsic::memmove; |
| 932 | } |
| 933 | static bool classof(const Value *V) { |
| 934 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 935 | } |
| 936 | }; |
| 937 | |
| 938 | /// This class wraps the llvm.memcpy.inline intrinsic. |
| 939 | class MemCpyInlineInst : public MemCpyInst { |
| 940 | public: |
| 941 | ConstantInt *getLength() const { |
| 942 | return cast<ConstantInt>(MemCpyInst::getLength()); |
| 943 | } |
| 944 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
| 945 | static bool classof(const IntrinsicInst *I) { |
| 946 | return I->getIntrinsicID() == Intrinsic::memcpy_inline; |
| 947 | } |
| 948 | static bool classof(const Value *V) { |
| 949 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 950 | } |
| 951 | }; |
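| | |
| | // Sketch: creating an llvm.memcpy with IRBuilder and viewing the resulting |
| | // CallInst through this hierarchy. The fixed size and alignments are |
| | // placeholder values for illustration. |
| | static MemCpyInst *emitSmallCopy(IRBuilder<> &B, Value *Dst, Value *Src) { |
| |   CallInst *CI = B.CreateMemCpy(Dst, /*DstAlign=*/Align(4), |
| |                                 Src, /*SrcAlign=*/Align(4), /*Size=*/16); |
| |   return cast<MemCpyInst>(CI); // intrinsic ID is known to be memcpy |
| | } |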
| 952 | |
| 953 | // The common base class for any memset/memmove/memcpy intrinsics, |
| 954 | // whether atomic or non-atomic, |
| 955 | // i.e. llvm.element.unordered.atomic.memset/memcpy/memmove |
| 956 | // and llvm.memset/memcpy/memmove |
| 957 | class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> { |
| 958 | public: |
| 959 | bool isVolatile() const { |
| 960 | // Only the non-atomic intrinsics can be volatile |
| 961 | if (auto *MI = dyn_cast<MemIntrinsic>(this)) |
| 962 | return MI->isVolatile(); |
| 963 | return false; |
| 964 | } |
| 965 | |
| 966 | static bool classof(const IntrinsicInst *I) { |
| 967 | switch (I->getIntrinsicID()) { |
| 968 | case Intrinsic::memcpy: |
| 969 | case Intrinsic::memcpy_inline: |
| 970 | case Intrinsic::memmove: |
| 971 | case Intrinsic::memset: |
| 972 | case Intrinsic::memcpy_element_unordered_atomic: |
| 973 | case Intrinsic::memmove_element_unordered_atomic: |
| 974 | case Intrinsic::memset_element_unordered_atomic: |
| 975 | return true; |
| 976 | default: |
| 977 | return false; |
| 978 | } |
| 979 | } |
| 980 | static bool classof(const Value *V) { |
| 981 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 982 | } |
| 983 | }; |
| 984 | |
| 985 | /// This class represents any memset intrinsic |
| 986 | // i.e. llvm.element.unordered.atomic.memset |
| 987 | // and llvm.memset |
| 988 | class AnyMemSetInst : public MemSetBase<AnyMemIntrinsic> { |
| 989 | public: |
| 990 | static bool classof(const IntrinsicInst *I) { |
| 991 | switch (I->getIntrinsicID()) { |
| 992 | case Intrinsic::memset: |
| 993 | case Intrinsic::memset_element_unordered_atomic: |
| 994 | return true; |
| 995 | default: |
| 996 | return false; |
| 997 | } |
| 998 | } |
| 999 | static bool classof(const Value *V) { |
| 1000 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1001 | } |
| 1002 | }; |
| 1003 | |
| 1004 | // This class wraps any memcpy/memmove intrinsics |
| 1005 | // i.e. llvm.element.unordered.atomic.memcpy/memmove |
| 1006 | // and llvm.memcpy/memmove |
| 1007 | class AnyMemTransferInst : public MemTransferBase<AnyMemIntrinsic> { |
| 1008 | public: |
| 1009 | static bool classof(const IntrinsicInst *I) { |
| 1010 | switch (I->getIntrinsicID()) { |
| 1011 | case Intrinsic::memcpy: |
| 1012 | case Intrinsic::memcpy_inline: |
| 1013 | case Intrinsic::memmove: |
| 1014 | case Intrinsic::memcpy_element_unordered_atomic: |
| 1015 | case Intrinsic::memmove_element_unordered_atomic: |
| 1016 | return true; |
| 1017 | default: |
| 1018 | return false; |
| 1019 | } |
| 1020 | } |
| 1021 | static bool classof(const Value *V) { |
| 1022 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1023 | } |
| 1024 | }; |
| 1025 | |
| 1026 | /// This class represents any memcpy intrinsic |
| 1027 | /// i.e. llvm.element.unordered.atomic.memcpy |
| 1028 | /// and llvm.memcpy |
| 1029 | class AnyMemCpyInst : public AnyMemTransferInst { |
| 1030 | public: |
| 1031 | static bool classof(const IntrinsicInst *I) { |
| 1032 | switch (I->getIntrinsicID()) { |
| 1033 | case Intrinsic::memcpy: |
| 1034 | case Intrinsic::memcpy_inline: |
| 1035 | case Intrinsic::memcpy_element_unordered_atomic: |
| 1036 | return true; |
| 1037 | default: |
| 1038 | return false; |
| 1039 | } |
| 1040 | } |
| 1041 | static bool classof(const Value *V) { |
| 1042 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1043 | } |
| 1044 | }; |
| 1045 | |
| 1046 | /// This class represents any memmove intrinsic |
| 1047 | /// i.e. llvm.element.unordered.atomic.memmove |
| 1048 | /// and llvm.memmove |
| 1049 | class AnyMemMoveInst : public AnyMemTransferInst { |
| 1050 | public: |
| 1051 | static bool classof(const IntrinsicInst *I) { |
| 1052 | switch (I->getIntrinsicID()) { |
| 1053 | case Intrinsic::memmove: |
| 1054 | case Intrinsic::memmove_element_unordered_atomic: |
| 1055 | return true; |
| 1056 | default: |
| 1057 | return false; |
| 1058 | } |
| 1059 | } |
| 1060 | static bool classof(const Value *V) { |
| 1061 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1062 | } |
| 1063 | }; |
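| | |
| | // Sketch: the "Any" classes let one dispatch handle plain and element-wise |
| | // atomic variants together, recovering the atomic distinction only where |
| | // it matters. The helper is illustrative, not an LLVM API. |
| | static StringRef describeMemOp(const IntrinsicInst &II) { |
| |   if (isa<AnyMemSetInst>(II)) |
| |     return isa<AtomicMemIntrinsic>(II) ? "atomic memset" : "memset"; |
| |   if (isa<AnyMemCpyInst>(II)) |
| |     return isa<AtomicMemIntrinsic>(II) ? "atomic memcpy" : "memcpy"; |
| |   if (isa<AnyMemMoveInst>(II)) |
| |     return isa<AtomicMemIntrinsic>(II) ? "atomic memmove" : "memmove"; |
| |   return "not a memory intrinsic"; |
| | } |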
| 1064 | |
| 1065 | /// This represents the llvm.va_start intrinsic. |
| 1066 | class VAStartInst : public IntrinsicInst { |
| 1067 | public: |
| 1068 | static bool classof(const IntrinsicInst *I) { |
| 1069 | return I->getIntrinsicID() == Intrinsic::vastart; |
| 1070 | } |
| 1071 | static bool classof(const Value *V) { |
| 1072 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1073 | } |
| 1074 | |
| 1075 | Value *getArgList() const { return const_cast<Value *>(getArgOperand(0)); } |
| 1076 | }; |
| 1077 | |
| 1078 | /// This represents the llvm.va_end intrinsic. |
| 1079 | class VAEndInst : public IntrinsicInst { |
| 1080 | public: |
| 1081 | static bool classof(const IntrinsicInst *I) { |
| 1082 | return I->getIntrinsicID() == Intrinsic::vaend; |
| 1083 | } |
| 1084 | static bool classof(const Value *V) { |
| 1085 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1086 | } |
| 1087 | |
| 1088 | Value *getArgList() const { return const_cast<Value *>(getArgOperand(0)); } |
| 1089 | }; |
| 1090 | |
| 1091 | /// This represents the llvm.va_copy intrinsic. |
| 1092 | class VACopyInst : public IntrinsicInst { |
| 1093 | public: |
| 1094 | static bool classof(const IntrinsicInst *I) { |
| 1095 | return I->getIntrinsicID() == Intrinsic::vacopy; |
| 1096 | } |
| 1097 | static bool classof(const Value *V) { |
| 1098 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1099 | } |
| 1100 | |
| 1101 | Value *getDest() const { return const_cast<Value *>(getArgOperand(0)); } |
| 1102 | Value *getSrc() const { return const_cast<Value *>(getArgOperand(1)); } |
| 1103 | }; |
| 1104 | |
| 1105 | /// This represents the llvm.instrprof_increment intrinsic. |
| 1106 | class InstrProfIncrementInst : public IntrinsicInst { |
| 1107 | public: |
| 1108 | static bool classof(const IntrinsicInst *I) { |
| 1109 | return I->getIntrinsicID() == Intrinsic::instrprof_increment; |
| 1110 | } |
| 1111 | static bool classof(const Value *V) { |
| 1112 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1113 | } |
| 1114 | |
| 1115 | GlobalVariable *getName() const { |
| 1116 | return cast<GlobalVariable>( |
| 1117 | const_cast<Value *>(getArgOperand(0))->stripPointerCasts()); |
| 1118 | } |
| 1119 | |
| 1120 | ConstantInt *getHash() const { |
| 1121 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1))); |
| 1122 | } |
| 1123 | |
| 1124 | ConstantInt *getNumCounters() const { |
| 1125 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2))); |
| 1126 | } |
| 1127 | |
| 1128 | ConstantInt *getIndex() const { |
| 1129 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3))); |
| 1130 | } |
| 1131 | |
| 1132 | Value *getStep() const; |
| 1133 | }; |
| 1134 | |
| 1135 | class InstrProfIncrementInstStep : public InstrProfIncrementInst { |
| 1136 | public: |
| 1137 | static bool classof(const IntrinsicInst *I) { |
| 1138 | return I->getIntrinsicID() == Intrinsic::instrprof_increment_step; |
| 1139 | } |
| 1140 | static bool classof(const Value *V) { |
| 1141 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1142 | } |
| 1143 | }; |
| 1144 | |
| 1145 | /// This represents the llvm.instrprof_value_profile intrinsic. |
| 1146 | class InstrProfValueProfileInst : public IntrinsicInst { |
| 1147 | public: |
| 1148 | static bool classof(const IntrinsicInst *I) { |
| 1149 | return I->getIntrinsicID() == Intrinsic::instrprof_value_profile; |
| 1150 | } |
| 1151 | static bool classof(const Value *V) { |
| 1152 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1153 | } |
| 1154 | |
| 1155 | GlobalVariable *getName() const { |
| 1156 | return cast<GlobalVariable>( |
| 1157 | const_cast<Value *>(getArgOperand(0))->stripPointerCasts()); |
| 1158 | } |
| 1159 | |
| 1160 | ConstantInt *getHash() const { |
| 1161 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1))); |
| 1162 | } |
| 1163 | |
| 1164 | Value *getTargetValue() const { |
| 1165 | return cast<Value>(const_cast<Value *>(getArgOperand(2))); |
| 1166 | } |
| 1167 | |
| 1168 | ConstantInt *getValueKind() const { |
| 1169 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3))); |
| 1170 | } |
| 1171 | |
| 1172 | // Returns the value site index. |
| 1173 | ConstantInt *getIndex() const { |
| 1174 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(4))); |
| 1175 | } |
| 1176 | }; |
| 1177 | |
| 1178 | class PseudoProbeInst : public IntrinsicInst { |
| 1179 | public: |
| 1180 | static bool classof(const IntrinsicInst *I) { |
| 1181 | return I->getIntrinsicID() == Intrinsic::pseudoprobe; |
| 1182 | } |
| 1183 | |
| 1184 | static bool classof(const Value *V) { |
| 1185 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1186 | } |
| 1187 | |
| 1188 | ConstantInt *getFuncGuid() const { |
| 1189 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(0))); |
| 1190 | } |
| 1191 | |
| 1192 | ConstantInt *getIndex() const { |
| 1193 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1))); |
| 1194 | } |
| 1195 | |
| 1196 | ConstantInt *getAttributes() const { |
| 1197 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2))); |
| 1198 | } |
| 1199 | |
| 1200 | ConstantInt *getFactor() const { |
| 1201 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3))); |
| 1202 | } |
| 1203 | }; |
| 1204 | |
| 1205 | class NoAliasScopeDeclInst : public IntrinsicInst { |
| 1206 | public: |
| 1207 | static bool classof(const IntrinsicInst *I) { |
| 1208 | return I->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl; |
| 1209 | } |
| 1210 | |
| 1211 | static bool classof(const Value *V) { |
| 1212 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1213 | } |
| 1214 | |
| 1215 | MDNode *getScopeList() const { |
| 1216 | auto *MV = |
| 1217 | cast<MetadataAsValue>(getOperand(Intrinsic::NoAliasScopeDeclScopeArg)); |
| 1218 | return cast<MDNode>(MV->getMetadata()); |
| 1219 | } |
| 1220 | |
| 1221 | void setScopeList(MDNode *ScopeList) { |
| 1222 | setOperand(Intrinsic::NoAliasScopeDeclScopeArg, |
| 1223 | MetadataAsValue::get(getContext(), ScopeList)); |
| 1224 | } |
| 1225 | }; |
| 1226 | |
| 1227 | // Defined in Statepoint.h -- NOT a subclass of IntrinsicInst |
| 1228 | class GCStatepointInst; |
| 1229 | |
| 1230 | /// Common base class for representing values projected from a statepoint. |
| 1231 | /// Currently, the only projections available are gc.result and gc.relocate. |
| 1232 | class GCProjectionInst : public IntrinsicInst { |
| 1233 | public: |
| 1234 | static bool classof(const IntrinsicInst *I) { |
| 1235 | return I->getIntrinsicID() == Intrinsic::experimental_gc_relocate || |
| 1236 | I->getIntrinsicID() == Intrinsic::experimental_gc_result; |
| 1237 | } |
| 1238 | |
| 1239 | static bool classof(const Value *V) { |
| 1240 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1241 | } |
| 1242 | |
| 1243 | /// Return true if this relocate is tied to the invoke statepoint. |
| 1244 | /// This includes relocates which are on the unwinding path. |
| 1245 | bool isTiedToInvoke() const { |
| 1246 | const Value *Token = getArgOperand(0); |
| 1247 | |
| 1248 | return isa<LandingPadInst>(Token) || isa<InvokeInst>(Token); |
| 1249 | } |
| 1250 | |
| 1251 | /// The statepoint with which this gc.relocate is associated. |
| 1252 | const GCStatepointInst *getStatepoint() const; |
| 1253 | }; |
| 1254 | |
| 1255 | /// Represents calls to the gc.relocate intrinsic. |
| 1256 | class GCRelocateInst : public GCProjectionInst { |
| 1257 | public: |
| 1258 | static bool classof(const IntrinsicInst *I) { |
| 1259 | return I->getIntrinsicID() == Intrinsic::experimental_gc_relocate; |
| 1260 | } |
| 1261 | |
| 1262 | static bool classof(const Value *V) { |
| 1263 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1264 | } |
| 1265 | |
| 1266 | /// The index into the associated statepoint's argument list |
| 1267 | /// which contains the base pointer of the pointer whose |
| 1268 | /// relocation this gc.relocate describes. |
| 1269 | unsigned getBasePtrIndex() const { |
| 1270 | return cast<ConstantInt>(getArgOperand(1))->getZExtValue(); |
| 1271 | } |
| 1272 | |
| 1273 | /// The index into the associated statepoint's argument list which |
| 1274 | /// contains the pointer whose relocation this gc.relocate describes. |
| 1275 | unsigned getDerivedPtrIndex() const { |
| 1276 | return cast<ConstantInt>(getArgOperand(2))->getZExtValue(); |
| 1277 | } |
| 1278 | |
| 1279 | Value *getBasePtr() const; |
| 1280 | Value *getDerivedPtr() const; |
| 1281 | }; |
| 1282 | |
| 1283 | /// Represents calls to the gc.result intrinsic. |
| 1284 | class GCResultInst : public GCProjectionInst { |
| 1285 | public: |
| 1286 | static bool classof(const IntrinsicInst *I) { |
| 1287 | return I->getIntrinsicID() == Intrinsic::experimental_gc_result; |
| 1288 | } |
| 1289 | |
| 1290 | static bool classof(const Value *V) { |
| 1291 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1292 | } |
| 1293 | }; |
| 1294 | |
| 1295 | |
| 1296 | /// This represents the llvm.assume intrinsic. |
| 1297 | class AssumeInst : public IntrinsicInst { |
| 1298 | public: |
| 1299 | static bool classof(const IntrinsicInst *I) { |
| 1300 | return I->getIntrinsicID() == Intrinsic::assume; |
| 1301 | } |
| 1302 | static bool classof(const Value *V) { |
| 1303 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
| 1304 | } |
| 1305 | }; |
| 1306 | |
| 1307 | } // end namespace llvm |
| 1308 | |
| 1309 | #endif // LLVM_IR_INTRINSICINST_H |
| 1 | //===-- llvm/Constants.h - Constant class subclass definitions --*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | /// @file |
| 10 | /// This file contains the declarations for the subclasses of Constant, |
| 11 | /// which represent the different flavors of constant values that live in LLVM. |
| 12 | /// Note that Constants are immutable (once created they never change) and are |
| 13 | /// fully shared by structural equivalence. This means that two structurally |
| 14 | /// equivalent constants will always have the same address. Constants are |
| 15 | /// created on demand as needed and never deleted: thus clients don't have to |
| 16 | /// worry about the lifetime of the objects. |
| 17 | // |
| 18 | //===----------------------------------------------------------------------===// |
| 19 | |
| 20 | #ifndef LLVM_IR_CONSTANTS_H |
| 21 | #define LLVM_IR_CONSTANTS_H |
| 22 | |
| 23 | #include "llvm/ADT/APFloat.h" |
| 24 | #include "llvm/ADT/APInt.h" |
| 25 | #include "llvm/ADT/ArrayRef.h" |
| 26 | #include "llvm/ADT/None.h" |
| 27 | #include "llvm/ADT/Optional.h" |
| 28 | #include "llvm/ADT/STLExtras.h" |
| 29 | #include "llvm/ADT/StringRef.h" |
| 30 | #include "llvm/IR/Constant.h" |
| 31 | #include "llvm/IR/DerivedTypes.h" |
| 32 | #include "llvm/IR/OperandTraits.h" |
| 33 | #include "llvm/IR/User.h" |
| 34 | #include "llvm/IR/Value.h" |
| 35 | #include "llvm/Support/Casting.h" |
| 36 | #include "llvm/Support/Compiler.h" |
| 37 | #include "llvm/Support/ErrorHandling.h" |
| 38 | #include <cassert> |
| 39 | #include <cstddef> |
| 40 | #include <cstdint> |
| 41 | |
| 42 | namespace llvm { |
| 43 | |
| 44 | template <class ConstantClass> struct ConstantAggrKeyType; |
| 45 | |
| 46 | /// Base class for constants with no operands. |
| 47 | /// |
| 48 | /// These constants have no operands; they represent their data directly. |
| 49 | /// Since they can be in use by unrelated modules (and are never based on |
| 50 | /// GlobalValues), it never makes sense to RAUW them. |
| 51 | class ConstantData : public Constant { |
| 52 | friend class Constant; |
| 53 | |
| 54 | Value *handleOperandChangeImpl(Value *From, Value *To) { |
| 55 | llvm_unreachable("Constant data does not have operands!")__builtin_unreachable(); |
| 56 | } |
| 57 | |
| 58 | protected: |
| 59 | explicit ConstantData(Type *Ty, ValueTy VT) : Constant(Ty, VT, nullptr, 0) {} |
| 60 | |
| 61 | void *operator new(size_t S) { return User::operator new(S, 0); } |
| 62 | |
| 63 | public: |
| 64 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
| 65 | |
| 66 | ConstantData(const ConstantData &) = delete; |
| 67 | |
| 68 | /// Methods to support type inquiry through isa, cast, and dyn_cast. |
| 69 | static bool classof(const Value *V) { |
| 70 | return V->getValueID() >= ConstantDataFirstVal && |
| 71 | V->getValueID() <= ConstantDataLastVal; |
| 72 | } |
| 73 | }; |
| 74 | |
| 75 | //===----------------------------------------------------------------------===// |
| 76 | /// This is the shared class of boolean and integer constants. This class |
| 77 | /// represents both boolean and integral constants. |
| 78 | /// Class for constant integers. |
| 79 | class ConstantInt final : public ConstantData { |
| 80 | friend class Constant; |
| 81 | |
| 82 | APInt Val; |
| 83 | |
| 84 | ConstantInt(IntegerType *Ty, const APInt &V); |
| 85 | |
| 86 | void destroyConstantImpl(); |
| 87 | |
| 88 | public: |
| 89 | ConstantInt(const ConstantInt &) = delete; |
| 90 | |
| 91 | static ConstantInt *getTrue(LLVMContext &Context); |
| 92 | static ConstantInt *getFalse(LLVMContext &Context); |
| 93 | static ConstantInt *getBool(LLVMContext &Context, bool V); |
| 94 | static Constant *getTrue(Type *Ty); |
| 95 | static Constant *getFalse(Type *Ty); |
| 96 | static Constant *getBool(Type *Ty, bool V); |
| 97 | |
| 98 | /// If Ty is a vector type, return a Constant with a splat of the given |
| 99 | /// value. Otherwise return a ConstantInt for the given value. |
| 100 | static Constant *get(Type *Ty, uint64_t V, bool IsSigned = false); |
| 101 | |
| 102 | /// Return a ConstantInt with the specified integer value for the specified |
| 103 | /// type. If the type is wider than 64 bits, the value will be zero-extended |
| 104 | /// to fit the type, unless IsSigned is true, in which case the value will |
| 105 | /// be interpreted as a 64-bit signed integer and sign-extended to fit |
| 106 | /// the type. |
| 107 | /// Get a ConstantInt for a specific value. |
| 108 | static ConstantInt *get(IntegerType *Ty, uint64_t V, bool IsSigned = false); |
| 109 | |
| 110 | /// Return a ConstantInt with the specified value for the specified type. The |
| 111 | /// value V will be canonicalized to an unsigned APInt. Accessing it with |
| 112 | /// either getSExtValue() or getZExtValue() will yield a correctly sized and |
| 113 | /// signed value for the type Ty. |
| 114 | /// Get a ConstantInt for a specific signed value. |
| 115 | static ConstantInt *getSigned(IntegerType *Ty, int64_t V); |
| 116 | static Constant *getSigned(Type *Ty, int64_t V); |
| 117 | |
| 118 | /// Return a ConstantInt with the specified value and an implied Type. The |
| 119 | /// type is the integer type that corresponds to the bit width of the value. |
| 120 | static ConstantInt *get(LLVMContext &Context, const APInt &V); |
| 121 | |
| 122 | /// Return a ConstantInt constructed from the string Str with the given |
| 123 | /// radix. |
| 124 | static ConstantInt *get(IntegerType *Ty, StringRef Str, uint8_t Radix); |
| 125 | |
| 126 | /// If Ty is a vector type, return a Constant with a splat of the given |
| 127 | /// value. Otherwise return a ConstantInt for the given value. |
| 128 | static Constant *get(Type *Ty, const APInt &V); |
| 129 | |
| 130 | /// Return the constant as an APInt value reference. This allows clients to |
| 131 | /// obtain a full-precision copy of the value. |
| 132 | /// Return the constant's value. |
| 133 | inline const APInt &getValue() const { return Val; } |
| 134 | |
| 135 | /// getBitWidth - Return the bitwidth of this constant. |
| 136 | unsigned getBitWidth() const { return Val.getBitWidth(); } |
| 137 | |
| 138 | /// Return the constant as a 64-bit unsigned integer value after it |
| 139 | /// has been zero extended as appropriate for the type of this constant. Note |
| 140 | /// that this method can assert if the value does not fit in 64 bits. |
| 141 | /// Return the zero extended value. |
| 142 | inline uint64_t getZExtValue() const { return Val.getZExtValue(); } |
| 143 | |
| 144 | /// Return the constant as a 64-bit integer value after it has been sign |
| 145 | /// extended as appropriate for the type of this constant. Note that |
| 146 | /// this method can assert if the value does not fit in 64 bits. |
| 147 | /// Return the sign extended value. |
| 148 | inline int64_t getSExtValue() const { return Val.getSExtValue(); } |
| 149 | |
| 150 | /// Return the constant as an llvm::MaybeAlign. |
| 151 | /// Note that this method can assert if the value does not fit in 64 bits or |
| 152 | /// is not a power of two. |
| 153 | inline MaybeAlign getMaybeAlignValue() const { |
| 154 | return MaybeAlign(getZExtValue()); |
| 155 | } |
| 156 | |
| 157 | /// Return the constant as an llvm::Align, interpreting `0` as `Align(1)`. |
| 158 | /// Note that this method can assert if the value does not fit in 64 bits or |
| 159 | /// is not a power of two. |
| 160 | inline Align getAlignValue() const { |
| 161 | return getMaybeAlignValue().valueOrOne(); |
| 162 | } |
| 163 | |
| 164 | /// A helper method that can be used to determine if the constant contained |
| 165 | /// within is equal to a constant. This only works for very small values, |
| 166 | /// because this is all that can be represented with all types. |
| 167 | /// Determine if this constant's value is the same as an unsigned char. |
| 168 | bool equalsInt(uint64_t V) const { return Val == V; } |
| 169 | |
| 170 | /// getType - Specialize the getType() method to always return an IntegerType, |
| 171 | /// which reduces the amount of casting needed in parts of the compiler. |
| 172 | /// |
| 173 | inline IntegerType *getType() const { |
| 174 | return cast<IntegerType>(Value::getType()); |
| 175 | } |
| 176 | |
| 177 | /// This static method returns true if the type Ty is big enough to |
| 178 | /// represent the value V. This can be used to avoid having the get method |
| 179 | /// assert when V is larger than Ty can represent. Note that there are two |
| 180 | /// versions of this method, one for unsigned and one for signed integers. |
| 181 | /// Although ConstantInt canonicalizes everything to an unsigned integer, |
| 182 | /// the signed version avoids callers having to convert a signed quantity |
| 183 | /// to the appropriate unsigned type before calling the method. |
| 184 | /// @returns true if V is a valid value for type Ty |
| 185 | /// Determine if the value is in range for the given type. |
| 186 | static bool isValueValidForType(Type *Ty, uint64_t V); |
| 187 | static bool isValueValidForType(Type *Ty, int64_t V); |
| 188 | |
| 189 | bool isNegative() const { return Val.isNegative(); } |
| 190 | |
| 191 | /// This is just a convenience method to make client code smaller for a |
| 192 | /// common code. It also correctly performs the comparison without the |
| 193 | /// potential for an assertion from getZExtValue(). |
| 194 | bool isZero() const { return Val.isNullValue(); } |
| 195 | |
| 196 | /// This is just a convenience method to make client code smaller for a |
| 197 | /// common case. It also correctly performs the comparison without the |
| 198 | /// potential for an assertion from getZExtValue(). |
| 199 | /// Determine if the value is one. |
| 200 | bool isOne() const { return Val.isOneValue(); } |
| 201 | |
| 202 | /// This function will return true iff every bit in this constant is set |
| 203 | /// to true. |
| 204 | /// @returns true iff this constant's bits are all set to true. |
| 205 | /// Determine if the value is all ones. |
| 206 | bool isMinusOne() const { return Val.isAllOnesValue(); } |
| 207 | |
| 208 | /// This function will return true iff this constant represents the largest |
| 209 | /// value that may be represented by the constant's type. |
| 210 | /// @returns true iff this is the largest value that may be represented |
| 211 | /// by this type. |
| 212 | /// Determine if the value is maximal. |
| 213 | bool isMaxValue(bool IsSigned) const { |
| 214 | if (IsSigned) |
| 215 | return Val.isMaxSignedValue(); |
| 216 | else |
| 217 | return Val.isMaxValue(); |
| 218 | } |
| 219 | |
| 220 | /// This function will return true iff this constant represents the smallest |
| 221 | /// value that may be represented by this constant's type. |
| 222 | /// @returns true if this is the smallest value that may be represented by |
| 223 | /// this type. |
| 224 | /// Determine if the value is minimal. |
| 225 | bool isMinValue(bool IsSigned) const { |
| 226 | if (IsSigned) |
| 227 | return Val.isMinSignedValue(); |
| 228 | else |
| 229 | return Val.isMinValue(); |
| 230 | } |
| 231 | |
| 232 | /// This function will return true iff this constant represents a value with |
| 233 | /// active bits bigger than 64 bits or a value greater than the given uint64_t |
| 234 | /// value. |
| 235 | /// @returns true iff this constant is greater or equal to the given number. |
| 236 | /// Determine if the value is greater or equal to the given number. |
| 237 | bool uge(uint64_t Num) const { return Val.uge(Num); } |
| 238 | |
| 239 | /// getLimitedValue - If the value is smaller than the specified limit, |
| 240 | /// return it, otherwise return the limit value. This causes the value |
| 241 | /// to saturate to the limit. |
| 242 | /// @returns the min of the value of the constant and the specified value |
| 243 | /// Get the constant's value with a saturation limit |
| 244 | uint64_t getLimitedValue(uint64_t Limit = ~0ULL) const { |
| 245 | return Val.getLimitedValue(Limit); |
| 246 | } |
| 247 | |
| 248 | /// Methods to support type inquiry through isa, cast, and dyn_cast. |
| 249 | static bool classof(const Value *V) { |
| 250 | return V->getValueID() == ConstantIntVal; |
| 251 | } |
| 252 | }; |
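| | |
| | // Sketch: creating and reading integer constants. Because constants are |
| | // uniqued by structural equivalence, both get() calls below yield the same |
| | // object. The demo function is illustrative only. |
| | static void constantIntDemo(LLVMContext &Ctx) { |
| |   IntegerType *I32 = Type::getInt32Ty(Ctx); |
| |   ConstantInt *A = ConstantInt::get(I32, -1, /*IsSigned=*/true); |
| |   ConstantInt *B = ConstantInt::get(Ctx, APInt(32, -1, /*isSigned=*/true)); |
| |   assert(A == B && A->isMinusOne() && A->getSExtValue() == -1); |
| |   (void)A; (void)B; |
| | } |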
| 253 | |
| 254 | //===----------------------------------------------------------------------===// |
| 255 | /// ConstantFP - Floating Point Values [float, double] |
| 256 | /// |
| 257 | class ConstantFP final : public ConstantData { |
| 258 | friend class Constant; |
| 259 | |
| 260 | APFloat Val; |
| 261 | |
| 262 | ConstantFP(Type *Ty, const APFloat &V); |
| 263 | |
| 264 | void destroyConstantImpl(); |
| 265 | |
| 266 | public: |
| 267 | ConstantFP(const ConstantFP &) = delete; |
| 268 | |
| 269 | /// Floating point negation must be implemented with f(x) = -0.0 - x. This |
| 270 | /// method returns the negative zero constant for floating point or vector |
| 271 | /// floating point types; for all other types, it returns the null value. |
| 272 | static Constant *getZeroValueForNegation(Type *Ty); |
| 273 | |
| 274 | /// This returns a ConstantFP, or a vector containing a splat of a ConstantFP, |
| 275 | /// for the specified value in the specified type. This should only be used |
| 276 | /// for simple constant values like 2.0/1.0 etc, that are known-valid both as |
| 277 | /// host double and as the target format. |
| 278 | static Constant *get(Type *Ty, double V); |
| 279 | |
| 280 | /// If Ty is a vector type, return a Constant with a splat of the given |
| 281 | /// value. Otherwise return a ConstantFP for the given value. |
| 282 | static Constant *get(Type *Ty, const APFloat &V); |
| 283 | |
| 284 | static Constant *get(Type *Ty, StringRef Str); |
| 285 | static ConstantFP *get(LLVMContext &Context, const APFloat &V); |
| 286 | static Constant *getNaN(Type *Ty, bool Negative = false, |
| 287 | uint64_t Payload = 0); |
| 288 | static Constant *getQNaN(Type *Ty, bool Negative = false, |
| 289 | APInt *Payload = nullptr); |
| 290 | static Constant *getSNaN(Type *Ty, bool Negative = false, |
| 291 | APInt *Payload = nullptr); |
| 292 | static Constant *getNegativeZero(Type *Ty); |
| 293 | static Constant *getInfinity(Type *Ty, bool Negative = false); |
| 294 | |
| 295 | /// Return true if Ty is big enough to represent V. |
| 296 | static bool isValueValidForType(Type *Ty, const APFloat &V); |
| 297 | inline const APFloat &getValueAPF() const { return Val; } |
| 298 | inline const APFloat &getValue() const { return Val; } |
| 299 | |
| 300 | /// Return true if the value is positive or negative zero. |
| 301 | bool isZero() const { return Val.isZero(); } |
| 302 | |
| 303 | /// Return true if the sign bit is set. |
| 304 | bool isNegative() const { return Val.isNegative(); } |
| 305 | |
| 306 | /// Return true if the value is infinity |
| 307 | bool isInfinity() const { return Val.isInfinity(); } |
| 308 | |
| 309 | /// Return true if the value is a NaN. |
| 310 | bool isNaN() const { return Val.isNaN(); } |
| 311 | |
| 312 | /// We don't rely on operator== working on double values, as it returns true |
| 313 | /// for things that are clearly not equal, like -0.0 and 0.0. |
| 314 | /// As such, this method can be used to do an exact bit-for-bit comparison of |
| 315 | /// two floating point values. The version with a double operand is retained |
| 316 | /// because it's so convenient to write isExactlyValue(2.0), but please use |
| 317 | /// it only for simple constants. |
| 318 | bool isExactlyValue(const APFloat &V) const; |
| 319 | |
| 320 | bool isExactlyValue(double V) const { |
| 321 | bool ignored; |
| 322 | APFloat FV(V); |
| 323 | FV.convert(Val.getSemantics(), APFloat::rmNearestTiesToEven, &ignored); |
| 324 | return isExactlyValue(FV); |
| 325 | } |
| 326 | |
| 327 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 328 | static bool classof(const Value *V) { |
| 329 | return V->getValueID() == ConstantFPVal; |
| 330 | } |
| 331 | }; |
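| | |
| | // Sketch: why bit-for-bit comparison matters. operator== on doubles calls |
| | // -0.0 and +0.0 equal, yet they are distinct IR constants; pointer |
| | // identity and isExactlyValue() both observe the difference. |
| | static void constantFPDemo(LLVMContext &Ctx) { |
| |   Type *DblTy = Type::getDoubleTy(Ctx); |
| |   Constant *PosZero = ConstantFP::get(DblTy, 0.0); |
| |   Constant *NegZero = ConstantFP::getNegativeZero(DblTy); |
| |   assert(PosZero != NegZero); // distinct uniqued constants |
| |   assert(cast<ConstantFP>(NegZero)->isZero()); // isZero() accepts both signs |
| |   (void)PosZero; (void)NegZero; |
| | } |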
| 332 | |
| 333 | //===----------------------------------------------------------------------===// |
| 334 | /// All zero aggregate value |
| 335 | /// |
| 336 | class ConstantAggregateZero final : public ConstantData { |
| 337 | friend class Constant; |
| 338 | |
| 339 | explicit ConstantAggregateZero(Type *Ty) |
| 340 | : ConstantData(Ty, ConstantAggregateZeroVal) {} |
| 341 | |
| 342 | void destroyConstantImpl(); |
| 343 | |
| 344 | public: |
| 345 | ConstantAggregateZero(const ConstantAggregateZero &) = delete; |
| 346 | |
| 347 | static ConstantAggregateZero *get(Type *Ty); |
| 348 | |
| 349 | /// If this CAZ has array or vector type, return a zero with the right element |
| 350 | /// type. |
| 351 | Constant *getSequentialElement() const; |
| 352 | |
| 353 | /// If this CAZ has struct type, return a zero with the right element type for |
| 354 | /// the specified element. |
| 355 | Constant *getStructElement(unsigned Elt) const; |
| 356 | |
| 357 | /// Return a zero of the right value for the specified GEP index if we can, |
| 358 | /// otherwise return null (e.g. if C is a ConstantExpr). |
| 359 | Constant *getElementValue(Constant *C) const; |
| 360 | |
| 361 | /// Return a zero of the right value for the specified GEP index. |
| 362 | Constant *getElementValue(unsigned Idx) const; |
| 363 | |
| 364 | /// Return the number of elements in the array, vector, or struct. |
| 365 | ElementCount getElementCount() const; |
| 366 | |
| 367 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 368 | /// |
| 369 | static bool classof(const Value *V) { |
| 370 | return V->getValueID() == ConstantAggregateZeroVal; |
| 371 | } |
| 372 | }; |
| 373 | |
| 374 | /// Base class for aggregate constants (with operands). |
| 375 | /// |
| 376 | /// These constants are aggregates of other constants, which are stored as |
| 377 | /// operands. |
| 378 | /// |
| 379 | /// Subclasses are \a ConstantStruct, \a ConstantArray, and \a |
| 380 | /// ConstantVector. |
| 381 | /// |
| 382 | /// \note Some subclasses of \a ConstantData are semantically aggregates -- |
| 383 | /// such as \a ConstantDataArray -- but are not subclasses of this because they |
| 384 | /// use operands. |
| 385 | class ConstantAggregate : public Constant { |
| 386 | protected: |
| 387 | ConstantAggregate(Type *T, ValueTy VT, ArrayRef<Constant *> V); |
| 388 | |
| 389 | public: |
| 390 | /// Transparently provide more efficient getOperand methods. |
| 391 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant) |
| 392 | |
| 393 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 394 | static bool classof(const Value *V) { |
| 395 | return V->getValueID() >= ConstantAggregateFirstVal && |
| 396 | V->getValueID() <= ConstantAggregateLastVal; |
| 397 | } |
| 398 | }; |
| 399 | |
| 400 | template <> |
| 401 | struct OperandTraits<ConstantAggregate> |
| 402 | : public VariadicOperandTraits<ConstantAggregate> {}; |
| 403 | |
| 404 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantAggregate, Constant) |
| 405 | |
| 406 | //===----------------------------------------------------------------------===// |
| 407 | /// ConstantArray - Constant Array Declarations |
| 408 | /// |
| 409 | class ConstantArray final : public ConstantAggregate { |
| 410 | friend struct ConstantAggrKeyType<ConstantArray>; |
| 411 | friend class Constant; |
| 412 | |
| 413 | ConstantArray(ArrayType *T, ArrayRef<Constant *> Val); |
| 414 | |
| 415 | void destroyConstantImpl(); |
| 416 | Value *handleOperandChangeImpl(Value *From, Value *To); |
| 417 | |
| 418 | public: |
| 419 | // ConstantArray accessors |
| 420 | static Constant *get(ArrayType *T, ArrayRef<Constant *> V); |
| 421 | |
| 422 | private: |
| 423 | static Constant *getImpl(ArrayType *T, ArrayRef<Constant *> V); |
| 424 | |
| 425 | public: |
| 426 | /// Specialize the getType() method to always return an ArrayType, |
| 427 | /// which reduces the amount of casting needed in parts of the compiler. |
| 428 | inline ArrayType *getType() const { |
| 429 | return cast<ArrayType>(Value::getType()); |
| 430 | } |
| 431 | |
| 432 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 433 | static bool classof(const Value *V) { |
| 434 | return V->getValueID() == ConstantArrayVal; |
| 435 | } |
| 436 | }; |
| 437 | |
| 438 | //===----------------------------------------------------------------------===// |
| 439 | // Constant Struct Declarations |
| 440 | // |
| 441 | class ConstantStruct final : public ConstantAggregate { |
| 442 | friend struct ConstantAggrKeyType<ConstantStruct>; |
| 443 | friend class Constant; |
| 444 | |
| 445 | ConstantStruct(StructType *T, ArrayRef<Constant *> Val); |
| 446 | |
| 447 | void destroyConstantImpl(); |
| 448 | Value *handleOperandChangeImpl(Value *From, Value *To); |
| 449 | |
| 450 | public: |
| 451 | // ConstantStruct accessors |
| 452 | static Constant *get(StructType *T, ArrayRef<Constant *> V); |
| 453 | |
| 454 | template <typename... Csts> |
| 455 | static std::enable_if_t<are_base_of<Constant, Csts...>::value, Constant *> |
| 456 | get(StructType *T, Csts *...Vs) { |
| 457 | return get(T, ArrayRef<Constant *>({Vs...})); |
| 458 | } |
| 459 | |
| 460 | /// Return an anonymous struct that has the specified elements. |
| 461 | /// If the struct is possibly empty, then you must specify a context. |
| 462 | static Constant *getAnon(ArrayRef<Constant *> V, bool Packed = false) { |
| 463 | return get(getTypeForElements(V, Packed), V); |
| 464 | } |
| 465 | static Constant *getAnon(LLVMContext &Ctx, ArrayRef<Constant *> V, |
| 466 | bool Packed = false) { |
| 467 | return get(getTypeForElements(Ctx, V, Packed), V); |
| 468 | } |
| 469 | |
| 470 | /// Return an anonymous struct type to use for a constant with the specified |
| 471 | /// set of elements. The list must not be empty. |
| 472 | static StructType *getTypeForElements(ArrayRef<Constant *> V, |
| 473 | bool Packed = false); |
| 474 | /// This version of the method allows an empty list. |
| 475 | static StructType *getTypeForElements(LLVMContext &Ctx, |
| 476 | ArrayRef<Constant *> V, |
| 477 | bool Packed = false); |
| 478 | |
| 479 | /// Specialization - reduce amount of casting. |
| 480 | inline StructType *getType() const { |
| 481 | return cast<StructType>(Value::getType()); |
| 482 | } |
| 483 | |
| 484 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 485 | static bool classof(const Value *V) { |
| 486 | return V->getValueID() == ConstantStructVal; |
| 487 | } |
| 488 | }; |
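// Usage sketch (editor's addition, illustrative only; `Ctx` is an assumed
// LLVMContext). getAnon deduces a literal struct type from the elements:
//   Constant *A = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
//   Constant *B = ConstantFP::get(Type::getDoubleTy(Ctx), 2.0);
//   Constant *S = ConstantStruct::getAnon({A, B}); // type { i32, double }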
| 489 | |
| 490 | //===----------------------------------------------------------------------===// |
| 491 | /// Constant Vector Declarations |
| 492 | /// |
| 493 | class ConstantVector final : public ConstantAggregate { |
| 494 | friend struct ConstantAggrKeyType<ConstantVector>; |
| 495 | friend class Constant; |
| 496 | |
| 497 | ConstantVector(VectorType *T, ArrayRef<Constant *> Val); |
| 498 | |
| 499 | void destroyConstantImpl(); |
| 500 | Value *handleOperandChangeImpl(Value *From, Value *To); |
| 501 | |
| 502 | public: |
| 503 | // ConstantVector accessors |
| 504 | static Constant *get(ArrayRef<Constant *> V); |
| 505 | |
| 506 | private: |
| 507 | static Constant *getImpl(ArrayRef<Constant *> V); |
| 508 | |
| 509 | public: |
| 510 | /// Return a ConstantVector with the specified constant in each element. |
| 511 | /// Note that this might not return an instance of ConstantVector.
| 512 | static Constant *getSplat(ElementCount EC, Constant *Elt); |
| 513 | |
| 514 | /// Specialize the getType() method to always return a FixedVectorType, |
| 515 | /// which reduces the amount of casting needed in parts of the compiler. |
| 516 | inline FixedVectorType *getType() const { |
| 517 | return cast<FixedVectorType>(Value::getType()); |
| 518 | } |
| 519 | |
| 520 | /// If all elements of the vector constant have the same value, return that |
| 521 | /// value. Otherwise, return nullptr. Ignore undefined elements by setting |
| 522 | /// AllowUndefs to true. |
| 523 | Constant *getSplatValue(bool AllowUndefs = false) const; |
| 524 | |
| 525 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 526 | static bool classof(const Value *V) { |
| 527 | return V->getValueID() == ConstantVectorVal; |
| 528 | } |
| 529 | }; |
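// Usage sketch (editor's addition, illustrative only). As noted above,
// getSplat may fold the result to a ConstantDataVector or
// ConstantAggregateZero, which is why it returns Constant*:
//   Constant *One = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
//   Constant *V = ConstantVector::getSplat(ElementCount::getFixed(4), One);
//   // V is the <4 x i32> splat of 1 (likely a ConstantDataVector here)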
| 530 | |
| 531 | //===----------------------------------------------------------------------===// |
| 532 | /// A constant pointer value that points to null |
| 533 | /// |
| 534 | class ConstantPointerNull final : public ConstantData { |
| 535 | friend class Constant; |
| 536 | |
| 537 | explicit ConstantPointerNull(PointerType *T) |
| 538 | : ConstantData(T, Value::ConstantPointerNullVal) {} |
| 539 | |
| 540 | void destroyConstantImpl(); |
| 541 | |
| 542 | public: |
| 543 | ConstantPointerNull(const ConstantPointerNull &) = delete; |
| 544 | |
| 545 | /// Static factory methods - Return objects of the specified value |
| 546 | static ConstantPointerNull *get(PointerType *T); |
| 547 | |
| 548 | /// Specialize the getType() method to always return a PointerType,
| 549 | /// which reduces the amount of casting needed in parts of the compiler. |
| 550 | inline PointerType *getType() const { |
| 551 | return cast<PointerType>(Value::getType()); |
| 552 | } |
| 553 | |
| 554 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 555 | static bool classof(const Value *V) { |
| 556 | return V->getValueID() == ConstantPointerNullVal; |
| 557 | } |
| 558 | }; |
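// Usage sketch (editor's addition, illustrative only):
//   PointerType *PTy = Type::getInt8PtrTy(Ctx);
//   ConstantPointerNull *Null = ConstantPointerNull::get(PTy);
//   // Null->getType() == PTy; usable anywhere an i8* constant is expected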
| 559 | |
| 560 | //===----------------------------------------------------------------------===// |
| 561 | /// ConstantDataSequential - A vector or array constant whose element type is a |
| 562 | /// simple 1/2/4/8-byte integer or half/bfloat/float/double, and whose elements |
| 563 | /// are just simple data values (i.e. ConstantInt/ConstantFP). This Constant |
| 564 | /// node has no operands because it stores all of the elements of the constant |
| 565 | /// as densely packed data, instead of as Value*'s. |
| 566 | /// |
| 567 | /// This is the common base class of ConstantDataArray and ConstantDataVector. |
| 568 | /// |
| 569 | class ConstantDataSequential : public ConstantData { |
| 570 | friend class LLVMContextImpl; |
| 571 | friend class Constant; |
| 572 | |
| 573 | /// A pointer to the bytes underlying this constant (which is owned by the |
| 574 | /// uniquing StringMap). |
| 575 | const char *DataElements; |
| 576 | |
| 577 | /// This forms a linked list of ConstantDataSequential nodes that have
| 578 | /// the same value but different type. For example, 0,0,0,1 could be a 4 |
| 579 | /// element array of i8, or a 1-element array of i32. They'll both end up in |
| 580 | /// the same StringMap bucket, linked up. |
| 581 | std::unique_ptr<ConstantDataSequential> Next; |
| 582 | |
| 583 | void destroyConstantImpl(); |
| 584 | |
| 585 | protected: |
| 586 | explicit ConstantDataSequential(Type *ty, ValueTy VT, const char *Data) |
| 587 | : ConstantData(ty, VT), DataElements(Data) {} |
| 588 | |
| 589 | static Constant *getImpl(StringRef Bytes, Type *Ty); |
| 590 | |
| 591 | public: |
| 592 | ConstantDataSequential(const ConstantDataSequential &) = delete; |
| 593 | |
| 594 | /// Return true if a ConstantDataSequential can be formed with a vector or |
| 595 | /// array of the specified element type. |
| 596 | /// ConstantDataArray only works with normal float and int types that are |
| 597 | /// stored densely in memory, not with things like i42 or x86_f80. |
| 598 | static bool isElementTypeCompatible(Type *Ty); |
| 599 | |
| 600 | /// If this is a sequential container of integers (of any size), return the |
| 601 | /// specified element in the low bits of a uint64_t. |
| 602 | uint64_t getElementAsInteger(unsigned i) const; |
| 603 | |
| 604 | /// If this is a sequential container of integers (of any size), return the |
| 605 | /// specified element as an APInt. |
| 606 | APInt getElementAsAPInt(unsigned i) const; |
| 607 | |
| 608 | /// If this is a sequential container of floating point type, return the |
| 609 | /// specified element as an APFloat. |
| 610 | APFloat getElementAsAPFloat(unsigned i) const; |
| 611 | |
| 612 | /// If this is a sequential container of floats, return the specified element
| 613 | /// as a float. |
| 614 | float getElementAsFloat(unsigned i) const; |
| 615 | |
| 616 | /// If this is a sequential container of doubles, return the specified
| 617 | /// element as a double. |
| 618 | double getElementAsDouble(unsigned i) const; |
| 619 | |
| 620 | /// Return a Constant for a specified index's element. |
| 621 | /// Note that this has to compute a new constant to return, so it isn't as |
| 622 | /// efficient as getElementAsInteger/Float/Double. |
| 623 | Constant *getElementAsConstant(unsigned i) const; |
| 624 | |
| 625 | /// Return the element type of the array/vector. |
| 626 | Type *getElementType() const; |
| 627 | |
| 628 | /// Return the number of elements in the array or vector. |
| 629 | unsigned getNumElements() const; |
| 630 | |
| 631 | /// Return the size (in bytes) of each element in the array/vector. |
| 632 | /// The size of the elements is known to be a multiple of one byte. |
| 633 | uint64_t getElementByteSize() const; |
| 634 | |
| 635 | /// This method returns true if this is an array of \p CharSize integers. |
| 636 | bool isString(unsigned CharSize = 8) const; |
| 637 | |
| 638 | /// This method returns true if the array "isString", ends with a null byte, |
| 639 | /// and does not contain any other null bytes.
| 640 | bool isCString() const; |
| 641 | |
| 642 | /// If this array is isString(), then this method returns the array as a |
| 643 | /// StringRef. Otherwise, it asserts out. |
| 644 | StringRef getAsString() const { |
| 645 | assert(isString() && "Not a string");
| 646 | return getRawDataValues(); |
| 647 | } |
| 648 | |
| 649 | /// If this array is isCString(), then this method returns the array (without |
| 650 | /// the trailing null byte) as a StringRef. Otherwise, it asserts out. |
| 651 | StringRef getAsCString() const { |
| 652 | assert(isCString() && "Isn't a C string");
| 653 | StringRef Str = getAsString(); |
| 654 | return Str.substr(0, Str.size() - 1); |
| 655 | } |
| 656 | |
| 657 | /// Return the raw, underlying, bytes of this data. Note that this is an |
| 658 | /// extremely tricky thing to work with, as it exposes the host endianness of |
| 659 | /// the data elements. |
| 660 | StringRef getRawDataValues() const; |
| 661 | |
| 662 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 663 | static bool classof(const Value *V) { |
| 664 | return V->getValueID() == ConstantDataArrayVal || |
| 665 | V->getValueID() == ConstantDataVectorVal; |
| 666 | } |
| 667 | |
| 668 | private: |
| 669 | const char *getElementPointer(unsigned Elt) const; |
| 670 | }; |
| 671 | |
| 672 | //===----------------------------------------------------------------------===// |
| 673 | /// An array constant whose element type is a simple 1/2/4/8-byte integer or |
| 674 | /// float/double, and whose elements are just simple data values |
| 675 | /// (i.e. ConstantInt/ConstantFP). This Constant node has no operands because it |
| 676 | /// stores all of the elements of the constant as densely packed data, instead |
| 677 | /// of as Value*'s. |
| 678 | class ConstantDataArray final : public ConstantDataSequential { |
| 679 | friend class ConstantDataSequential; |
| 680 | |
| 681 | explicit ConstantDataArray(Type *ty, const char *Data) |
| 682 | : ConstantDataSequential(ty, ConstantDataArrayVal, Data) {} |
| 683 | |
| 684 | public: |
| 685 | ConstantDataArray(const ConstantDataArray &) = delete; |
| 686 | |
| 687 | /// get() constructor - Return a constant with array type with an element |
| 688 | /// count and element type matching the ArrayRef passed in. Note that this |
| 689 | /// can return a ConstantAggregateZero object. |
| 690 | template <typename ElementTy> |
| 691 | static Constant *get(LLVMContext &Context, ArrayRef<ElementTy> Elts) { |
| 692 | const char *Data = reinterpret_cast<const char *>(Elts.data()); |
| 693 | return getRaw(StringRef(Data, Elts.size() * sizeof(ElementTy)), Elts.size(), |
| 694 | Type::getScalarTy<ElementTy>(Context)); |
| 695 | } |
| 696 | |
| 697 | /// get() constructor - ArrayTy needs to be compatible with |
| 698 | /// ArrayRef<ElementTy>. Calls get(LLVMContext, ArrayRef<ElementTy>). |
| 699 | template <typename ArrayTy> |
| 700 | static Constant *get(LLVMContext &Context, ArrayTy &Elts) { |
| 701 | return ConstantDataArray::get(Context, makeArrayRef(Elts)); |
| 702 | } |
| 703 | |
| 704 | /// getRaw() constructor - Return a constant with array type with an element |
| 705 | /// count and element type matching the NumElements and ElementTy parameters |
| 706 | /// passed in. Note that this can return a ConstantAggregateZero object. |
| 707 | /// ElementTy must be one of i8/i16/i32/i64/half/bfloat/float/double. Data is |
| 708 | /// the buffer containing the elements. Be careful to make sure Data uses the |
| 709 | /// right endianness; the buffer will be used as-is.
| 710 | static Constant *getRaw(StringRef Data, uint64_t NumElements, |
| 711 | Type *ElementTy) { |
| 712 | Type *Ty = ArrayType::get(ElementTy, NumElements); |
| 713 | return getImpl(Data, Ty); |
| 714 | } |
| 715 | |
| 716 | /// getFP() constructors - Return a constant of array type with a float |
| 717 | /// element type taken from argument `ElementType', and count taken from |
| 718 | /// argument `Elts'. The bit width of ElementType must match the bit width of
| 719 | /// the integer type contained in the passed-in ArrayRef
| 720 | /// (i.e. half or bfloat for 16 bits, float for 32 bits, double for 64 bits). Note
| 721 | /// that this can return a ConstantAggregateZero object. |
| 722 | static Constant *getFP(Type *ElementType, ArrayRef<uint16_t> Elts); |
| 723 | static Constant *getFP(Type *ElementType, ArrayRef<uint32_t> Elts); |
| 724 | static Constant *getFP(Type *ElementType, ArrayRef<uint64_t> Elts); |
| 725 | |
| 726 | /// This method constructs a CDS and initializes it with a text string. |
| 727 | /// The default behavior (AddNull==true) causes a null terminator to |
| 728 | /// be placed at the end of the array (increasing the length of the string by |
| 729 | /// one more than the StringRef would normally indicate). Pass AddNull=false
| 730 | /// to disable this behavior. |
| 731 | static Constant *getString(LLVMContext &Context, StringRef Initializer, |
| 732 | bool AddNull = true); |
| 733 | |
| 734 | /// Specialize the getType() method to always return an ArrayType, |
| 735 | /// which reduces the amount of casting needed in parts of the compiler. |
| 736 | inline ArrayType *getType() const { |
| 737 | return cast<ArrayType>(Value::getType()); |
| 738 | } |
| 739 | |
| 740 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 741 | static bool classof(const Value *V) { |
| 742 | return V->getValueID() == ConstantDataArrayVal; |
| 743 | } |
| 744 | }; |
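// Usage sketch (editor's addition, illustrative only):
//   Constant *CS = ConstantDataArray::getString(Ctx, "hi");  // [3 x i8] c"hi\00"
//   Constant *NoNul = ConstantDataArray::getString(Ctx, "hi", /*AddNull=*/false);
//   uint32_t Vals[] = {1, 2, 3};
//   Constant *Arr = ConstantDataArray::get(Ctx, Vals);       // [3 x i32]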
| 745 | |
| 746 | //===----------------------------------------------------------------------===// |
| 747 | /// A vector constant whose element type is a simple 1/2/4/8-byte integer or |
| 748 | /// float/double, and whose elements are just simple data values |
| 749 | /// (i.e. ConstantInt/ConstantFP). This Constant node has no operands because it |
| 750 | /// stores all of the elements of the constant as densely packed data, instead |
| 751 | /// of as Value*'s. |
| 752 | class ConstantDataVector final : public ConstantDataSequential { |
| 753 | friend class ConstantDataSequential; |
| 754 | |
| 755 | explicit ConstantDataVector(Type *ty, const char *Data) |
| 756 | : ConstantDataSequential(ty, ConstantDataVectorVal, Data), |
| 757 | IsSplatSet(false) {} |
| 758 | // Cache whether or not the constant is a splat. |
| 759 | mutable bool IsSplatSet : 1; |
| 760 | mutable bool IsSplat : 1; |
| 761 | bool isSplatData() const; |
| 762 | |
| 763 | public: |
| 764 | ConstantDataVector(const ConstantDataVector &) = delete; |
| 765 | |
| 766 | /// get() constructors - Return a constant with vector type with an element |
| 767 | /// count and element type matching the ArrayRef passed in. Note that this |
| 768 | /// can return a ConstantAggregateZero object. |
| 769 | static Constant *get(LLVMContext &Context, ArrayRef<uint8_t> Elts); |
| 770 | static Constant *get(LLVMContext &Context, ArrayRef<uint16_t> Elts); |
| 771 | static Constant *get(LLVMContext &Context, ArrayRef<uint32_t> Elts); |
| 772 | static Constant *get(LLVMContext &Context, ArrayRef<uint64_t> Elts); |
| 773 | static Constant *get(LLVMContext &Context, ArrayRef<float> Elts); |
| 774 | static Constant *get(LLVMContext &Context, ArrayRef<double> Elts); |
| 775 | |
| 776 | /// getRaw() constructor - Return a constant with vector type with an element |
| 777 | /// count and element type matching the NumElements and ElementTy parameters |
| 778 | /// passed in. Note that this can return a ConstantAggregateZero object. |
| 779 | /// ElementTy must be one of i8/i16/i32/i64/half/bfloat/float/double. Data is |
| 780 | /// the buffer containing the elements. Be careful to make sure Data uses the |
| 781 | /// right endianness; the buffer will be used as-is.
| 782 | static Constant *getRaw(StringRef Data, uint64_t NumElements, |
| 783 | Type *ElementTy) { |
| 784 | Type *Ty = VectorType::get(ElementTy, ElementCount::getFixed(NumElements)); |
| 785 | return getImpl(Data, Ty); |
| 786 | } |
| 787 | |
| 788 | /// getFP() constructors - Return a constant of vector type with a float |
| 789 | /// element type taken from argument `ElementType', and count taken from |
| 790 | /// argument `Elts'. The bit width of ElementType must match the bit width of
| 791 | /// the integer type contained in the passed-in ArrayRef
| 792 | /// (i.e. half or bfloat for 16 bits, float for 32 bits, double for 64 bits). Note
| 793 | /// that this can return a ConstantAggregateZero object. |
| 794 | static Constant *getFP(Type *ElementType, ArrayRef<uint16_t> Elts); |
| 795 | static Constant *getFP(Type *ElementType, ArrayRef<uint32_t> Elts); |
| 796 | static Constant *getFP(Type *ElementType, ArrayRef<uint64_t> Elts); |
| 797 | |
| 798 | /// Return a ConstantVector with the specified constant in each element. |
| 799 | /// The specified constant has to be of a compatible type (i8/i16/
| 800 | /// i32/i64/half/bfloat/float/double) and must be a ConstantFP or ConstantInt. |
| 801 | static Constant *getSplat(unsigned NumElts, Constant *Elt); |
| 802 | |
| 803 | /// Returns true if this is a splat constant, meaning that all elements have |
| 804 | /// the same value. |
| 805 | bool isSplat() const; |
| 806 | |
| 807 | /// If this is a splat constant, meaning that all of the elements have the |
| 808 | /// same value, return that value. Otherwise return NULL. |
| 809 | Constant *getSplatValue() const; |
| 810 | |
| 811 | /// Specialize the getType() method to always return a FixedVectorType, |
| 812 | /// which reduces the amount of casting needed in parts of the compiler. |
| 813 | inline FixedVectorType *getType() const { |
| 814 | return cast<FixedVectorType>(Value::getType()); |
| 815 | } |
| 816 | |
| 817 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 818 | static bool classof(const Value *V) { |
| 819 | return V->getValueID() == ConstantDataVectorVal; |
| 820 | } |
| 821 | }; |
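// Usage sketch (editor's addition, illustrative only). All-equal nonzero input
// yields a splat ConstantDataVector (all-zero input folds to
// ConstantAggregateZero):
//   uint8_t Lanes[] = {7, 7, 7, 7};
//   Constant *V = ConstantDataVector::get(Ctx, makeArrayRef(Lanes)); // <4 x i8>
//   if (auto *CDV = dyn_cast<ConstantDataVector>(V))
//     (void)CDV->isSplat(); // true: every lane is 7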
| 822 | |
| 823 | //===----------------------------------------------------------------------===// |
| 824 | /// A constant token which is empty |
| 825 | /// |
| 826 | class ConstantTokenNone final : public ConstantData { |
| 827 | friend class Constant; |
| 828 | |
| 829 | explicit ConstantTokenNone(LLVMContext &Context) |
| 830 | : ConstantData(Type::getTokenTy(Context), ConstantTokenNoneVal) {} |
| 831 | |
| 832 | void destroyConstantImpl(); |
| 833 | |
| 834 | public: |
| 835 | ConstantTokenNone(const ConstantTokenNone &) = delete; |
| 836 | |
| 837 | /// Return the ConstantTokenNone. |
| 838 | static ConstantTokenNone *get(LLVMContext &Context); |
| 839 | |
| 840 | /// Methods to support type inquiry through isa, cast, and dyn_cast. |
| 841 | static bool classof(const Value *V) { |
| 842 | return V->getValueID() == ConstantTokenNoneVal; |
| 843 | } |
| 844 | }; |
| 845 | |
| 846 | /// The address of a basic block. |
| 847 | /// |
| 848 | class BlockAddress final : public Constant { |
| 849 | friend class Constant; |
| 850 | |
| 851 | BlockAddress(Function *F, BasicBlock *BB); |
| 852 | |
| 853 | void *operator new(size_t S) { return User::operator new(S, 2); } |
| 854 | |
| 855 | void destroyConstantImpl(); |
| 856 | Value *handleOperandChangeImpl(Value *From, Value *To); |
| 857 | |
| 858 | public: |
| 859 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
| 860 | |
| 861 | /// Return a BlockAddress for the specified function and basic block. |
| 862 | static BlockAddress *get(Function *F, BasicBlock *BB); |
| 863 | |
| 864 | /// Return a BlockAddress for the specified basic block. The basic |
| 865 | /// block must be embedded into a function. |
| 866 | static BlockAddress *get(BasicBlock *BB); |
| 867 | |
| 868 | /// Lookup an existing \c BlockAddress constant for the given BasicBlock. |
| 869 | /// |
| 870 | /// \returns 0 if \c !BB->hasAddressTaken(), otherwise the \c BlockAddress. |
| 871 | static BlockAddress *lookup(const BasicBlock *BB); |
| 872 | |
| 873 | /// Transparently provide more efficient getOperand methods. |
| 874 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
| 875 | |
| 876 | Function *getFunction() const { return (Function *)Op<0>().get(); } |
| 877 | BasicBlock *getBasicBlock() const { return (BasicBlock *)Op<1>().get(); } |
| 878 | |
| 879 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 880 | static bool classof(const Value *V) { |
| 881 | return V->getValueID() == BlockAddressVal; |
| 882 | } |
| 883 | }; |
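// Usage sketch (editor's addition, illustrative only; `BB` is an assumed
// BasicBlock already embedded in its parent function):
//   BlockAddress *BA = BlockAddress::get(BB);
//   Function *F = BA->getFunction();          // operand 0
//   BasicBlock *Target = BA->getBasicBlock(); // operand 1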
| 884 | |
| 885 | template <> |
| 886 | struct OperandTraits<BlockAddress> |
| 887 | : public FixedNumOperandTraits<BlockAddress, 2> {}; |
| 888 | |
| 889 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BlockAddress, Value)
| 890 | |
| 891 | /// Wrapper for a global value that functionally represents the original
| 892 | /// function. This can be a function, a global alias to a function, or an
| 893 | /// ifunc.
| 894 | class DSOLocalEquivalent final : public Constant { |
| 895 | friend class Constant; |
| 896 | |
| 897 | DSOLocalEquivalent(GlobalValue *GV); |
| 898 | |
| 899 | void *operator new(size_t S) { return User::operator new(S, 1); } |
| 900 | |
| 901 | void destroyConstantImpl(); |
| 902 | Value *handleOperandChangeImpl(Value *From, Value *To); |
| 903 | |
| 904 | public: |
| 905 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
| 906 | |
| 907 | /// Return a DSOLocalEquivalent for the specified global value. |
| 908 | static DSOLocalEquivalent *get(GlobalValue *GV); |
| 909 | |
| 910 | /// Transparently provide more efficient getOperand methods. |
| 911 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
| 912 | |
| 913 | GlobalValue *getGlobalValue() const { |
| 914 | return cast<GlobalValue>(Op<0>().get()); |
| 915 | } |
| 916 | |
| 917 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 918 | static bool classof(const Value *V) { |
| 919 | return V->getValueID() == DSOLocalEquivalentVal; |
| 920 | } |
| 921 | }; |
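// Usage sketch (editor's addition, illustrative only; `Callee` is an assumed
// Function* -- any GlobalValue that stands for a function works):
//   DSOLocalEquivalent *Eq = DSOLocalEquivalent::get(Callee);
//   GlobalValue *GV = Eq->getGlobalValue(); // the wrapped function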
| 922 | |
| 923 | template <> |
| 924 | struct OperandTraits<DSOLocalEquivalent> |
| 925 | : public FixedNumOperandTraits<DSOLocalEquivalent, 1> {}; |
| 926 | |
| 927 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(DSOLocalEquivalent, Value)
| 928 | |
| 929 | //===----------------------------------------------------------------------===// |
| 930 | /// A constant value that is initialized with an expression using |
| 931 | /// other constant values. |
| 932 | /// |
| 933 | /// This class uses the standard Instruction opcodes to define the various |
| 934 | /// constant expressions. The Opcode field for the ConstantExpr class is |
| 935 | /// maintained in the Value::SubclassData field. |
| 936 | class ConstantExpr : public Constant { |
| 937 | friend struct ConstantExprKeyType; |
| 938 | friend class Constant; |
| 939 | |
| 940 | void destroyConstantImpl(); |
| 941 | Value *handleOperandChangeImpl(Value *From, Value *To); |
| 942 | |
| 943 | protected: |
| 944 | ConstantExpr(Type *ty, unsigned Opcode, Use *Ops, unsigned NumOps) |
| 945 | : Constant(ty, ConstantExprVal, Ops, NumOps) { |
| 946 | // Operation type (an Instruction opcode) is stored as the SubclassData. |
| 947 | setValueSubclassData(Opcode); |
| 948 | } |
| 949 | |
| 950 | ~ConstantExpr() = default; |
| 951 | |
| 952 | public: |
| 953 | // Static methods to construct a ConstantExpr of different kinds. Note that |
| 954 | // these methods may return an object that is not an instance of the
| 955 | // ConstantExpr class, because they will attempt to fold the constant |
| 956 | // expression into something simpler if possible. |
| 957 | |
| 958 | /// getAlignOf constant expr - computes the alignment of a type in a target |
| 959 | /// independent way (Note: the return type is an i64). |
| 960 | static Constant *getAlignOf(Type *Ty); |
| 961 | |
| 962 | /// getSizeOf constant expr - computes the (alloc) size of a type (in |
| 963 | /// address-units, not bits) in a target independent way (Note: the return |
| 964 | /// type is an i64). |
| 965 | /// |
| 966 | static Constant *getSizeOf(Type *Ty); |
| 967 | |
| 968 | /// getOffsetOf constant expr - computes the offset of a struct field in a |
| 969 | /// target independent way (Note: the return type is an i64). |
| 970 | /// |
| 971 | static Constant *getOffsetOf(StructType *STy, unsigned FieldNo); |
| 972 | |
| 973 | /// getOffsetOf constant expr - This is a generalized form of getOffsetOf, |
| 974 | /// which supports any aggregate type, and any Constant index. |
| 975 | /// |
| 976 | static Constant *getOffsetOf(Type *Ty, Constant *FieldNo); |
| 977 | |
| 978 | static Constant *getNeg(Constant *C, bool HasNUW = false, |
| 979 | bool HasNSW = false); |
| 980 | static Constant *getFNeg(Constant *C); |
| 981 | static Constant *getNot(Constant *C); |
| 982 | static Constant *getAdd(Constant *C1, Constant *C2, bool HasNUW = false, |
| 983 | bool HasNSW = false); |
| 984 | static Constant *getFAdd(Constant *C1, Constant *C2); |
| 985 | static Constant *getSub(Constant *C1, Constant *C2, bool HasNUW = false, |
| 986 | bool HasNSW = false); |
| 987 | static Constant *getFSub(Constant *C1, Constant *C2); |
| 988 | static Constant *getMul(Constant *C1, Constant *C2, bool HasNUW = false, |
| 989 | bool HasNSW = false); |
| 990 | static Constant *getFMul(Constant *C1, Constant *C2); |
| 991 | static Constant *getUDiv(Constant *C1, Constant *C2, bool isExact = false); |
| 992 | static Constant *getSDiv(Constant *C1, Constant *C2, bool isExact = false); |
| 993 | static Constant *getFDiv(Constant *C1, Constant *C2); |
| 994 | static Constant *getURem(Constant *C1, Constant *C2); |
| 995 | static Constant *getSRem(Constant *C1, Constant *C2); |
| 996 | static Constant *getFRem(Constant *C1, Constant *C2); |
| 997 | static Constant *getAnd(Constant *C1, Constant *C2); |
| 998 | static Constant *getOr(Constant *C1, Constant *C2); |
| 999 | static Constant *getXor(Constant *C1, Constant *C2); |
| 1000 | static Constant *getUMin(Constant *C1, Constant *C2); |
| 1001 | static Constant *getShl(Constant *C1, Constant *C2, bool HasNUW = false, |
| 1002 | bool HasNSW = false); |
| 1003 | static Constant *getLShr(Constant *C1, Constant *C2, bool isExact = false); |
| 1004 | static Constant *getAShr(Constant *C1, Constant *C2, bool isExact = false); |
| 1005 | static Constant *getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced = false); |
| 1006 | static Constant *getSExt(Constant *C, Type *Ty, bool OnlyIfReduced = false); |
| 1007 | static Constant *getZExt(Constant *C, Type *Ty, bool OnlyIfReduced = false); |
| 1008 | static Constant *getFPTrunc(Constant *C, Type *Ty, |
| 1009 | bool OnlyIfReduced = false); |
| 1010 | static Constant *getFPExtend(Constant *C, Type *Ty, |
| 1011 | bool OnlyIfReduced = false); |
| 1012 | static Constant *getUIToFP(Constant *C, Type *Ty, bool OnlyIfReduced = false); |
| 1013 | static Constant *getSIToFP(Constant *C, Type *Ty, bool OnlyIfReduced = false); |
| 1014 | static Constant *getFPToUI(Constant *C, Type *Ty, bool OnlyIfReduced = false); |
| 1015 | static Constant *getFPToSI(Constant *C, Type *Ty, bool OnlyIfReduced = false); |
| 1016 | static Constant *getPtrToInt(Constant *C, Type *Ty, |
| 1017 | bool OnlyIfReduced = false); |
| 1018 | static Constant *getIntToPtr(Constant *C, Type *Ty, |
| 1019 | bool OnlyIfReduced = false); |
| 1020 | static Constant *getBitCast(Constant *C, Type *Ty, |
| 1021 | bool OnlyIfReduced = false); |
| 1022 | static Constant *getAddrSpaceCast(Constant *C, Type *Ty, |
| 1023 | bool OnlyIfReduced = false); |
| 1024 | |
| 1025 | static Constant *getNSWNeg(Constant *C) { return getNeg(C, false, true); } |
| 1026 | static Constant *getNUWNeg(Constant *C) { return getNeg(C, true, false); } |
| 1027 | |
| 1028 | static Constant *getNSWAdd(Constant *C1, Constant *C2) { |
| 1029 | return getAdd(C1, C2, false, true); |
| 1030 | } |
| 1031 | |
| 1032 | static Constant *getNUWAdd(Constant *C1, Constant *C2) { |
| 1033 | return getAdd(C1, C2, true, false); |
| 1034 | } |
| 1035 | |
| 1036 | static Constant *getNSWSub(Constant *C1, Constant *C2) { |
| 1037 | return getSub(C1, C2, false, true); |
| 1038 | } |
| 1039 | |
| 1040 | static Constant *getNUWSub(Constant *C1, Constant *C2) { |
| 1041 | return getSub(C1, C2, true, false); |
| 1042 | } |
| 1043 | |
| 1044 | static Constant *getNSWMul(Constant *C1, Constant *C2) { |
| 1045 | return getMul(C1, C2, false, true); |
| 1046 | } |
| 1047 | |
| 1048 | static Constant *getNUWMul(Constant *C1, Constant *C2) { |
| 1049 | return getMul(C1, C2, true, false); |
| 1050 | } |
| 1051 | |
| 1052 | static Constant *getNSWShl(Constant *C1, Constant *C2) { |
| 1053 | return getShl(C1, C2, false, true); |
| 1054 | } |
| 1055 | |
| 1056 | static Constant *getNUWShl(Constant *C1, Constant *C2) { |
| 1057 | return getShl(C1, C2, true, false); |
| 1058 | } |
| 1059 | |
| 1060 | static Constant *getExactSDiv(Constant *C1, Constant *C2) { |
| 1061 | return getSDiv(C1, C2, true); |
| 1062 | } |
| 1063 | |
| 1064 | static Constant *getExactUDiv(Constant *C1, Constant *C2) { |
| 1065 | return getUDiv(C1, C2, true); |
| 1066 | } |
| 1067 | |
| 1068 | static Constant *getExactAShr(Constant *C1, Constant *C2) { |
| 1069 | return getAShr(C1, C2, true); |
| 1070 | } |
| 1071 | |
| 1072 | static Constant *getExactLShr(Constant *C1, Constant *C2) { |
| 1073 | return getLShr(C1, C2, true); |
| 1074 | } |
| 1075 | |
| 1076 | /// If C is a scalar/fixed width vector of known powers of 2, then this |
| 1077 | /// function returns a new scalar/fixed width vector obtained from logBase2 |
| 1078 | /// of C. Undef vector elements are set to zero. |
| 1079 | /// Return a null pointer otherwise. |
| 1080 | static Constant *getExactLogBase2(Constant *C); |
| 1081 | |
| 1082 | /// Return the identity constant for a binary opcode. |
| 1083 | /// The identity constant C is defined as X op C = X and C op X = X for every |
| 1084 | /// X when the binary operation is commutative. If the binop is not |
| 1085 | /// commutative, callers can acquire the operand 1 identity constant by |
| 1086 | /// setting AllowRHSConstant to true. For example, any shift has a zero |
| 1087 | /// identity constant for operand 1: X shift 0 = X. |
| 1088 | /// Return nullptr if the operator does not have an identity constant. |
| 1089 | static Constant *getBinOpIdentity(unsigned Opcode, Type *Ty, |
| 1090 | bool AllowRHSConstant = false); |
| 1091 | |
| 1092 | /// Return the absorbing element for the given binary |
| 1093 | /// operation, i.e. a constant C such that X op C = C and C op X = C for |
| 1094 | /// every X. For example, this returns zero for integer multiplication. |
| 1095 | /// It returns null if the operator doesn't have an absorbing element. |
| 1096 | static Constant *getBinOpAbsorber(unsigned Opcode, Type *Ty); |
| 1097 | |
| 1098 | /// Transparently provide more efficient getOperand methods. |
| 1099 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant)
| 1100 | |
| 1101 | /// Convenience function for getting a Cast operation. |
| 1102 | /// |
| 1103 | /// \param ops The opcode for the conversion |
| 1104 | /// \param C The constant to be converted |
| 1105 | /// \param Ty The type to which the constant is converted |
| 1106 | /// \param OnlyIfReduced see \a getWithOperands() docs. |
| 1107 | static Constant *getCast(unsigned ops, Constant *C, Type *Ty, |
| 1108 | bool OnlyIfReduced = false); |
| 1109 | |
| 1110 | // Create a ZExt or BitCast cast constant expression |
| 1111 | static Constant * |
| 1112 | getZExtOrBitCast(Constant *C, ///< The constant to zext or bitcast |
| 1113 | Type *Ty ///< The type to zext or bitcast C to |
| 1114 | ); |
| 1115 | |
| 1116 | // Create a SExt or BitCast cast constant expression |
| 1117 | static Constant * |
| 1118 | getSExtOrBitCast(Constant *C, ///< The constant to sext or bitcast |
| 1119 | Type *Ty ///< The type to sext or bitcast C to |
| 1120 | ); |
| 1121 | |
| 1122 | // Create a Trunc or BitCast cast constant expression |
| 1123 | static Constant * |
| 1124 | getTruncOrBitCast(Constant *C, ///< The constant to trunc or bitcast |
| 1125 | Type *Ty ///< The type to trunc or bitcast C to |
| 1126 | ); |
| 1127 | |
| 1128 | /// Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant |
| 1129 | /// expression. |
| 1130 | static Constant * |
| 1131 | getPointerCast(Constant *C, ///< The pointer value to be casted (operand 0) |
| 1132 | Type *Ty ///< The type to which cast should be made |
| 1133 | ); |
| 1134 | |
| 1135 | /// Create a BitCast or AddrSpaceCast for a pointer type depending on |
| 1136 | /// the address space. |
| 1137 | static Constant *getPointerBitCastOrAddrSpaceCast( |
| 1138 | Constant *C, ///< The constant to addrspacecast or bitcast |
| 1139 | Type *Ty ///< The type to bitcast or addrspacecast C to |
| 1140 | ); |
| 1141 | |
| 1142 | /// Create a ZExt, Bitcast or Trunc for integer -> integer casts |
| 1143 | static Constant * |
| 1144 | getIntegerCast(Constant *C, ///< The integer constant to be casted |
| 1145 | Type *Ty, ///< The integer type to cast to |
| 1146 | bool IsSigned ///< Whether C should be treated as signed or not |
| 1147 | ); |
| 1148 | |
| 1149 | /// Create a FPExt, Bitcast or FPTrunc for fp -> fp casts |
| 1150 | static Constant *getFPCast(Constant *C, ///< The integer constant to be casted |
| 1151 | Type *Ty ///< The integer type to cast to |
| 1152 | ); |
| 1153 | |
| 1154 | /// Return true if this is a convert constant expression |
| 1155 | bool isCast() const; |
| 1156 | |
| 1157 | /// Return true if this is a compare constant expression |
| 1158 | bool isCompare() const; |
| 1159 | |
| 1160 | /// Return true if this is an insertvalue or extractvalue expression, |
| 1161 | /// and the getIndices() method may be used. |
| 1162 | bool hasIndices() const; |
| 1163 | |
| 1164 | /// Return true if this is a getelementptr expression and all |
| 1165 | /// the index operands are compile-time known integers within the |
| 1166 | /// corresponding notional static array extents. Note that this is |
| 1167 | /// not equivalent to, a subset of, or a superset of the "inbounds"
| 1168 | /// property. |
| 1169 | bool isGEPWithNoNotionalOverIndexing() const; |
| 1170 | |
| 1171 | /// Select constant expr |
| 1172 | /// |
| 1173 | /// \param OnlyIfReducedTy see \a getWithOperands() docs. |
| 1174 | static Constant *getSelect(Constant *C, Constant *V1, Constant *V2, |
| 1175 | Type *OnlyIfReducedTy = nullptr); |
| 1176 | |
| 1177 | /// get - Return a unary operator constant expression, |
| 1178 | /// folding if possible. |
| 1179 | /// |
| 1180 | /// \param OnlyIfReducedTy see \a getWithOperands() docs. |
| 1181 | static Constant *get(unsigned Opcode, Constant *C1, unsigned Flags = 0, |
| 1182 | Type *OnlyIfReducedTy = nullptr); |
| 1183 | |
| 1184 | /// get - Return a binary or shift operator constant expression, |
| 1185 | /// folding if possible. |
| 1186 | /// |
| 1187 | /// \param OnlyIfReducedTy see \a getWithOperands() docs. |
| 1188 | static Constant *get(unsigned Opcode, Constant *C1, Constant *C2, |
| 1189 | unsigned Flags = 0, Type *OnlyIfReducedTy = nullptr); |
| 1190 | |
| 1191 | /// Return an ICmp or FCmp comparison operator constant expression. |
| 1192 | /// |
| 1193 | /// \param OnlyIfReduced see \a getWithOperands() docs. |
| 1194 | static Constant *getCompare(unsigned short pred, Constant *C1, Constant *C2, |
| 1195 | bool OnlyIfReduced = false); |
| 1196 | |
| 1197 | /// get* - Return some common constants without having to |
| 1198 | /// specify the full Instruction::OPCODE identifier. |
| 1199 | /// |
| 1200 | static Constant *getICmp(unsigned short pred, Constant *LHS, Constant *RHS, |
| 1201 | bool OnlyIfReduced = false); |
| 1202 | static Constant *getFCmp(unsigned short pred, Constant *LHS, Constant *RHS, |
| 1203 | bool OnlyIfReduced = false); |
| 1204 | |
| 1205 | /// Getelementptr form. Value* is only accepted for convenience; |
| 1206 | /// all elements must be Constants. |
| 1207 | /// |
| 1208 | /// \param InRangeIndex the inrange index if present or None. |
| 1209 | /// \param OnlyIfReducedTy see \a getWithOperands() docs. |
| 1210 | static Constant *getGetElementPtr(Type *Ty, Constant *C, |
| 1211 | ArrayRef<Constant *> IdxList, |
| 1212 | bool InBounds = false, |
| 1213 | Optional<unsigned> InRangeIndex = None, |
| 1214 | Type *OnlyIfReducedTy = nullptr) { |
| 1215 | return getGetElementPtr( |
| 1216 | Ty, C, makeArrayRef((Value *const *)IdxList.data(), IdxList.size()), |
| 1217 | InBounds, InRangeIndex, OnlyIfReducedTy); |
| 1218 | } |
| 1219 | static Constant *getGetElementPtr(Type *Ty, Constant *C, Constant *Idx, |
| 1220 | bool InBounds = false, |
| 1221 | Optional<unsigned> InRangeIndex = None, |
| 1222 | Type *OnlyIfReducedTy = nullptr) { |
| 1223 | // This form of the function only exists to avoid ambiguous overload |
| 1224 | // warnings about whether to convert Idx to ArrayRef<Constant *> or |
| 1225 | // ArrayRef<Value *>. |
| 1226 | return getGetElementPtr(Ty, C, cast<Value>(Idx), InBounds, InRangeIndex, |
| 1227 | OnlyIfReducedTy); |
| 1228 | } |
| 1229 | static Constant *getGetElementPtr(Type *Ty, Constant *C, |
| 1230 | ArrayRef<Value *> IdxList, |
| 1231 | bool InBounds = false, |
| 1232 | Optional<unsigned> InRangeIndex = None, |
| 1233 | Type *OnlyIfReducedTy = nullptr); |
| 1234 | |
| 1235 | /// Create an "inbounds" getelementptr. See the documentation for the |
| 1236 | /// "inbounds" flag in LangRef.html for details. |
| 1237 | static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C, |
| 1238 | ArrayRef<Constant *> IdxList) { |
| 1239 | return getGetElementPtr(Ty, C, IdxList, true); |
| 1240 | } |
| 1241 | static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C, |
| 1242 | Constant *Idx) { |
| 1243 | // This form of the function only exists to avoid ambiguous overload |
| 1244 | // warnings about whether to convert Idx to ArrayRef<Constant *> or |
| 1245 | // ArrayRef<Value *>. |
| 1246 | return getGetElementPtr(Ty, C, Idx, true); |
| 1247 | } |
| 1248 | static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C, |
| 1249 | ArrayRef<Value *> IdxList) { |
| 1250 | return getGetElementPtr(Ty, C, IdxList, true); |
| 1251 | } |
| 1252 | |
| 1253 | static Constant *getExtractElement(Constant *Vec, Constant *Idx, |
| 1254 | Type *OnlyIfReducedTy = nullptr); |
| 1255 | static Constant *getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, |
| 1256 | Type *OnlyIfReducedTy = nullptr); |
| 1257 | static Constant *getShuffleVector(Constant *V1, Constant *V2, |
| 1258 | ArrayRef<int> Mask, |
| 1259 | Type *OnlyIfReducedTy = nullptr); |
| 1260 | static Constant *getExtractValue(Constant *Agg, ArrayRef<unsigned> Idxs, |
| 1261 | Type *OnlyIfReducedTy = nullptr); |
| 1262 | static Constant *getInsertValue(Constant *Agg, Constant *Val, |
| 1263 | ArrayRef<unsigned> Idxs, |
| 1264 | Type *OnlyIfReducedTy = nullptr); |
| 1265 | |
| 1266 | /// Return the opcode at the root of this constant expression |
| 1267 | unsigned getOpcode() const { return getSubclassDataFromValue(); } |
| 1268 | |
| 1269 | /// Return the ICMP or FCMP predicate value. Assert if this is not an ICMP or |
| 1270 | /// FCMP constant expression. |
| 1271 | unsigned getPredicate() const; |
| 1272 | |
| 1273 | /// Assert that this is an insertvalue or extractvalue
| 1274 | /// expression and return the list of indices. |
| 1275 | ArrayRef<unsigned> getIndices() const; |
| 1276 | |
| 1277 | /// Assert that this is a shufflevector and return the mask. See class |
| 1278 | /// ShuffleVectorInst for a description of the mask representation. |
| 1279 | ArrayRef<int> getShuffleMask() const; |
| 1280 | |
| 1281 | /// Assert that this is a shufflevector and return the mask. |
| 1282 | /// |
| 1283 | /// TODO: This is a temporary hack until we update the bitcode format for |
| 1284 | /// shufflevector. |
| 1285 | Constant *getShuffleMaskForBitcode() const; |
| 1286 | |
| 1287 | /// Return a string representation for an opcode. |
| 1288 | const char *getOpcodeName() const; |
| 1289 | |
| 1290 | /// Return a constant expression identical to this one, but with the specified |
| 1291 | /// operand set to the specified value. |
| 1292 | Constant *getWithOperandReplaced(unsigned OpNo, Constant *Op) const; |
| 1293 | |
| 1294 | /// This returns the current constant expression with the operands replaced |
| 1295 | /// with the specified values. The specified array must have the same number |
| 1296 | /// of operands as our current one. |
| 1297 | Constant *getWithOperands(ArrayRef<Constant *> Ops) const { |
| 1298 | return getWithOperands(Ops, getType()); |
| 1299 | } |
| 1300 | |
| 1301 | /// Get the current expression with the operands replaced. |
| 1302 | /// |
| 1303 | /// Return the current constant expression with the operands replaced with \c |
| 1304 | /// Ops and the type with \c Ty. The new operands must have the same number |
| 1305 | /// as the current ones. |
| 1306 | /// |
| 1307 | /// If \c OnlyIfReduced is \c true, nullptr will be returned unless something |
| 1308 | /// gets constant-folded, the type changes, or the expression is otherwise |
| 1309 | /// canonicalized. This parameter should almost always be \c false. |
| 1310 | Constant *getWithOperands(ArrayRef<Constant *> Ops, Type *Ty, |
| 1311 | bool OnlyIfReduced = false, |
| 1312 | Type *SrcTy = nullptr) const; |
| 1313 | |
| 1314 | /// Returns an Instruction which implements the same operation as this |
| 1315 | /// ConstantExpr. The instruction is not linked to any basic block. |
| 1316 | /// |
| 1317 | /// A better approach to this could be to have a constructor for Instruction |
| 1318 | /// which would take a ConstantExpr parameter, but that would have spread |
| 1319 | /// implementation details of ConstantExpr outside of Constants.cpp, which |
| 1320 | /// would make it harder to remove ConstantExprs altogether. |
| 1321 | Instruction *getAsInstruction() const; |
| 1322 | |
| 1323 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 1324 | static bool classof(const Value *V) { |
| 1325 | return V->getValueID() == ConstantExprVal; |
| 1326 | } |
| 1327 | |
| 1328 | private: |
| 1329 | // Shadow Value::setValueSubclassData with a private forwarding method so that |
| 1330 | // subclasses cannot accidentally use it. |
| 1331 | void setValueSubclassData(unsigned short D) { |
| 1332 | Value::setValueSubclassData(D); |
| 1333 | } |
| 1334 | }; |
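// Usage sketch (editor's addition, illustrative only; `G` is an assumed
// GlobalVariable with value type [4 x i32]). These factories constant-fold,
// so the result may not itself be a ConstantExpr:
//   Constant *Zero = ConstantInt::get(Type::getInt32Ty(Ctx), 0);
//   Constant *Two  = ConstantInt::get(Type::getInt32Ty(Ctx), 2);
//   Constant *Idx[] = {Zero, Two};
//   Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(
//       G->getValueType(), G, Idx); // address of G[2], inbounds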
| 1335 | |
| 1336 | template <> |
| 1337 | struct OperandTraits<ConstantExpr> |
| 1338 | : public VariadicOperandTraits<ConstantExpr, 1> {}; |
| 1339 | |
| 1340 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantExpr, Constant)
| 1341 | |
| 1342 | //===----------------------------------------------------------------------===// |
| 1343 | /// 'undef' values are things that do not have specified contents. |
| 1344 | /// These are used for a variety of purposes, including global variable |
| 1345 | /// initializers and operands to instructions. 'undef' values can occur with |
| 1346 | /// any first-class type. |
| 1347 | /// |
| 1348 | /// Undef values aren't exactly constants; if they have multiple uses, they |
| 1349 | /// can appear to have different bit patterns at each use. See |
| 1350 | /// LangRef.html#undefvalues for details. |
| 1351 | /// |
| 1352 | class UndefValue : public ConstantData { |
| 1353 | friend class Constant; |
| 1354 | |
| 1355 | explicit UndefValue(Type *T) : ConstantData(T, UndefValueVal) {} |
| 1356 | |
| 1357 | void destroyConstantImpl(); |
| 1358 | |
| 1359 | protected: |
| 1360 | explicit UndefValue(Type *T, ValueTy vty) : ConstantData(T, vty) {} |
| 1361 | |
| 1362 | public: |
| 1363 | UndefValue(const UndefValue &) = delete; |
| 1364 | |
| 1365 | /// Static factory methods - Return an 'undef' object of the specified type. |
| 1366 | static UndefValue *get(Type *T); |
| 1367 | |
| 1368 | /// If this Undef has array or vector type, return an undef with the right
| 1369 | /// element type. |
| 1370 | UndefValue *getSequentialElement() const; |
| 1371 | |
| 1372 | /// If this undef has struct type, return an undef with the right element type
| 1373 | /// for the specified element. |
| 1374 | UndefValue *getStructElement(unsigned Elt) const; |
| 1375 | |
| 1376 | /// Return an undef of the right value for the specified GEP index if we can, |
| 1377 | /// otherwise return null (e.g. if C is a ConstantExpr). |
| 1378 | UndefValue *getElementValue(Constant *C) const; |
| 1379 | |
| 1380 | /// Return an undef of the right value for the specified GEP index. |
| 1381 | UndefValue *getElementValue(unsigned Idx) const; |
| 1382 | |
| 1383 | /// Return the number of elements in the array, vector, or struct. |
| 1384 | unsigned getNumElements() const; |
| 1385 | |
| 1386 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 1387 | static bool classof(const Value *V) { |
| 1388 | return V->getValueID() == UndefValueVal || |
| 1389 | V->getValueID() == PoisonValueVal; |
| 1390 | } |
| 1391 | }; |
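// Usage sketch (editor's addition, illustrative only). Note the classof above:
// isa<UndefValue> is also true for PoisonValue, since poison derives from undef:
//   UndefValue *U = UndefValue::get(Type::getInt32Ty(Ctx)); // i32 undef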
| 1392 | |
| 1393 | //===----------------------------------------------------------------------===// |
| 1394 | /// In order to facilitate speculative execution, many instructions do not |
| 1395 | /// invoke immediate undefined behavior when provided with illegal operands, |
| 1396 | /// and return a poison value instead. |
| 1397 | /// |
| 1398 | /// see LangRef.html#poisonvalues for details. |
| 1399 | /// |
| 1400 | class PoisonValue final : public UndefValue { |
| 1401 | friend class Constant; |
| 1402 | |
| 1403 | explicit PoisonValue(Type *T) : UndefValue(T, PoisonValueVal) {} |
| 1404 | |
| 1405 | void destroyConstantImpl(); |
| 1406 | |
| 1407 | public: |
| 1408 | PoisonValue(const PoisonValue &) = delete; |
| 1409 | |
| 1410 | /// Static factory methods - Return a 'poison' object of the specified type.
| 1411 | static PoisonValue *get(Type *T); |
| 1412 | |
| 1413 | /// If this poison has array or vector type, return a poison with the right |
| 1414 | /// element type. |
| 1415 | PoisonValue *getSequentialElement() const; |
| 1416 | |
| 1417 | /// If this poison has struct type, return a poison with the right element |
| 1418 | /// type for the specified element. |
| 1419 | PoisonValue *getStructElement(unsigned Elt) const; |
| 1420 | |
| 1421 | /// Return a poison of the right value for the specified GEP index if we can,
| 1422 | /// otherwise return null (e.g. if C is a ConstantExpr). |
| 1423 | PoisonValue *getElementValue(Constant *C) const; |
| 1424 | |
| 1425 | /// Return a poison of the right value for the specified GEP index.
| 1426 | PoisonValue *getElementValue(unsigned Idx) const; |
| 1427 | |
| 1428 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| 1429 | static bool classof(const Value *V) { |
| 1430 | return V->getValueID() == PoisonValueVal; |
| 1431 | } |
| 1432 | }; |
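// Usage sketch (editor's addition, illustrative only):
//   PoisonValue *P = PoisonValue::get(Type::getInt32Ty(Ctx)); // i32 poison
//   // isa<UndefValue>(P) holds, but a plain undef is not a PoisonValue.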
| 1433 | |
| 1434 | } // end namespace llvm |
| 1435 | |
| 1436 | #endif // LLVM_IR_CONSTANTS_H |
| 1 | //===-- llvm/ADT/APInt.h - For Arbitrary Precision Integer -----*- C++ -*--===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | /// |
| 9 | /// \file |
| 10 | /// This file implements a class to represent arbitrary precision |
| 11 | /// integral constant values and operations on them. |
| 12 | /// |
| 13 | //===----------------------------------------------------------------------===// |
| 14 | |
| 15 | #ifndef LLVM_ADT_APINT_H |
| 16 | #define LLVM_ADT_APINT_H |
| 17 | |
| 18 | #include "llvm/Support/Compiler.h" |
| 19 | #include "llvm/Support/MathExtras.h" |
| 20 | #include <cassert> |
| 21 | #include <climits> |
| 22 | #include <cstring> |
| 23 | #include <utility> |
| 24 | |
| 25 | namespace llvm { |
| 26 | class FoldingSetNodeID; |
| 27 | class StringRef; |
| 28 | class hash_code; |
| 29 | class raw_ostream; |
| 30 | |
| 31 | template <typename T> class SmallVectorImpl; |
| 32 | template <typename T> class ArrayRef; |
| 33 | template <typename T> class Optional; |
| 34 | template <typename T> struct DenseMapInfo; |
| 35 | |
| 36 | class APInt; |
| 37 | |
| 38 | inline APInt operator-(APInt); |
| 39 | |
| 40 | //===----------------------------------------------------------------------===// |
| 41 | // APInt Class |
| 42 | //===----------------------------------------------------------------------===// |
| 43 | |
| 44 | /// Class for arbitrary precision integers. |
| 45 | /// |
| 46 | /// APInt is a functional replacement for common-case unsigned integer types
| 47 | /// like "unsigned", "unsigned long" or "uint64_t", but it also allows
| 48 | /// non-byte-width integer sizes and large integer value types such as 3-bit,
| 49 | /// 15-bit, or more than 64-bit precision. APInt provides a variety of arithmetic operators
| 50 | /// and methods to manipulate integer values of any bit-width. It supports both |
| 51 | /// the typical integer arithmetic and comparison operations as well as bitwise |
| 52 | /// manipulation. |
| 53 | /// |
| 54 | /// The class has several invariants worth noting: |
| 55 | /// * All bit, byte, and word positions are zero-based. |
| 56 | /// * Once the bit width is set, it doesn't change except by the Truncate, |
| 57 | /// SignExtend, or ZeroExtend operations. |
| 58 | /// * All binary operators must be on APInt instances of the same bit width. |
| 59 | /// Attempting to use these operators on instances with different bit |
| 60 | /// widths will trigger an assertion failure.
| 61 | /// * The value is stored canonically as an unsigned value. For operations |
| 62 | /// where it makes a difference, there are both signed and unsigned variants |
| 63 | /// of the operation. For example, sdiv and udiv. However, because the bit |
| 64 | /// widths must be the same, operations such as Mul and Add produce the same |
| 65 | /// results regardless of whether the values are interpreted as signed or |
| 66 | /// not. |
| 67 | /// * In general, the class tries to follow the style of computation that LLVM |
| 68 | /// uses in its IR. This simplifies its use for LLVM. |
| 69 | /// |
| 70 | class LLVM_NODISCARD APInt {
| 71 | public: |
| 72 | typedef uint64_t WordType; |
| 73 | |
| 74 | /// This enum is used to hold the constants we needed for APInt. |
| 75 | enum : unsigned { |
| 76 | /// Byte size of a word. |
| 77 | APINT_WORD_SIZE = sizeof(WordType), |
| 78 | /// Bits in a word. |
| 79 | APINT_BITS_PER_WORD = APINT_WORD_SIZE * CHAR_BIT
| 80 | }; |
| 81 | |
| 82 | enum class Rounding { |
| 83 | DOWN, |
| 84 | TOWARD_ZERO, |
| 85 | UP, |
| 86 | }; |
| 87 | |
| 88 | static constexpr WordType WORDTYPE_MAX = ~WordType(0); |
| 89 | |
| 90 | private: |
| 91 | /// This union is used to store the integer value. When the |
| 92 | /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal. |
| 93 | union { |
| 94 | uint64_t VAL; ///< Used to store the <= 64 bits integer value. |
| 95 | uint64_t *pVal; ///< Used to store the >64 bits integer value. |
| 96 | } U; |
| 97 | |
| 98 | unsigned BitWidth; ///< The number of bits in this APInt. |
| 99 | |
| 100 | friend struct DenseMapInfo<APInt>; |
| 101 | |
| 102 | friend class APSInt; |
| 103 | |
| 104 | /// Fast internal constructor |
| 105 | /// |
| 106 | /// This constructor is used only internally for speed of construction of |
| 107 | /// temporaries. It is unsafe for general use so it is not public. |
| 108 | APInt(uint64_t *val, unsigned bits) : BitWidth(bits) { |
| 109 | U.pVal = val; |
| 110 | } |
| 111 | |
| 112 | /// Determine which word a bit is in. |
| 113 | /// |
| 114 | /// \returns the word position for the specified bit position. |
| 115 | static unsigned whichWord(unsigned bitPosition) { |
| 116 | return bitPosition / APINT_BITS_PER_WORD; |
| 117 | } |
| 118 | |
| 119 | /// Determine which bit in a word a bit is in. |
| 120 | /// |
| 121 | /// \returns the bit position in a word for the specified bit position |
| 122 | /// in the APInt. |
| 123 | static unsigned whichBit(unsigned bitPosition) { |
| 124 | return bitPosition % APINT_BITS_PER_WORD; |
| 125 | } |
| 126 | |
| 127 | /// Get a single bit mask. |
| 128 | /// |
| 129 | /// \returns a uint64_t with only bit at "whichBit(bitPosition)" set |
| 130 | /// This method generates and returns a uint64_t (word) mask for a single |
| 131 | /// bit at a specific bit position. This is used to mask the bit in the |
| 132 | /// corresponding word. |
| 133 | static uint64_t maskBit(unsigned bitPosition) { |
| 134 | return 1ULL << whichBit(bitPosition); |
| 135 | } |
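As a worked example of the bookkeeping above (a minimal sketch in plain C++ that mirrors these private helpers under the 64-bit-word assumption; names are illustrative): bit 70 of a wide APInt sits in word 1, at in-word position 6, so its word mask is 0x40.

#include <cstdint>

constexpr unsigned ExampleBitsPerWord = 64; // mirrors APINT_BITS_PER_WORD here
constexpr unsigned ExamplePos = 70;
static_assert(ExamplePos / ExampleBitsPerWord == 1, "bit 70 lives in word 1");
static_assert(ExamplePos % ExampleBitsPerWord == 6, "at in-word position 6");
static_assert((1ULL << (ExamplePos % ExampleBitsPerWord)) == 0x40, "mask is 0x40");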
| 136 | |
| 137 | /// Clear unused high order bits |
| 138 | /// |
| 139 | /// This method is used internally to clear the top "N" bits in the high order |
| 140 | /// word that are not used by the APInt. This is needed after the most |
| 141 | /// significant word is assigned a value to ensure that those bits are |
| 142 | /// zero'd out. |
| 143 | APInt &clearUnusedBits() { |
| 144 | // Compute how many bits are used in the final word |
| 145 | unsigned WordBits = ((BitWidth-1) % APINT_BITS_PER_WORD) + 1; |
| 146 | |
| 147 | // Mask out the high bits. |
| 148 | uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - WordBits); |
| 149 | if (isSingleWord()) |
| 150 | U.VAL &= mask; |
| 151 | else |
| 152 | U.pVal[getNumWords() - 1] &= mask; |
| 153 | return *this; |
| 154 | } |
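A worked instance of the masking above (a sketch): for BitWidth == 7, WordBits is ((7 - 1) % 64) + 1 == 7, so the mask keeps only the low 7 bits and APInt(7, 0xFF) canonicalizes its stored word to 0x7F.

static_assert(((7u - 1) % 64) + 1 == 7, "WordBits for a 7-bit APInt");
static_assert((~0ULL >> (64 - 7)) == 0x7F, "mask keeps only the low 7 bits");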
| 155 | |
| 156 | /// Get the word corresponding to a bit position |
| 157 | /// \returns the corresponding word for the specified bit position. |
| 158 | uint64_t getWord(unsigned bitPosition) const { |
| 159 | return isSingleWord() ? U.VAL : U.pVal[whichWord(bitPosition)]; |
| 160 | } |
| 161 | |
| 162 | /// Utility method to change the bit width of this APInt to new bit width, |
| 163 | /// allocating and/or deallocating as necessary. There is no guarantee on the |
| 164 | /// value of any bits upon return. Caller should populate the bits after. |
| 165 | void reallocate(unsigned NewBitWidth); |
| 166 | |
| 167 | /// Convert a char array into an APInt |
| 168 | /// |
| 169 | /// \param radix 2, 8, 10, 16, or 36 |
| 170 | /// Converts a string into a number. The string must be non-empty |
| 171 | /// and well-formed as a number of the given base. The bit-width |
| 172 | /// must be sufficient to hold the result. |
| 173 | /// |
| 174 | /// This is used by the constructors that take string arguments. |
| 175 | /// |
| 176 | /// StringRef::getAsInteger is superficially similar but (1) does |
| 177 | /// not assume that the string is well-formed and (2) grows the |
| 178 | /// result to hold the input. |
| 179 | void fromString(unsigned numBits, StringRef str, uint8_t radix); |
| 180 | |
| 181 | /// An internal division function for dividing APInts. |
| 182 | /// |
| 183 | /// This is used by the toString method to divide by the radix. It simply |
| 184 | /// provides a more convenient form of divide for internal use since KnuthDiv |
| 185 | /// has specific constraints on its inputs. If those constraints are not met |
| 186 | /// then it provides a simpler form of divide. |
| 187 | static void divide(const WordType *LHS, unsigned lhsWords, |
| 188 | const WordType *RHS, unsigned rhsWords, WordType *Quotient, |
| 189 | WordType *Remainder); |
| 190 | |
| 191 | /// out-of-line slow case for inline constructor |
| 192 | void initSlowCase(uint64_t val, bool isSigned); |
| 193 | |
| 194 | /// shared code between two array constructors |
| 195 | void initFromArray(ArrayRef<uint64_t> array); |
| 196 | |
| 197 | /// out-of-line slow case for inline copy constructor |
| 198 | void initSlowCase(const APInt &that); |
| 199 | |
| 200 | /// out-of-line slow case for shl |
| 201 | void shlSlowCase(unsigned ShiftAmt); |
| 202 | |
| 203 | /// out-of-line slow case for lshr. |
| 204 | void lshrSlowCase(unsigned ShiftAmt); |
| 205 | |
| 206 | /// out-of-line slow case for ashr. |
| 207 | void ashrSlowCase(unsigned ShiftAmt); |
| 208 | |
| 209 | /// out-of-line slow case for operator= |
| 210 | void AssignSlowCase(const APInt &RHS); |
| 211 | |
| 212 | /// out-of-line slow case for operator== |
| 213 | bool EqualSlowCase(const APInt &RHS) const LLVM_READONLY;
| 214 | |
| 215 | /// out-of-line slow case for countLeadingZeros |
| 216 | unsigned countLeadingZerosSlowCase() const LLVM_READONLY;
| 217 | |
| 218 | /// out-of-line slow case for countLeadingOnes. |
| 219 | unsigned countLeadingOnesSlowCase() const LLVM_READONLY;
| 220 | |
| 221 | /// out-of-line slow case for countTrailingZeros. |
| 222 | unsigned countTrailingZerosSlowCase() const LLVM_READONLY;
| 223 | |
| 224 | /// out-of-line slow case for countTrailingOnes |
| 225 | unsigned countTrailingOnesSlowCase() const LLVM_READONLY;
| 226 | |
| 227 | /// out-of-line slow case for countPopulation |
| 228 | unsigned countPopulationSlowCase() const LLVM_READONLY;
| 229 | |
| 230 | /// out-of-line slow case for intersects. |
| 231 | bool intersectsSlowCase(const APInt &RHS) const LLVM_READONLY;
| 232 | |
| 233 | /// out-of-line slow case for isSubsetOf. |
| 234 | bool isSubsetOfSlowCase(const APInt &RHS) const LLVM_READONLY;
| 235 | |
| 236 | /// out-of-line slow case for setBits. |
| 237 | void setBitsSlowCase(unsigned loBit, unsigned hiBit); |
| 238 | |
| 239 | /// out-of-line slow case for flipAllBits. |
| 240 | void flipAllBitsSlowCase(); |
| 241 | |
| 242 | /// out-of-line slow case for operator&=. |
| 243 | void AndAssignSlowCase(const APInt& RHS); |
| 244 | |
| 245 | /// out-of-line slow case for operator|=. |
| 246 | void OrAssignSlowCase(const APInt& RHS); |
| 247 | |
| 248 | /// out-of-line slow case for operator^=. |
| 249 | void XorAssignSlowCase(const APInt& RHS); |
| 250 | |
| 251 | /// Unsigned comparison. Returns -1, 0, or 1 if this APInt is less than, equal |
| 252 | /// to, or greater than RHS. |
| 253 | int compare(const APInt &RHS) const LLVM_READONLY;
| 254 | |
| 255 | /// Signed comparison. Returns -1, 0, or 1 if this APInt is less than, equal |
| 256 | /// to, or greater than RHS. |
| 257 | int compareSigned(const APInt &RHS) const LLVM_READONLY;
| 258 | |
| 259 | public: |
| 260 | /// \name Constructors |
| 261 | /// @{ |
| 262 | |
| 263 | /// Create a new APInt of numBits width, initialized as val. |
| 264 | /// |
| 265 | /// If isSigned is true then val is treated as if it were a signed value |
| 266 | /// (i.e. as an int64_t) and the appropriate sign extension to the bit width |
| 267 | /// will be done. Otherwise, no sign extension occurs (high order bits beyond |
| 268 | /// the range of val are zero filled). |
| 269 | /// |
| 270 | /// \param numBits the bit width of the constructed APInt |
| 271 | /// \param val the initial value of the APInt |
| 272 | /// \param isSigned how to treat signedness of val |
| 273 | APInt(unsigned numBits, uint64_t val, bool isSigned = false) |
| 274 | : BitWidth(numBits) { |
| 275 | assert(BitWidth && "bitwidth too small");
| 276 | if (isSingleWord()) { |
| 277 | U.VAL = val; |
| 278 | clearUnusedBits(); |
| 279 | } else { |
| 280 | initSlowCase(val, isSigned); |
| 281 | } |
| 282 | } |
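A minimal usage sketch of the constructor semantics above; it assumes only this constructor plus the getZExtValue/getSExtValue accessors declared later in this header, and the demo function name is illustrative.

#include "llvm/ADT/APInt.h"

void constructorDemo() {
  llvm::APInt A(8, 200);  // 8-bit value 0xC8; bits beyond the width are dropped
  (void)A.getZExtValue(); // 200 when read as unsigned
  (void)A.getSExtValue(); // -56 when read as signed (200 - 256)

  // With isSigned = true the value is sign-extended to the bit width first:
  llvm::APInt B(32, uint64_t(-56), /*isSigned=*/true); // 0xFFFFFFC8
}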
| 283 | |
| 284 | /// Construct an APInt of numBits width, initialized as bigVal[]. |
| 285 | /// |
| 286 | /// Note that bigVal.size() can be smaller or larger than the corresponding |
| 287 | /// bit width but any extraneous bits will be dropped. |
| 288 | /// |
| 289 | /// \param numBits the bit width of the constructed APInt |
| 290 | /// \param bigVal a sequence of words to form the initial value of the APInt |
| 291 | APInt(unsigned numBits, ArrayRef<uint64_t> bigVal); |
| 292 | |
| 293 | /// Equivalent to APInt(numBits, ArrayRef<uint64_t>(bigVal, numWords)), but |
| 294 | /// deprecated because this constructor is prone to ambiguity with the |
| 295 | /// APInt(unsigned, uint64_t, bool) constructor. |
| 296 | /// |
| 297 | /// If this overload is ever deleted, care should be taken to prevent calls |
| 298 | /// from being incorrectly captured by the APInt(unsigned, uint64_t, bool) |
| 299 | /// constructor. |
| 300 | APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]); |
| 301 | |
| 302 | /// Construct an APInt from a string representation. |
| 303 | /// |
| 304 | /// This constructor interprets the string \p str in the given radix. The |
| 305 | /// interpretation stops when the first character that is not suitable for the |
| 306 | /// radix is encountered, or at the end of the string. Acceptable radix values
| 307 | /// are 2, 8, 10, 16, and 36. It is an error for the value implied by the |
| 308 | /// string to require more bits than numBits. |
| 309 | /// |
| 310 | /// \param numBits the bit width of the constructed APInt |
| 311 | /// \param str the string to be interpreted |
| 312 | /// \param radix the radix to use for the conversion |
| 313 | APInt(unsigned numBits, StringRef str, uint8_t radix); |
| 314 | |
| 315 | /// Simply makes *this a copy of that. |
| 316 | /// Copy Constructor. |
| 317 | APInt(const APInt &that) : BitWidth(that.BitWidth) { |
| 318 | if (isSingleWord()) |
| 319 | U.VAL = that.U.VAL; |
| 320 | else |
| 321 | initSlowCase(that); |
| 322 | } |
| 323 | |
| 324 | /// Move Constructor. |
| 325 | APInt(APInt &&that) : BitWidth(that.BitWidth) { |
| 326 | memcpy(&U, &that.U, sizeof(U)); |
| 327 | that.BitWidth = 0; |
| 328 | } |
| 329 | |
| 330 | /// Destructor. |
| 331 | ~APInt() { |
| 332 | if (needsCleanup()) |
| 333 | delete[] U.pVal; |
| 334 | } |
| 335 | |
| 336 | /// Default constructor that creates an uninteresting APInt |
| 337 | /// representing a 1-bit zero value. |
| 338 | /// |
| 339 | /// This is useful for object deserialization (pair this with the static |
| 340 | /// method Read). |
| 341 | explicit APInt() : BitWidth(1) { U.VAL = 0; } |
| 342 | |
| 343 | /// Returns whether this instance allocated memory. |
| 344 | bool needsCleanup() const { return !isSingleWord(); } |
| 345 | |
| 346 | /// Used to insert APInt objects, or objects that contain APInt objects, into |
| 347 | /// FoldingSets. |
| 348 | void Profile(FoldingSetNodeID &id) const; |
| 349 | |
| 350 | /// @} |
| 351 | /// \name Value Tests |
| 352 | /// @{ |
| 353 | |
| 354 | /// Determine if this APInt just has one word to store its value.
| 355 | /// |
| 356 | /// \returns true if the number of bits <= 64, false otherwise. |
| 357 | bool isSingleWord() const { return BitWidth <= APINT_BITS_PER_WORD; } |
| 358 | |
| 359 | /// Determine sign of this APInt. |
| 360 | /// |
| 361 | /// This tests the high bit of this APInt to determine if it is set. |
| 362 | /// |
| 363 | /// \returns true if this APInt is negative, false otherwise |
| 364 | bool isNegative() const { return (*this)[BitWidth - 1]; } |
| 365 | |
| 366 | /// Determine if this APInt Value is non-negative (>= 0) |
| 367 | /// |
| 368 | /// This tests the high bit of the APInt to determine if it is unset. |
| 369 | bool isNonNegative() const { return !isNegative(); } |
| 370 | |
| 371 | /// Determine if sign bit of this APInt is set. |
| 372 | /// |
| 373 | /// This tests the high bit of this APInt to determine if it is set. |
| 374 | /// |
| 375 | /// \returns true if this APInt has its sign bit set, false otherwise. |
| 376 | bool isSignBitSet() const { return (*this)[BitWidth-1]; } |
| 377 | |
| 378 | /// Determine if sign bit of this APInt is clear. |
| 379 | /// |
| 380 | /// This tests the high bit of this APInt to determine if it is clear. |
| 381 | /// |
| 382 | /// \returns true if this APInt has its sign bit clear, false otherwise. |
| 383 | bool isSignBitClear() const { return !isSignBitSet(); } |
| 384 | |
| 385 | /// Determine if this APInt Value is positive. |
| 386 | /// |
| 387 | /// This tests if the value of this APInt is positive (> 0). Note |
| 388 | /// that 0 is not a positive value. |
| 389 | /// |
| 390 | /// \returns true if this APInt is positive. |
| 391 | bool isStrictlyPositive() const { return isNonNegative() && !isNullValue(); } |
| 392 | |
| 393 | /// Determine if this APInt Value is non-positive (<= 0). |
| 394 | /// |
| 395 | /// \returns true if this APInt is non-positive. |
| 396 | bool isNonPositive() const { return !isStrictlyPositive(); } |
| 397 | |
| 398 | /// Determine if all bits are set |
| 399 | /// |
| 400 | /// This checks to see whether all bits of the APInt are set or not.
| 401 | bool isAllOnesValue() const { |
| 402 | if (isSingleWord()) |
| 403 | return U.VAL == WORDTYPE_MAX >> (APINT_BITS_PER_WORD - BitWidth); |
| 404 | return countTrailingOnesSlowCase() == BitWidth; |
| 405 | } |
| 406 | |
| 407 | /// Determine if all bits are clear |
| 408 | /// |
| 409 | /// This checks to see whether all bits of the APInt are clear or
| 410 | /// not.
| 411 | bool isNullValue() const { return !*this; } |
| 412 | |
| 413 | /// Determine if this is a value of 1. |
| 414 | /// |
| 415 | /// This checks to see if the value of this APInt is one. |
| 416 | bool isOneValue() const { |
| 417 | if (isSingleWord()) |
| 418 | return U.VAL == 1; |
| 419 | return countLeadingZerosSlowCase() == BitWidth - 1; |
| 420 | } |
| 421 | |
| 422 | /// Determine if this is the largest unsigned value. |
| 423 | /// |
| 424 | /// This checks to see if the value of this APInt is the maximum unsigned |
| 425 | /// value for the APInt's bit width. |
| 426 | bool isMaxValue() const { return isAllOnesValue(); } |
| 427 | |
| 428 | /// Determine if this is the largest signed value. |
| 429 | /// |
| 430 | /// This checks to see if the value of this APInt is the maximum signed |
| 431 | /// value for the APInt's bit width. |
| 432 | bool isMaxSignedValue() const { |
| 433 | if (isSingleWord()) |
| 434 | return U.VAL == ((WordType(1) << (BitWidth - 1)) - 1); |
| 435 | return !isNegative() && countTrailingOnesSlowCase() == BitWidth - 1; |
| 436 | } |
| 437 | |
| 438 | /// Determine if this is the smallest unsigned value. |
| 439 | /// |
| 440 | /// This checks to see if the value of this APInt is the minimum unsigned |
| 441 | /// value for the APInt's bit width. |
| 442 | bool isMinValue() const { return isNullValue(); } |
| 443 | |
| 444 | /// Determine if this is the smallest signed value. |
| 445 | /// |
| 446 | /// This checks to see if the value of this APInt is the minimum signed |
| 447 | /// value for the APInt's bit width. |
| 448 | bool isMinSignedValue() const { |
| 449 | if (isSingleWord()) |
| 450 | return U.VAL == (WordType(1) << (BitWidth - 1)); |
| 451 | return isNegative() && countTrailingZerosSlowCase() == BitWidth - 1; |
| 452 | } |
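A quick check of the signed-extreme predicates above at 8 bits (a sketch; it uses the getSignedMaxValue/getSignedMinValue generators declared further down, and the demo name is illustrative).

#include "llvm/ADT/APInt.h"

void signedExtremesDemo() {
  llvm::APInt Max = llvm::APInt::getSignedMaxValue(8); // 0x7F == +127
  llvm::APInt Min = llvm::APInt::getSignedMinValue(8); // 0x80 == -128
  bool A = Max.isMaxSignedValue(); // true
  bool B = Min.isMinSignedValue(); // true
  (void)A; (void)B;
}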
| 453 | |
| 454 | /// Check if this APInt has an N-bits unsigned integer value. |
| 455 | bool isIntN(unsigned N) const { |
| 456 | assert(N && "N == 0 ???");
| 457 | return getActiveBits() <= N; |
| 458 | } |
| 459 | |
| 460 | /// Check if this APInt has an N-bits signed integer value. |
| 461 | bool isSignedIntN(unsigned N) const { |
| 462 | assert(N && "N == 0 ???");
| 463 | return getMinSignedBits() <= N; |
| 464 | } |
| 465 | |
| 466 | /// Check if this APInt's value is a power of two greater than zero. |
| 467 | /// |
| 468 | /// \returns true if the argument APInt value is a power of two > 0. |
| 469 | bool isPowerOf2() const { |
| 470 | if (isSingleWord()) |
| 471 | return isPowerOf2_64(U.VAL); |
| 472 | return countPopulationSlowCase() == 1; |
| 473 | } |
| 474 | |
| 475 | /// Check if the APInt's value is returned by getSignMask. |
| 476 | /// |
| 477 | /// \returns true if this is the value returned by getSignMask. |
| 478 | bool isSignMask() const { return isMinSignedValue(); } |
| 479 | |
| 480 | /// Convert APInt to a boolean value. |
| 481 | /// |
| 482 | /// This converts the APInt to a boolean value as a test against zero. |
| 483 | bool getBoolValue() const { return !!*this; } |
| 484 | |
| 485 | /// If this value is smaller than the specified limit, return it, otherwise |
| 486 | /// return the limit value. This causes the value to saturate to the limit. |
| 487 | uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) const {
| 488 | return ugt(Limit) ? Limit : getZExtValue(); |
| 489 | } |
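A sketch of the saturating read above (demo name illustrative):

#include "llvm/ADT/APInt.h"

void limitedValueDemo() {
  uint64_t L = llvm::APInt(64, 500).getLimitedValue(255); // 255: saturates to the limit
  uint64_t M = llvm::APInt(64, 7).getLimitedValue(255);   // 7: under the limit
  (void)L; (void)M;
}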
| 490 | |
| 491 | /// Check if the APInt consists of a repeated bit pattern. |
| 492 | /// |
| 493 | /// e.g. 0x01010101 satisfies isSplat(8). |
| 494 | /// \param SplatSizeInBits The size of the pattern in bits. Must divide bit |
| 495 | /// width without remainder. |
| 496 | bool isSplat(unsigned SplatSizeInBits) const; |
| 497 | |
| 498 | /// \returns true if this APInt value is a sequence of \p numBits ones
| 499 | /// starting at the least significant bit with the remainder zero. |
| 500 | bool isMask(unsigned numBits) const { |
| 501 | assert(numBits != 0 && "numBits must be non-zero");
| 502 | assert(numBits <= BitWidth && "numBits out of range");
| 503 | if (isSingleWord()) |
| 504 | return U.VAL == (WORDTYPE_MAX >> (APINT_BITS_PER_WORD - numBits)); |
| 505 | unsigned Ones = countTrailingOnesSlowCase(); |
| 506 | return (numBits == Ones) && |
| 507 | ((Ones + countLeadingZerosSlowCase()) == BitWidth); |
| 508 | } |
| 509 | |
| 510 | /// \returns true if this APInt is a non-empty sequence of ones starting at |
| 511 | /// the least significant bit with the remainder zero. |
| 512 | /// Ex. isMask(0x0000FFFFU) == true. |
| 513 | bool isMask() const { |
| 514 | if (isSingleWord()) |
| 515 | return isMask_64(U.VAL); |
| 516 | unsigned Ones = countTrailingOnesSlowCase(); |
| 517 | return (Ones > 0) && ((Ones + countLeadingZerosSlowCase()) == BitWidth); |
| 518 | } |
| 519 | |
| 520 | /// Return true if this APInt value contains a sequence of ones with |
| 521 | /// the remainder zero. |
| 522 | bool isShiftedMask() const { |
| 523 | if (isSingleWord()) |
| 524 | return isShiftedMask_64(U.VAL); |
| 525 | unsigned Ones = countPopulationSlowCase(); |
| 526 | unsigned LeadZ = countLeadingZerosSlowCase(); |
| 527 | return (Ones + LeadZ + countTrailingZeros()) == BitWidth; |
| 528 | } |
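The mask predicates above, exercised on small constants (a sketch; demo name illustrative):

#include "llvm/ADT/APInt.h"

void maskDemo() {
  llvm::APInt A(32, 0x0000FFFF);
  bool M1 = A.isMask();       // true: contiguous ones starting at bit 0
  bool M2 = A.isMask(16);     // true: exactly 16 trailing ones
  llvm::APInt B(32, 0x00FF0000);
  bool S = B.isShiftedMask(); // true: a contiguous run of ones, shifted up
  (void)M1; (void)M2; (void)S;
}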
| 529 | |
| 530 | /// @} |
| 531 | /// \name Value Generators |
| 532 | /// @{ |
| 533 | |
| 534 | /// Gets maximum unsigned value of APInt for specific bit width. |
| 535 | static APInt getMaxValue(unsigned numBits) { |
| 536 | return getAllOnesValue(numBits); |
| 537 | } |
| 538 | |
| 539 | /// Gets maximum signed value of APInt for a specific bit width. |
| 540 | static APInt getSignedMaxValue(unsigned numBits) { |
| 541 | APInt API = getAllOnesValue(numBits); |
| 542 | API.clearBit(numBits - 1); |
| 543 | return API; |
| 544 | } |
| 545 | |
| 546 | /// Gets minimum unsigned value of APInt for a specific bit width. |
| 547 | static APInt getMinValue(unsigned numBits) { return APInt(numBits, 0); } |
| 548 | |
| 549 | /// Gets minimum signed value of APInt for a specific bit width. |
| 550 | static APInt getSignedMinValue(unsigned numBits) { |
| 551 | APInt API(numBits, 0); |
| 552 | API.setBit(numBits - 1); |
| 553 | return API; |
| 554 | } |
| 555 | |
| 556 | /// Get the SignMask for a specific bit width. |
| 557 | /// |
| 558 | /// This is just a wrapper function of getSignedMinValue(), and it helps code |
| 559 | /// readability when we want to get a SignMask. |
| 560 | static APInt getSignMask(unsigned BitWidth) { |
| 561 | return getSignedMinValue(BitWidth); |
| 562 | } |
| 563 | |
| 564 | /// Get the all-ones value. |
| 565 | /// |
| 566 | /// \returns the all-ones value for an APInt of the specified bit-width. |
| 567 | static APInt getAllOnesValue(unsigned numBits) { |
| 568 | return APInt(numBits, WORDTYPE_MAX, true); |
| 569 | } |
| 570 | |
| 571 | /// Get the '0' value. |
| 572 | /// |
| 573 | /// \returns the '0' value for an APInt of the specified bit-width. |
| 574 | static APInt getNullValue(unsigned numBits) { return APInt(numBits, 0); } |
| 575 | |
| 576 | /// Compute an APInt containing the numBits high bits of this APInt.
| 577 | ///
| 578 | /// Get an APInt with the same BitWidth as this APInt, with the low bits
| 579 | /// masked to zero and the high bits shifted down to the least significant bit.
| 580 | /// |
| 581 | /// \returns the high "numBits" bits of this APInt. |
| 582 | APInt getHiBits(unsigned numBits) const; |
| 583 | |
| 584 | /// Compute an APInt containing the numBits low bits of this APInt.
| 585 | ///
| 586 | /// Get an APInt with the same BitWidth as this APInt, with the high bits
| 587 | /// masked to zero.
| 588 | /// |
| 589 | /// \returns the low "numBits" bits of this APInt. |
| 590 | APInt getLoBits(unsigned numBits) const; |
| 591 | |
| 592 | /// Return an APInt with exactly one bit set in the result. |
| 593 | static APInt getOneBitSet(unsigned numBits, unsigned BitNo) { |
| 594 | APInt Res(numBits, 0); |
| 595 | Res.setBit(BitNo); |
| 596 | return Res; |
| 597 | } |
| 598 | |
| 599 | /// Get a value with a block of bits set. |
| 600 | /// |
| 601 | /// Constructs an APInt value that has a contiguous range of bits set. The |
| 602 | /// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other |
| 603 | /// bits will be zero. For example, with parameters(32, 0, 16) you would get |
| 604 | /// 0x0000FFFF. Please call getBitsSetWithWrap if \p loBit may be greater than |
| 605 | /// \p hiBit. |
| 606 | /// |
| 607 | /// \param numBits the intended bit width of the result |
| 608 | /// \param loBit the index of the lowest bit set. |
| 609 | /// \param hiBit the index of the highest bit set. |
| 610 | /// |
| 611 | /// \returns An APInt value with the requested bits set. |
| 612 | static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit) { |
| 613 | assert(loBit <= hiBit && "loBit greater than hiBit");
| 614 | APInt Res(numBits, 0); |
| 615 | Res.setBits(loBit, hiBit); |
| 616 | return Res; |
| 617 | } |
| 618 | |
| 619 | /// Wrap version of getBitsSet. |
| 620 | /// If \p hiBit is bigger than \p loBit, this is the same as getBitsSet.
| 621 | /// If \p hiBit is not bigger than \p loBit, the set bits "wrap". For example, |
| 622 | /// with parameters (32, 28, 4), you would get 0xF000000F. |
| 623 | /// If \p hiBit is equal to \p loBit, you would get a result with all bits |
| 624 | /// set. |
| 625 | static APInt getBitsSetWithWrap(unsigned numBits, unsigned loBit, |
| 626 | unsigned hiBit) { |
| 627 | APInt Res(numBits, 0); |
| 628 | Res.setBitsWithWrap(loBit, hiBit); |
| 629 | return Res; |
| 630 | } |
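The documented examples above, spelled out (a sketch; demo name illustrative):

#include "llvm/ADT/APInt.h"

void bitsSetDemo() {
  llvm::APInt A = llvm::APInt::getBitsSet(32, 0, 16);         // 0x0000FFFF
  llvm::APInt B = llvm::APInt::getBitsSetWithWrap(32, 28, 4); // 0xF000000F
  (void)A; (void)B;
}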
| 631 | |
| 632 | /// Get a value with upper bits starting at loBit set. |
| 633 | /// |
| 634 | /// Constructs an APInt value that has a contiguous range of bits set. The |
| 635 | /// bits from loBit (inclusive) to numBits (exclusive) will be set. All other |
| 636 | /// bits will be zero. For example, with parameters(32, 12) you would get |
| 637 | /// 0xFFFFF000. |
| 638 | /// |
| 639 | /// \param numBits the intended bit width of the result |
| 640 | /// \param loBit the index of the lowest bit to set. |
| 641 | /// |
| 642 | /// \returns An APInt value with the requested bits set. |
| 643 | static APInt getBitsSetFrom(unsigned numBits, unsigned loBit) { |
| 644 | APInt Res(numBits, 0); |
| 645 | Res.setBitsFrom(loBit); |
| 646 | return Res; |
| 647 | } |
| 648 | |
| 649 | /// Get a value with high bits set |
| 650 | /// |
| 651 | /// Constructs an APInt value that has the top hiBitsSet bits set. |
| 652 | /// |
| 653 | /// \param numBits the bitwidth of the result |
| 654 | /// \param hiBitsSet the number of high-order bits set in the result. |
| 655 | static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet) { |
| 656 | APInt Res(numBits, 0); |
| 657 | Res.setHighBits(hiBitsSet); |
| 658 | return Res; |
| 659 | } |
| 660 | |
| 661 | /// Get a value with low bits set |
| 662 | /// |
| 663 | /// Constructs an APInt value that has the bottom loBitsSet bits set. |
| 664 | /// |
| 665 | /// \param numBits the bitwidth of the result |
| 666 | /// \param loBitsSet the number of low-order bits set in the result. |
| 667 | static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet) { |
| 668 | APInt Res(numBits, 0); |
| 669 | Res.setLowBits(loBitsSet); |
| 670 | return Res; |
| 671 | } |
| 672 | |
| 673 | /// Return a value containing V broadcasted over NewLen bits. |
| 674 | static APInt getSplat(unsigned NewLen, const APInt &V); |
| 675 | |
| 676 | /// Determine if two APInts have the same value, after zero-extending |
| 677 | /// one of them (if needed!) to ensure that the bit-widths match. |
| 678 | static bool isSameValue(const APInt &I1, const APInt &I2) { |
| 679 | if (I1.getBitWidth() == I2.getBitWidth()) |
| 680 | return I1 == I2; |
| 681 | |
| 682 | if (I1.getBitWidth() > I2.getBitWidth()) |
| 683 | return I1 == I2.zext(I1.getBitWidth()); |
| 684 | |
| 685 | return I1.zext(I2.getBitWidth()) == I2; |
| 686 | } |
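Because isSameValue zero-extends the narrower operand, a bit pattern that reads as negative at the narrow width still compares equal to its unsigned reading (a sketch; demo name illustrative):

#include "llvm/ADT/APInt.h"

void sameValueDemo() {
  llvm::APInt A(8, 0xFF);  // 255 unsigned, -1 signed
  llvm::APInt B(16, 0xFF); // 255
  bool Same = llvm::APInt::isSameValue(A, B); // true: zext turns 0xFF into 0x00FF
  (void)Same;
}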
| 687 | |
| 688 | /// Overload to compute a hash_code for an APInt value. |
| 689 | friend hash_code hash_value(const APInt &Arg); |
| 690 | |
| 691 | /// This function returns a pointer to the internal storage of the APInt. |
| 692 | /// This is useful for writing out the APInt in binary form without any |
| 693 | /// conversions. |
| 694 | const uint64_t *getRawData() const { |
| 695 | if (isSingleWord()) |
| 696 | return &U.VAL; |
| 697 | return &U.pVal[0]; |
| 698 | } |
| 699 | |
| 700 | /// @} |
| 701 | /// \name Unary Operators |
| 702 | /// @{ |
| 703 | |
| 704 | /// Postfix increment operator. |
| 705 | /// |
| 706 | /// Increments *this by 1. |
| 707 | /// |
| 708 | /// \returns a new APInt value representing the original value of *this. |
| 709 | const APInt operator++(int) { |
| 710 | APInt API(*this); |
| 711 | ++(*this); |
| 712 | return API; |
| 713 | } |
| 714 | |
| 715 | /// Prefix increment operator. |
| 716 | /// |
| 717 | /// \returns *this incremented by one |
| 718 | APInt &operator++(); |
| 719 | |
| 720 | /// Postfix decrement operator. |
| 721 | /// |
| 722 | /// Decrements *this by 1. |
| 723 | /// |
| 724 | /// \returns a new APInt value representing the original value of *this. |
| 725 | const APInt operator--(int) { |
| 726 | APInt API(*this); |
| 727 | --(*this); |
| 728 | return API; |
| 729 | } |
| 730 | |
| 731 | /// Prefix decrement operator. |
| 732 | /// |
| 733 | /// \returns *this decremented by one. |
| 734 | APInt &operator--(); |
| 735 | |
| 736 | /// Logical negation operator. |
| 737 | /// |
| 738 | /// Performs logical negation operation on this APInt. |
| 739 | /// |
| 740 | /// \returns true if *this is zero, false otherwise. |
| 741 | bool operator!() const { |
| 742 | if (isSingleWord()) |
| 743 | return U.VAL == 0; |
| 744 | return countLeadingZerosSlowCase() == BitWidth; |
| 745 | } |
| 746 | |
| 747 | /// @} |
| 748 | /// \name Assignment Operators |
| 749 | /// @{ |
| 750 | |
| 751 | /// Copy assignment operator. |
| 752 | /// |
| 753 | /// \returns *this after assignment of RHS. |
| 754 | APInt &operator=(const APInt &RHS) { |
| 755 | // If the bitwidths are the same, we can avoid mucking with memory |
| 756 | if (isSingleWord() && RHS.isSingleWord()) { |
| 757 | U.VAL = RHS.U.VAL; |
| 758 | BitWidth = RHS.BitWidth; |
| 759 | return clearUnusedBits(); |
| 760 | } |
| 761 | |
| 762 | AssignSlowCase(RHS); |
| 763 | return *this; |
| 764 | } |
| 765 | |
| 766 | /// Move assignment operator. |
| 767 | APInt &operator=(APInt &&that) { |
| 768 | #ifdef EXPENSIVE_CHECKS |
| 769 | // Some std::shuffle implementations still do self-assignment. |
| 770 | if (this == &that) |
| 771 | return *this; |
| 772 | #endif |
| 773 | assert(this != &that && "Self-move not supported");
| 774 | if (!isSingleWord()) |
| 775 | delete[] U.pVal; |
| 776 | |
| 777 | // Use memcpy so that type based alias analysis sees both VAL and pVal |
| 778 | // as modified. |
| 779 | memcpy(&U, &that.U, sizeof(U)); |
| 780 | |
| 781 | BitWidth = that.BitWidth; |
| 782 | that.BitWidth = 0; |
| 783 | |
| 784 | return *this; |
| 785 | } |
| 786 | |
| 787 | /// Assignment operator. |
| 788 | /// |
| 789 | /// The RHS value is assigned to *this. If the significant bits in RHS exceed |
| 790 | /// the bit width, the excess bits are truncated. If the bit width is larger |
| 791 | /// than 64, the value is zero filled in the unspecified high order bits. |
| 792 | /// |
| 793 | /// \returns *this after assignment of RHS value. |
| 794 | APInt &operator=(uint64_t RHS) { |
| 795 | if (isSingleWord()) { |
| 796 | U.VAL = RHS; |
| 797 | return clearUnusedBits(); |
| 798 | } |
| 799 | U.pVal[0] = RHS; |
| 800 | memset(U.pVal + 1, 0, (getNumWords() - 1) * APINT_WORD_SIZE); |
| 801 | return *this; |
| 802 | } |
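A sketch of the truncating assignment above (demo name illustrative):

#include "llvm/ADT/APInt.h"

void assignDemo() {
  llvm::APInt A(4, 0);
  A = 0x1234; // only the low 4 bits survive clearUnusedBits(): A == 0x4
}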
| 803 | |
| 804 | /// Bitwise AND assignment operator. |
| 805 | /// |
| 806 | /// Performs a bitwise AND operation on this APInt and RHS. The result is |
| 807 | /// assigned to *this. |
| 808 | /// |
| 809 | /// \returns *this after ANDing with RHS. |
| 810 | APInt &operator&=(const APInt &RHS) { |
| 811 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
| 812 | if (isSingleWord()) |
| 813 | U.VAL &= RHS.U.VAL; |
| 814 | else |
| 815 | AndAssignSlowCase(RHS); |
| 816 | return *this; |
| 817 | } |
| 818 | |
| 819 | /// Bitwise AND assignment operator. |
| 820 | /// |
| 821 | /// Performs a bitwise AND operation on this APInt and RHS. RHS is |
| 822 | /// logically zero-extended or truncated to match the bit-width of |
| 823 | /// the LHS. |
| 824 | APInt &operator&=(uint64_t RHS) { |
| 825 | if (isSingleWord()) { |
| 826 | U.VAL &= RHS; |
| 827 | return *this; |
| 828 | } |
| 829 | U.pVal[0] &= RHS; |
| 830 | memset(U.pVal+1, 0, (getNumWords() - 1) * APINT_WORD_SIZE); |
| 831 | return *this; |
| 832 | } |
| 833 | |
| 834 | /// Bitwise OR assignment operator. |
| 835 | /// |
| 836 | /// Performs a bitwise OR operation on this APInt and RHS. The result is |
| 837 | /// assigned to *this.
| 838 | /// |
| 839 | /// \returns *this after ORing with RHS. |
| 840 | APInt &operator|=(const APInt &RHS) { |
| 841 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
| 842 | if (isSingleWord()) |
| 843 | U.VAL |= RHS.U.VAL; |
| 844 | else |
| 845 | OrAssignSlowCase(RHS); |
| 846 | return *this; |
| 847 | } |
| 848 | |
| 849 | /// Bitwise OR assignment operator. |
| 850 | /// |
| 851 | /// Performs a bitwise OR operation on this APInt and RHS. RHS is |
| 852 | /// logically zero-extended or truncated to match the bit-width of |
| 853 | /// the LHS. |
| 854 | APInt &operator|=(uint64_t RHS) { |
| 855 | if (isSingleWord()) { |
| 856 | U.VAL |= RHS; |
| 857 | return clearUnusedBits(); |
| 858 | } |
| 859 | U.pVal[0] |= RHS; |
| 860 | return *this; |
| 861 | } |
| 862 | |
| 863 | /// Bitwise XOR assignment operator. |
| 864 | /// |
| 865 | /// Performs a bitwise XOR operation on this APInt and RHS. The result is |
| 866 | /// assigned to *this. |
| 867 | /// |
| 868 | /// \returns *this after XORing with RHS. |
| 869 | APInt &operator^=(const APInt &RHS) { |
| 870 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
| 871 | if (isSingleWord()) |
| 872 | U.VAL ^= RHS.U.VAL; |
| 873 | else |
| 874 | XorAssignSlowCase(RHS); |
| 875 | return *this; |
| 876 | } |
| 877 | |
| 878 | /// Bitwise XOR assignment operator. |
| 879 | /// |
| 880 | /// Performs a bitwise XOR operation on this APInt and RHS. RHS is |
| 881 | /// logically zero-extended or truncated to match the bit-width of |
| 882 | /// the LHS. |
| 883 | APInt &operator^=(uint64_t RHS) { |
| 884 | if (isSingleWord()) { |
| 885 | U.VAL ^= RHS; |
| 886 | return clearUnusedBits(); |
| 887 | } |
| 888 | U.pVal[0] ^= RHS; |
| 889 | return *this; |
| 890 | } |
| 891 | |
| 892 | /// Multiplication assignment operator. |
| 893 | /// |
| 894 | /// Multiplies this APInt by RHS and assigns the result to *this. |
| 895 | /// |
| 896 | /// \returns *this |
| 897 | APInt &operator*=(const APInt &RHS); |
| 898 | APInt &operator*=(uint64_t RHS); |
| 899 | |
| 900 | /// Addition assignment operator. |
| 901 | /// |
| 902 | /// Adds RHS to *this and assigns the result to *this. |
| 903 | /// |
| 904 | /// \returns *this |
| 905 | APInt &operator+=(const APInt &RHS); |
| 906 | APInt &operator+=(uint64_t RHS); |
| 907 | |
| 908 | /// Subtraction assignment operator. |
| 909 | /// |
| 910 | /// Subtracts RHS from *this and assigns the result to *this. |
| 911 | /// |
| 912 | /// \returns *this |
| 913 | APInt &operator-=(const APInt &RHS); |
| 914 | APInt &operator-=(uint64_t RHS); |
| 915 | |
| 916 | /// Left-shift assignment function. |
| 917 | /// |
| 918 | /// Shifts *this left by shiftAmt and assigns the result to *this. |
| 919 | /// |
| 920 | /// \returns *this after shifting left by ShiftAmt |
| 921 | APInt &operator<<=(unsigned ShiftAmt) { |
| 922 | assert(ShiftAmt <= BitWidth && "Invalid shift amount");
| 923 | if (isSingleWord()) { |
| 924 | if (ShiftAmt == BitWidth) |
| 925 | U.VAL = 0; |
| 926 | else |
| 927 | U.VAL <<= ShiftAmt; |
| 928 | return clearUnusedBits(); |
| 929 | } |
| 930 | shlSlowCase(ShiftAmt); |
| 931 | return *this; |
| 932 | } |
| 933 | |
| 934 | /// Left-shift assignment function. |
| 935 | /// |
| 936 | /// Shifts *this left by shiftAmt and assigns the result to *this. |
| 937 | /// |
| 938 | /// \returns *this after shifting left by ShiftAmt |
| 939 | APInt &operator<<=(const APInt &ShiftAmt); |
| 940 | |
| 941 | /// @} |
| 942 | /// \name Binary Operators |
| 943 | /// @{ |
| 944 | |
| 945 | /// Multiplication operator. |
| 946 | /// |
| 947 | /// Multiplies this APInt by RHS and returns the result. |
| 948 | APInt operator*(const APInt &RHS) const; |
| 949 | |
| 950 | /// Left logical shift operator. |
| 951 | /// |
| 952 | /// Shifts this APInt left by \p Bits and returns the result. |
| 953 | APInt operator<<(unsigned Bits) const { return shl(Bits); } |
| 954 | |
| 955 | /// Left logical shift operator. |
| 956 | /// |
| 957 | /// Shifts this APInt left by \p Bits and returns the result. |
| 958 | APInt operator<<(const APInt &Bits) const { return shl(Bits); } |
| 959 | |
| 960 | /// Arithmetic right-shift function. |
| 961 | /// |
| 962 | /// Arithmetic right-shift this APInt by shiftAmt. |
| 963 | APInt ashr(unsigned ShiftAmt) const { |
| 964 | APInt R(*this); |
| 965 | R.ashrInPlace(ShiftAmt); |
| 966 | return R; |
| 967 | } |
| 968 | |
| 969 | /// Arithmetic right-shift this APInt by ShiftAmt in place. |
| 970 | void ashrInPlace(unsigned ShiftAmt) { |
| 971 | assert(ShiftAmt <= BitWidth && "Invalid shift amount");
| 972 | if (isSingleWord()) { |
| 973 | int64_t SExtVAL = SignExtend64(U.VAL, BitWidth); |
| 974 | if (ShiftAmt == BitWidth) |
| 975 | U.VAL = SExtVAL >> (APINT_BITS_PER_WORD - 1); // Fill with sign bit. |
| 976 | else |
| 977 | U.VAL = SExtVAL >> ShiftAmt; |
| 978 | clearUnusedBits(); |
| 979 | return; |
| 980 | } |
| 981 | ashrSlowCase(ShiftAmt); |
| 982 | } |
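A sketch contrasting the arithmetic shift above with the logical shift defined just below, at 8 bits (demo name illustrative):

#include "llvm/ADT/APInt.h"

void shiftDemo() {
  llvm::APInt A(8, 0x80);    // sign bit set
  llvm::APInt B = A.ashr(1); // 0xC0: vacated bits filled with the sign bit
  llvm::APInt C = A.lshr(1); // 0x40: vacated bits filled with zeros
  (void)B; (void)C;
}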
| 983 | |
| 984 | /// Logical right-shift function. |
| 985 | /// |
| 986 | /// Logical right-shift this APInt by shiftAmt. |
| 987 | APInt lshr(unsigned shiftAmt) const { |
| 988 | APInt R(*this); |
| 989 | R.lshrInPlace(shiftAmt); |
| 990 | return R; |
| 991 | } |
| 992 | |
| 993 | /// Logical right-shift this APInt by ShiftAmt in place. |
| 994 | void lshrInPlace(unsigned ShiftAmt) { |
| 995 | assert(ShiftAmt <= BitWidth && "Invalid shift amount");
| 996 | if (isSingleWord()) { |
| 997 | if (ShiftAmt == BitWidth) |
| 998 | U.VAL = 0; |
| 999 | else |
| 1000 | U.VAL >>= ShiftAmt; |
| 1001 | return; |
| 1002 | } |
| 1003 | lshrSlowCase(ShiftAmt); |
| 1004 | } |
| 1005 | |
| 1006 | /// Left-shift function. |
| 1007 | /// |
| 1008 | /// Left-shift this APInt by shiftAmt. |
| 1009 | APInt shl(unsigned shiftAmt) const { |
| 1010 | APInt R(*this); |
| 1011 | R <<= shiftAmt; |
| 1012 | return R; |
| 1013 | } |
| 1014 | |
| 1015 | /// Rotate left by rotateAmt. |
| 1016 | APInt rotl(unsigned rotateAmt) const; |
| 1017 | |
| 1018 | /// Rotate right by rotateAmt. |
| 1019 | APInt rotr(unsigned rotateAmt) const; |
| 1020 | |
| 1021 | /// Arithmetic right-shift function. |
| 1022 | /// |
| 1023 | /// Arithmetic right-shift this APInt by shiftAmt. |
| 1024 | APInt ashr(const APInt &ShiftAmt) const { |
| 1025 | APInt R(*this); |
| 1026 | R.ashrInPlace(ShiftAmt); |
| 1027 | return R; |
| 1028 | } |
| 1029 | |
| 1030 | /// Arithmetic right-shift this APInt by shiftAmt in place. |
| 1031 | void ashrInPlace(const APInt &shiftAmt); |
| 1032 | |
| 1033 | /// Logical right-shift function. |
| 1034 | /// |
| 1035 | /// Logical right-shift this APInt by shiftAmt. |
| 1036 | APInt lshr(const APInt &ShiftAmt) const { |
| 1037 | APInt R(*this); |
| 1038 | R.lshrInPlace(ShiftAmt); |
| 1039 | return R; |
| 1040 | } |
| 1041 | |
| 1042 | /// Logical right-shift this APInt by ShiftAmt in place. |
| 1043 | void lshrInPlace(const APInt &ShiftAmt); |
| 1044 | |
| 1045 | /// Left-shift function. |
| 1046 | /// |
| 1047 | /// Left-shift this APInt by shiftAmt. |
| 1048 | APInt shl(const APInt &ShiftAmt) const { |
| 1049 | APInt R(*this); |
| 1050 | R <<= ShiftAmt; |
| 1051 | return R; |
| 1052 | } |
| 1053 | |
| 1054 | /// Rotate left by rotateAmt. |
| 1055 | APInt rotl(const APInt &rotateAmt) const; |
| 1056 | |
| 1057 | /// Rotate right by rotateAmt. |
| 1058 | APInt rotr(const APInt &rotateAmt) const; |
| 1059 | |
| 1060 | /// Unsigned division operation. |
| 1061 | /// |
| 1062 | /// Perform an unsigned divide operation on this APInt by RHS. Both this and |
| 1063 | /// RHS are treated as unsigned quantities for purposes of this division. |
| 1064 | /// |
| 1065 | /// \returns a new APInt value containing the division result, rounded towards |
| 1066 | /// zero. |
| 1067 | APInt udiv(const APInt &RHS) const; |
| 1068 | APInt udiv(uint64_t RHS) const; |
| 1069 | |
| 1070 | /// Signed division function for APInt. |
| 1071 | /// |
| 1072 | /// Signed divide this APInt by APInt RHS. |
| 1073 | /// |
| 1074 | /// The result is rounded towards zero. |
| 1075 | APInt sdiv(const APInt &RHS) const; |
| 1076 | APInt sdiv(int64_t RHS) const; |
| 1077 | |
| 1078 | /// Unsigned remainder operation. |
| 1079 | /// |
| 1080 | /// Perform an unsigned remainder operation on this APInt with RHS being the |
| 1081 | /// divisor. Both this and RHS are treated as unsigned quantities for purposes |
| 1082 | /// of this operation. Note that this is a true remainder operation and not a |
| 1083 | /// modulo operation because the sign follows the sign of the dividend which |
| 1084 | /// is *this. |
| 1085 | /// |
| 1086 | /// \returns a new APInt value containing the remainder result |
| 1087 | APInt urem(const APInt &RHS) const; |
| 1088 | uint64_t urem(uint64_t RHS) const; |
| 1089 | |
| 1090 | /// Function for signed remainder operation. |
| 1091 | /// |
| 1092 | /// Signed remainder operation on APInt. |
| 1093 | APInt srem(const APInt &RHS) const; |
| 1094 | int64_t srem(int64_t RHS) const; |
| 1095 | |
| 1096 | /// Dual division/remainder interface. |
| 1097 | /// |
| 1098 | /// Sometimes it is convenient to divide two APInt values and obtain both the |
| 1099 | /// quotient and remainder. This function does both operations in the same |
| 1100 | /// computation making it a little more efficient. The pair of input arguments |
| 1101 | /// may overlap with the pair of output arguments. It is safe to call |
| 1102 | /// udivrem(X, Y, X, Y), for example. |
| 1103 | static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, |
| 1104 | APInt &Remainder); |
| 1105 | static void udivrem(const APInt &LHS, uint64_t RHS, APInt &Quotient, |
| 1106 | uint64_t &Remainder); |
| 1107 | |
| 1108 | static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, |
| 1109 | APInt &Remainder); |
| 1110 | static void sdivrem(const APInt &LHS, int64_t RHS, APInt &Quotient, |
| 1111 | int64_t &Remainder); |
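A sketch of the dual interface above; one call yields both quotient and remainder (demo name illustrative):

#include "llvm/ADT/APInt.h"

void divremDemo() {
  llvm::APInt N(32, 17), D(32, 5), Q(32, 0), R(32, 0);
  llvm::APInt::udivrem(N, D, Q, R); // Q == 3, R == 2
}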
| 1112 | |
| 1113 | // Operations that return overflow indicators. |
| 1114 | APInt sadd_ov(const APInt &RHS, bool &Overflow) const; |
| 1115 | APInt uadd_ov(const APInt &RHS, bool &Overflow) const; |
| 1116 | APInt ssub_ov(const APInt &RHS, bool &Overflow) const; |
| 1117 | APInt usub_ov(const APInt &RHS, bool &Overflow) const; |
| 1118 | APInt sdiv_ov(const APInt &RHS, bool &Overflow) const; |
| 1119 | APInt smul_ov(const APInt &RHS, bool &Overflow) const; |
| 1120 | APInt umul_ov(const APInt &RHS, bool &Overflow) const; |
| 1121 | APInt sshl_ov(const APInt &Amt, bool &Overflow) const; |
| 1122 | APInt ushl_ov(const APInt &Amt, bool &Overflow) const; |
| 1123 | |
| 1124 | // Operations that saturate |
| 1125 | APInt sadd_sat(const APInt &RHS) const; |
| 1126 | APInt uadd_sat(const APInt &RHS) const; |
| 1127 | APInt ssub_sat(const APInt &RHS) const; |
| 1128 | APInt usub_sat(const APInt &RHS) const; |
| 1129 | APInt smul_sat(const APInt &RHS) const; |
| 1130 | APInt umul_sat(const APInt &RHS) const; |
| 1131 | APInt sshl_sat(const APInt &RHS) const; |
| 1132 | APInt ushl_sat(const APInt &RHS) const; |
| 1133 | |
| 1134 | /// Array-indexing support. |
| 1135 | /// |
| 1136 | /// \returns the bit value at bitPosition |
| 1137 | bool operator[](unsigned bitPosition) const { |
| 1138 | assert(bitPosition < getBitWidth() && "Bit position out of bounds!");
| 1139 | return (maskBit(bitPosition) & getWord(bitPosition)) != 0; |
| 1140 | } |
| 1141 | |
| 1142 | /// @} |
| 1143 | /// \name Comparison Operators |
| 1144 | /// @{ |
| 1145 | |
| 1146 | /// Equality operator. |
| 1147 | /// |
| 1148 | /// Compares this APInt with RHS for the validity of the equality |
| 1149 | /// relationship. |
| 1150 | bool operator==(const APInt &RHS) const { |
| 1151 | assert(BitWidth == RHS.BitWidth && "Comparison requires equal bit widths");
| 1152 | if (isSingleWord()) |
| 1153 | return U.VAL == RHS.U.VAL; |
| 1154 | return EqualSlowCase(RHS); |
| 1155 | } |
| 1156 | |
| 1157 | /// Equality operator. |
| 1158 | /// |
| 1159 | /// Compares this APInt with a uint64_t for the validity of the equality |
| 1160 | /// relationship. |
| 1161 | /// |
| 1162 | /// \returns true if *this == Val |
| 1163 | bool operator==(uint64_t Val) const { |
| 1164 | return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() == Val; |
| 1165 | } |
| 1166 | |
| 1167 | /// Equality comparison. |
| 1168 | /// |
| 1169 | /// Compares this APInt with RHS for the validity of the equality |
| 1170 | /// relationship. |
| 1171 | /// |
| 1172 | /// \returns true if *this == Val |
| 1173 | bool eq(const APInt &RHS) const { return (*this) == RHS; } |
| 1174 | |
| 1175 | /// Inequality operator. |
| 1176 | /// |
| 1177 | /// Compares this APInt with RHS for the validity of the inequality |
| 1178 | /// relationship. |
| 1179 | /// |
| 1180 | /// \returns true if *this != Val |
| 1181 | bool operator!=(const APInt &RHS) const { return !((*this) == RHS); } |
| 1182 | |
| 1183 | /// Inequality operator. |
| 1184 | /// |
| 1185 | /// Compares this APInt with a uint64_t for the validity of the inequality |
| 1186 | /// relationship. |
| 1187 | /// |
| 1188 | /// \returns true if *this != Val |
| 1189 | bool operator!=(uint64_t Val) const { return !((*this) == Val); } |
| 1190 | |
| 1191 | /// Inequality comparison |
| 1192 | /// |
| 1193 | /// Compares this APInt with RHS for the validity of the inequality |
| 1194 | /// relationship. |
| 1195 | /// |
| 1196 | /// \returns true if *this != Val |
| 1197 | bool ne(const APInt &RHS) const { return !((*this) == RHS); } |
| 1198 | |
| 1199 | /// Unsigned less than comparison |
| 1200 | /// |
| 1201 | /// Regards both *this and RHS as unsigned quantities and compares them for |
| 1202 | /// the validity of the less-than relationship. |
| 1203 | /// |
| 1204 | /// \returns true if *this < RHS when both are considered unsigned. |
| 1205 | bool ult(const APInt &RHS) const { return compare(RHS) < 0; } |
| 1206 | |
| 1207 | /// Unsigned less than comparison |
| 1208 | /// |
| 1209 | /// Regards *this as an unsigned quantity and compares it with RHS for
| 1210 | /// the validity of the less-than relationship. |
| 1211 | /// |
| 1212 | /// \returns true if *this < RHS when considered unsigned. |
| 1213 | bool ult(uint64_t RHS) const { |
| 1214 | // Only need to check active bits if not a single word. |
| 1215 | return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() < RHS; |
| 1216 | } |
| 1217 | |
| 1218 | /// Signed less than comparison |
| 1219 | /// |
| 1220 | /// Regards both *this and RHS as signed quantities and compares them for |
| 1221 | /// validity of the less-than relationship. |
| 1222 | /// |
| 1223 | /// \returns true if *this < RHS when both are considered signed. |
| 1224 | bool slt(const APInt &RHS) const { return compareSigned(RHS) < 0; } |
| 1225 | |
| 1226 | /// Signed less than comparison |
| 1227 | /// |
| 1228 | /// Regards *this as a signed quantity and compares it with RHS for
| 1229 | /// the validity of the less-than relationship. |
| 1230 | /// |
| 1231 | /// \returns true if *this < RHS when considered signed. |
| 1232 | bool slt(int64_t RHS) const { |
| 1233 | return (!isSingleWord() && getMinSignedBits() > 64) ? isNegative() |
| 1234 | : getSExtValue() < RHS; |
| 1235 | } |
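The same bit pattern can order differently under the unsigned and signed comparisons above (a sketch; per the class invariants, APInt operands must share a bit width; demo name illustrative):

#include "llvm/ADT/APInt.h"

void compareDemo() {
  llvm::APInt A(8, 0xFF); // 255 unsigned, -1 signed
  llvm::APInt B(8, 1);
  bool U = A.ult(B);      // false: 255 < 1 does not hold
  bool S = A.slt(B);      // true:  -1 < 1 holds
  (void)U; (void)S;
}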
| 1236 | |
| 1237 | /// Unsigned less or equal comparison |
| 1238 | /// |
| 1239 | /// Regards both *this and RHS as unsigned quantities and compares them for |
| 1240 | /// validity of the less-or-equal relationship. |
| 1241 | /// |
| 1242 | /// \returns true if *this <= RHS when both are considered unsigned. |
| 1243 | bool ule(const APInt &RHS) const { return compare(RHS) <= 0; } |
| 1244 | |
| 1245 | /// Unsigned less or equal comparison |
| 1246 | /// |
| 1247 | /// Regards *this as an unsigned quantity and compares it with RHS for
| 1248 | /// the validity of the less-or-equal relationship. |
| 1249 | /// |
| 1250 | /// \returns true if *this <= RHS when considered unsigned. |
| 1251 | bool ule(uint64_t RHS) const { return !ugt(RHS); } |
| 1252 | |
| 1253 | /// Signed less or equal comparison |
| 1254 | /// |
| 1255 | /// Regards both *this and RHS as signed quantities and compares them for |
| 1256 | /// validity of the less-or-equal relationship. |
| 1257 | /// |
| 1258 | /// \returns true if *this <= RHS when both are considered signed. |
| 1259 | bool sle(const APInt &RHS) const { return compareSigned(RHS) <= 0; } |
| 1260 | |
| 1261 | /// Signed less or equal comparison |
| 1262 | /// |
| 1263 | /// Regards *this as a signed quantity and compares it with RHS for the
| 1264 | /// validity of the less-or-equal relationship. |
| 1265 | /// |
| 1266 | /// \returns true if *this <= RHS when considered signed. |
| 1267 | bool sle(uint64_t RHS) const { return !sgt(RHS); } |
| 1268 | |
| 1269 | /// Unsigned greater than comparison |
| 1270 | /// |
| 1271 | /// Regards both *this and RHS as unsigned quantities and compares them for |
| 1272 | /// the validity of the greater-than relationship. |
| 1273 | /// |
| 1274 | /// \returns true if *this > RHS when both are considered unsigned. |
| 1275 | bool ugt(const APInt &RHS) const { return !ule(RHS); } |
| 1276 | |
| 1277 | /// Unsigned greater than comparison |
| 1278 | /// |
| 1279 | /// Regards *this as an unsigned quantity and compares it with RHS for
| 1280 | /// the validity of the greater-than relationship. |
| 1281 | /// |
| 1282 | /// \returns true if *this > RHS when considered unsigned. |
| 1283 | bool ugt(uint64_t RHS) const { |
| 1284 | // Only need to check active bits if not a single word. |
| 1285 | return (!isSingleWord() && getActiveBits() > 64) || getZExtValue() > RHS; |
| 1286 | } |
| 1287 | |
| 1288 | /// Signed greater than comparison |
| 1289 | /// |
| 1290 | /// Regards both *this and RHS as signed quantities and compares them for the |
| 1291 | /// validity of the greater-than relationship. |
| 1292 | /// |
| 1293 | /// \returns true if *this > RHS when both are considered signed. |
| 1294 | bool sgt(const APInt &RHS) const { return !sle(RHS); } |
| 1295 | |
| 1296 | /// Signed greater than comparison |
| 1297 | /// |
| 1298 | /// Regards *this as a signed quantity and compares it with RHS for
| 1299 | /// the validity of the greater-than relationship. |
| 1300 | /// |
| 1301 | /// \returns true if *this > RHS when considered signed. |
| 1302 | bool sgt(int64_t RHS) const { |
| 1303 | return (!isSingleWord() && getMinSignedBits() > 64) ? !isNegative() |
| 1304 | : getSExtValue() > RHS; |
| 1305 | } |
| 1306 | |
| 1307 | /// Unsigned greater or equal comparison |
| 1308 | /// |
| 1309 | /// Regards both *this and RHS as unsigned quantities and compares them for |
| 1310 | /// validity of the greater-or-equal relationship. |
| 1311 | /// |
| 1312 | /// \returns true if *this >= RHS when both are considered unsigned. |
| 1313 | bool uge(const APInt &RHS) const { return !ult(RHS); } |
| 1314 | |
| 1315 | /// Unsigned greater or equal comparison |
| 1316 | /// |
| 1317 | /// Regards *this as an unsigned quantity and compares it with RHS for
| 1318 | /// the validity of the greater-or-equal relationship. |
| 1319 | /// |
| 1320 | /// \returns true if *this >= RHS when considered unsigned. |
| 1321 | bool uge(uint64_t RHS) const { return !ult(RHS); } |
| 1322 | |
| 1323 | /// Signed greater or equal comparison |
| 1324 | /// |
| 1325 | /// Regards both *this and RHS as signed quantities and compares them for |
| 1326 | /// validity of the greater-or-equal relationship. |
| 1327 | /// |
| 1328 | /// \returns true if *this >= RHS when both are considered signed. |
| 1329 | bool sge(const APInt &RHS) const { return !slt(RHS); } |
| 1330 | |
| 1331 | /// Signed greater or equal comparison |
| 1332 | /// |
| 1333 | /// Regards *this as a signed quantity and compares it with RHS for
| 1334 | /// the validity of the greater-or-equal relationship. |
| 1335 | /// |
| 1336 | /// \returns true if *this >= RHS when considered signed. |
| 1337 | bool sge(int64_t RHS) const { return !slt(RHS); } |
| 1338 | |
| 1339 | /// This operation tests if there are any pairs of corresponding bits |
| 1340 | /// between this APInt and RHS that are both set. |
| 1341 | bool intersects(const APInt &RHS) const { |
| 1342 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
| 1343 | if (isSingleWord()) |
| 1344 | return (U.VAL & RHS.U.VAL) != 0; |
| 1345 | return intersectsSlowCase(RHS); |
| 1346 | } |
| 1347 | |
| 1348 | /// This operation checks that all bits set in this APInt are also set in RHS. |
| 1349 | bool isSubsetOf(const APInt &RHS) const { |
| 1350 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
| 1351 | if (isSingleWord()) |
| 1352 | return (U.VAL & ~RHS.U.VAL) == 0; |
| 1353 | return isSubsetOfSlowCase(RHS); |
| 1354 | } |
| 1355 | |
| 1356 | /// @} |
| 1357 | /// \name Resizing Operators |
| 1358 | /// @{ |
| 1359 | |
| 1360 | /// Truncate to new width. |
| 1361 | /// |
| 1362 | /// Truncate the APInt to a specified width. It is an error to specify a width |
| 1363 | /// that is greater than or equal to the current width. |
| 1364 | APInt trunc(unsigned width) const; |
| 1365 | |
| 1366 | /// Truncate to new width with unsigned saturation. |
| 1367 | /// |
| 1368 | /// If the APInt, treated as an unsigned integer, can be losslessly truncated to
| 1369 | /// the new bitwidth, return the truncated APInt; otherwise, return the max value.
| 1370 | APInt truncUSat(unsigned width) const; |
| 1371 | |
| 1372 | /// Truncate to new width with signed saturation. |
| 1373 | /// |
| 1374 | /// If this APInt, treated as a signed integer, can be losslessly truncated to
| 1375 | /// the new bitwidth, return the truncated APInt; otherwise, return either the
| 1376 | /// signed min value if the APInt was negative, or the signed max value.
| 1377 | APInt truncSSat(unsigned width) const; |
| 1378 | |
| 1379 | /// Sign extend to a new width. |
| 1380 | /// |
| 1381 | /// This operation sign extends the APInt to a new width. If the high order |
| 1382 | /// bit is set, the fill on the left will be done with 1 bits, otherwise zero. |
| 1383 | /// It is an error to specify a width that is less than or equal to the |
| 1384 | /// current width. |
| 1385 | APInt sext(unsigned width) const; |
| 1386 | |
| 1387 | /// Zero extend to a new width. |
| 1388 | /// |
| 1389 | /// This operation zero extends the APInt to a new width. The high order bits |
| 1390 | /// are filled with 0 bits. It is an error to specify a width that is less |
| 1391 | /// than or equal to the current width. |
| 1392 | APInt zext(unsigned width) const; |
| 1393 | |
| 1394 | /// Sign extend or truncate to width |
| 1395 | /// |
| 1396 | /// Make this APInt have the bit width given by \p width. The value is sign |
| 1397 | /// extended, truncated, or left alone to make it that width. |
| 1398 | APInt sextOrTrunc(unsigned width) const; |
| 1399 | |
| 1400 | /// Zero extend or truncate to width |
| 1401 | /// |
| 1402 | /// Make this APInt have the bit width given by \p width. The value is zero |
| 1403 | /// extended, truncated, or left alone to make it that width. |
| 1404 | APInt zextOrTrunc(unsigned width) const; |
| 1405 | |
| 1406 | /// Truncate to width |
| 1407 | /// |
| 1408 | /// Make this APInt have the bit width given by \p width. The value is |
| 1409 | /// truncated or left alone to make it that width. |
| 1410 | APInt truncOrSelf(unsigned width) const; |
| 1411 | |
| 1412 | /// Sign extend or truncate to width |
| 1413 | /// |
| 1414 | /// Make this APInt have the bit width given by \p width. The value is sign |
| 1415 | /// extended, or left alone to make it that width. |
| 1416 | APInt sextOrSelf(unsigned width) const; |
| 1417 | |
| 1418 | /// Zero extend or truncate to width |
| 1419 | /// |
| 1420 | /// Make this APInt have the bit width given by \p width. The value is zero |
| 1421 | /// extended, or left alone to make it that width. |
| 1422 | APInt zextOrSelf(unsigned width) const; |
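A sketch of the resizing operations above, starting from a 16-bit value whose sign bit is set (demo name illustrative):

#include "llvm/ADT/APInt.h"

void resizeDemo() {
  llvm::APInt A(16, 0xFF80);
  llvm::APInt S = A.sext(32); // 0xFFFFFF80: high bits copy the sign bit
  llvm::APInt Z = A.zext(32); // 0x0000FF80: high bits are zero
  llvm::APInt T = A.trunc(8); // 0x80: high bits are dropped
  (void)S; (void)Z; (void)T;
}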
| 1423 | |
| 1424 | /// @} |
| 1425 | /// \name Bit Manipulation Operators |
| 1426 | /// @{ |
| 1427 | |
| 1428 | /// Set every bit to 1. |
| 1429 | void setAllBits() { |
| 1430 | if (isSingleWord()) |
| 1431 | U.VAL = WORDTYPE_MAX; |
| 1432 | else |
| 1433 | // Set all the bits in all the words. |
| 1434 | memset(U.pVal, -1, getNumWords() * APINT_WORD_SIZE); |
| 1435 | // Clear the unused ones |
| 1436 | clearUnusedBits(); |
| 1437 | } |
| 1438 | |
| 1439 | /// Set a given bit to 1. |
| 1440 | /// |
| 1441 | /// Set to 1 the bit whose position is given by "BitPosition". |
| 1442 | void setBit(unsigned BitPosition) { |
| 1443 | assert(BitPosition < BitWidth && "BitPosition out of range"); |
| 1444 | WordType Mask = maskBit(BitPosition); |
| 1445 | if (isSingleWord()) |
| 1446 | U.VAL |= Mask; |
| 1447 | else |
| 1448 | U.pVal[whichWord(BitPosition)] |= Mask; |
| 1449 | } |
| 1450 | |
| 1451 | /// Set the sign bit to 1. |
| 1452 | void setSignBit() { |
| 1453 | setBit(BitWidth - 1); |
| 1454 | } |
| 1455 | |
| 1456 | /// Set a given bit to a given value. |
| 1457 | void setBitVal(unsigned BitPosition, bool BitValue) { |
| 1458 | if (BitValue) |
| 1459 | setBit(BitPosition); |
| 1460 | else |
| 1461 | clearBit(BitPosition); |
| 1462 | } |
| 1463 | |
| 1464 | /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1. |
| 1465 | /// This function handles "wrap" case when \p loBit >= \p hiBit, and calls |
| 1466 | /// setBits when \p loBit < \p hiBit. |
| 1467 | /// For \p loBit == \p hiBit wrap case, set every bit to 1. |
| 1468 | void setBitsWithWrap(unsigned loBit, unsigned hiBit) { |
| 1469 | assert(hiBit <= BitWidth && "hiBit out of range"); |
| 1470 | assert(loBit <= BitWidth && "loBit out of range"); |
| 1471 | if (loBit < hiBit) { |
| 1472 | setBits(loBit, hiBit); |
| 1473 | return; |
| 1474 | } |
| 1475 | setLowBits(hiBit); |
| 1476 | setHighBits(BitWidth - loBit); |
| 1477 | } |
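| | |
| | // Illustrative sketch (not in the original header): the "wrap" case |
| | // sets both ends of the range; for an 8-bit value: |
| | //   APInt M(8, 0); |
| | //   M.setBitsWithWrap(6, 2);   // bits 6,7 and 0,1 set -> 0b11000011 |
| | //   M.setBitsWithWrap(3, 3);   // loBit == hiBit: every bit set |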
| 1478 | |
| 1479 | /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1. |
| 1480 | /// This function handles case when \p loBit <= \p hiBit. |
| 1481 | void setBits(unsigned loBit, unsigned hiBit) { |
| 1482 | assert(hiBit <= BitWidth && "hiBit out of range"); |
| 1483 | assert(loBit <= BitWidth && "loBit out of range"); |
| 1484 | assert(loBit <= hiBit && "loBit greater than hiBit"); |
| 1485 | if (loBit == hiBit) |
| 1486 | return; |
| 1487 | if (loBit < APINT_BITS_PER_WORD && hiBit <= APINT_BITS_PER_WORD) { |
| 1488 | uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit)); |
| 1489 | mask <<= loBit; |
| 1490 | if (isSingleWord()) |
| 1491 | U.VAL |= mask; |
| 1492 | else |
| 1493 | U.pVal[0] |= mask; |
| 1494 | } else { |
| 1495 | setBitsSlowCase(loBit, hiBit); |
| 1496 | } |
| 1497 | } |
| 1498 | |
| 1499 | /// Set the top bits starting from loBit. |
| 1500 | void setBitsFrom(unsigned loBit) { |
| 1501 | return setBits(loBit, BitWidth); |
| 1502 | } |
| 1503 | |
| 1504 | /// Set the bottom loBits bits. |
| 1505 | void setLowBits(unsigned loBits) { |
| 1506 | return setBits(0, loBits); |
| 1507 | } |
| 1508 | |
| 1509 | /// Set the top hiBits bits. |
| 1510 | void setHighBits(unsigned hiBits) { |
| 1511 | return setBits(BitWidth - hiBits, BitWidth); |
| 1512 | } |
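| | |
| | // Illustrative sketch (not in the original header) of the range |
| | // setters, assuming the documented semantics: |
| | //   APInt M(32, 0); |
| | //   M.setBits(8, 16);    // bits [8,16)  -> 0x0000FF00 |
| | //   M.setLowBits(4);     // bits [0,4)   -> 0x0000FF0F |
| | //   M.setHighBits(4);    // bits [28,32) -> 0xF000FF0F |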
| 1513 | |
| 1514 | /// Set every bit to 0. |
| 1515 | void clearAllBits() { |
| 1516 | if (isSingleWord()) |
| 1517 | U.VAL = 0; |
| 1518 | else |
| 1519 | memset(U.pVal, 0, getNumWords() * APINT_WORD_SIZE); |
| 1520 | } |
| 1521 | |
| 1522 | /// Set a given bit to 0. |
| 1523 | /// |
| 1524 | /// Set to 0 the bit whose position is given by "BitPosition". |
| 1525 | void clearBit(unsigned BitPosition) { |
| 1526 | assert(BitPosition < BitWidth && "BitPosition out of range"); |
| 1527 | WordType Mask = ~maskBit(BitPosition); |
| 1528 | if (isSingleWord()) |
| 1529 | U.VAL &= Mask; |
| 1530 | else |
| 1531 | U.pVal[whichWord(BitPosition)] &= Mask; |
| 1532 | } |
| 1533 | |
| 1534 | /// Set bottom loBits bits to 0. |
| 1535 | void clearLowBits(unsigned loBits) { |
| 1536 | assert(loBits <= BitWidth && "More bits than bitwidth"); |
| 1537 | APInt Keep = getHighBitsSet(BitWidth, BitWidth - loBits); |
| 1538 | *this &= Keep; |
| 1539 | } |
| 1540 | |
| 1541 | /// Set the sign bit to 0. |
| 1542 | void clearSignBit() { |
| 1543 | clearBit(BitWidth - 1); |
| 1544 | } |
| 1545 | |
| 1546 | /// Toggle every bit to its opposite value. |
| 1547 | void flipAllBits() { |
| 1548 | if (isSingleWord()) { |
| 1549 | U.VAL ^= WORDTYPE_MAX; |
| 1550 | clearUnusedBits(); |
| 1551 | } else { |
| 1552 | flipAllBitsSlowCase(); |
| 1553 | } |
| 1554 | } |
| 1555 | |
| 1556 | /// Toggles a given bit to its opposite value. |
| 1557 | /// |
| 1558 | /// Toggle the bit at the position given by "bitPosition" to its |
| 1559 | /// opposite value. |
| 1560 | void flipBit(unsigned bitPosition); |
| 1561 | |
| 1562 | /// Negate this APInt in place. |
| 1563 | void negate() { |
| 1564 | flipAllBits(); |
| 1565 | ++(*this); |
| 1566 | } |
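| | |
| | // Illustrative sketch: negation is the usual two's complement |
| | // "flip all bits, then add one", e.g. in 8 bits: |
| | //   APInt V(8, 1); |
| | //   V.negate();   // 0x01 -> flipped 0xFE -> incremented 0xFF, i.e. -1 |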
| 1567 | |
| 1568 | /// Insert the bits from a smaller APInt starting at bitPosition. |
| 1569 | void insertBits(const APInt &SubBits, unsigned bitPosition); |
| 1570 | void insertBits(uint64_t SubBits, unsigned bitPosition, unsigned numBits); |
| 1571 | |
| 1572 | /// Return an APInt with the extracted bits [bitPosition,bitPosition+numBits). |
| 1573 | APInt extractBits(unsigned numBits, unsigned bitPosition) const; |
| 1574 | uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const; |
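| | |
| | // Illustrative sketch (not in the original header): insertBits and |
| | // extractBits act as bitfield pack/unpack: |
| | //   APInt Packed(32, 0); |
| | //   Packed.insertBits(APInt(8, 0xAB), 16);    // bits [16,24) = 0xAB |
| | //   APInt Field = Packed.extractBits(8, 16);  // 8-bit 0xAB again |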
| 1575 | |
| 1576 | /// @} |
| 1577 | /// \name Value Characterization Functions |
| 1578 | /// @{ |
| 1579 | |
| 1580 | /// Return the number of bits in the APInt. |
| 1581 | unsigned getBitWidth() const { return BitWidth; } |
| 1582 | |
| 1583 | /// Get the number of words. |
| 1584 | /// |
| 1585 | /// Here one word's bitwidth equals that of uint64_t. |
| 1586 | /// |
| 1587 | /// \returns the number of words to hold the integer value of this APInt. |
| 1588 | unsigned getNumWords() const { return getNumWords(BitWidth); } |
| 1589 | |
| 1590 | /// Get the number of words. |
| 1591 | /// |
| 1592 | /// *NOTE* Here one word's bitwidth equals that of uint64_t. |
| 1593 | /// |
| 1594 | /// \returns the number of words to hold the integer value with a given bit |
| 1595 | /// width. |
| 1596 | static unsigned getNumWords(unsigned BitWidth) { |
| 1597 | return ((uint64_t)BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD; |
| 1598 | } |
| 1599 | |
| 1600 | /// Compute the number of active bits in the value |
| 1601 | /// |
| 1602 | /// This function returns the number of active bits which is defined as the |
| 1603 | /// bit width minus the number of leading zeros. This is used in several |
| 1604 | /// computations to see how "wide" the value is. |
| 1605 | unsigned getActiveBits() const { return BitWidth - countLeadingZeros(); } |
| 1606 | |
| 1607 | /// Compute the number of active words in the value of this APInt. |
| 1608 | /// |
| 1609 | /// This is used in conjunction with getRawData to extract the raw value of |
| 1610 | /// the APInt. |
| 1611 | unsigned getActiveWords() const { |
| 1612 | unsigned numActiveBits = getActiveBits(); |
| 1613 | return numActiveBits ? whichWord(numActiveBits - 1) + 1 : 1; |
| 1614 | } |
| 1615 | |
| 1616 | /// Get the minimum bit size for this signed APInt |
| 1617 | /// |
| 1618 | /// Computes the minimum bit width for this APInt while considering it to be a |
| 1619 | /// signed (and probably negative) value. If the value is not negative, this |
| 1620 | /// function returns the same value as getActiveBits()+1. Otherwise, it |
| 1621 | /// returns the smallest bit width that will retain the negative value. For |
| 1622 | /// example, -1 can be written as 0b1 or 0xFFFFFFFFFF. 0b1 is shorter and so |
| 1623 | /// for -1, this function will always return 1. |
| 1624 | unsigned getMinSignedBits() const { return BitWidth - getNumSignBits() + 1; } |
| 1625 | |
| 1626 | /// Get zero extended value |
| 1627 | /// |
| 1628 | /// This method attempts to return the value of this APInt as a zero extended |
| 1629 | /// uint64_t. The bitwidth must be <= 64 or the value must fit within a |
| 1630 | /// uint64_t. Otherwise an assertion will result. |
| 1631 | uint64_t getZExtValue() const { |
| 1632 | if (isSingleWord()) |
| 1633 | return U.VAL; |
| 1634 | assert(getActiveBits() <= 64 && "Too many bits for uint64_t"); |
| 1635 | return U.pVal[0]; |
| 1636 | } |
| 1637 | |
| 1638 | /// Get sign extended value |
| 1639 | /// |
| 1640 | /// This method attempts to return the value of this APInt as a sign extended |
| 1641 | /// int64_t. The bit width must be <= 64 or the value must fit within an |
| 1642 | /// int64_t. Otherwise an assertion will result. |
| 1643 | int64_t getSExtValue() const { |
| 1644 | if (isSingleWord()) |
| 1645 | return SignExtend64(U.VAL, BitWidth); |
| 1646 | assert(getMinSignedBits() <= 64 && "Too many bits for int64_t"); |
| 1647 | return int64_t(U.pVal[0]); |
| 1648 | } |
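| | |
| | // Illustrative sketch: the same bit pattern reads differently through |
| | // the two accessors, e.g. the 8-bit value 0xFF: |
| | //   APInt V(8, 0xFF); |
| | //   V.getZExtValue();   // 255 |
| | //   V.getSExtValue();   // -1 |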
| 1649 | |
| 1650 | /// Get bits required for string value. |
| 1651 | /// |
| 1652 | /// This method determines how many bits are required to hold the APInt |
| 1653 | /// equivalent of the string given by \p str. |
| 1654 | static unsigned getBitsNeeded(StringRef str, uint8_t radix); |
| 1655 | |
| 1656 | /// The APInt version of the countLeadingZeros functions in |
| 1657 | /// MathExtras.h. |
| 1658 | /// |
| 1659 | /// It counts the number of zeros from the most significant bit to the first |
| 1660 | /// one bit. |
| 1661 | /// |
| 1662 | /// \returns BitWidth if the value is zero, otherwise returns the number of |
| 1663 | /// zeros from the most significant bit to the first one bit. |
| 1664 | unsigned countLeadingZeros() const { |
| 1665 | if (isSingleWord()) { |
| 1666 | unsigned unusedBits = APINT_BITS_PER_WORD - BitWidth; |
| 1667 | return llvm::countLeadingZeros(U.VAL) - unusedBits; |
| 1668 | } |
| 1669 | return countLeadingZerosSlowCase(); |
| 1670 | } |
| 1671 | |
| 1672 | /// Count the number of leading one bits. |
| 1673 | /// |
| 1674 | /// This function is an APInt version of the countLeadingOnes |
| 1675 | /// functions in MathExtras.h. It counts the number of ones from the most |
| 1676 | /// significant bit to the first zero bit. |
| 1677 | /// |
| 1678 | /// \returns 0 if the high order bit is not set, otherwise returns the number |
| 1679 | /// of 1 bits from the most significant to the least |
| 1680 | unsigned countLeadingOnes() const { |
| 1681 | if (isSingleWord()) |
| 1682 | return llvm::countLeadingOnes(U.VAL << (APINT_BITS_PER_WORD - BitWidth)); |
| 1683 | return countLeadingOnesSlowCase(); |
| 1684 | } |
| 1685 | |
| 1686 | /// Computes the number of leading bits of this APInt that are equal to its |
| 1687 | /// sign bit. |
| 1688 | unsigned getNumSignBits() const { |
| 1689 | return isNegative() ? countLeadingOnes() : countLeadingZeros(); |
| 1690 | } |
| 1691 | |
| 1692 | /// Count the number of trailing zero bits. |
| 1693 | /// |
| 1694 | /// This function is an APInt version of the countTrailingZeros |
| 1695 | /// functions in MathExtras.h. It counts the number of zeros from the least |
| 1696 | /// significant bit to the first set bit. |
| 1697 | /// |
| 1698 | /// \returns BitWidth if the value is zero, otherwise returns the number of |
| 1699 | /// zeros from the least significant bit to the first one bit. |
| 1700 | unsigned countTrailingZeros() const { |
| 1701 | if (isSingleWord()) { |
| 1702 | unsigned TrailingZeros = llvm::countTrailingZeros(U.VAL); |
| 1703 | return (TrailingZeros > BitWidth ? BitWidth : TrailingZeros); |
| 1704 | } |
| 1705 | return countTrailingZerosSlowCase(); |
| 1706 | } |
| 1707 | |
| 1708 | /// Count the number of trailing one bits. |
| 1709 | /// |
| 1710 | /// This function is an APInt version of the countTrailingOnes |
| 1711 | /// functions in MathExtras.h. It counts the number of ones from the least |
| 1712 | /// significant bit to the first zero bit. |
| 1713 | /// |
| 1714 | /// \returns BitWidth if the value is all ones, otherwise returns the number |
| 1715 | /// of ones from the least significant bit to the first zero bit. |
| 1716 | unsigned countTrailingOnes() const { |
| 1717 | if (isSingleWord()) |
| 1718 | return llvm::countTrailingOnes(U.VAL); |
| 1719 | return countTrailingOnesSlowCase(); |
| 1720 | } |
| 1721 | |
| 1722 | /// Count the number of bits set. |
| 1723 | /// |
| 1724 | /// This function is an APInt version of the countPopulation functions |
| 1725 | /// in MathExtras.h. It counts the number of 1 bits in the APInt value. |
| 1726 | /// |
| 1727 | /// \returns 0 if the value is zero, otherwise returns the number of set bits. |
| 1728 | unsigned countPopulation() const { |
| 1729 | if (isSingleWord()) |
| 1730 | return llvm::countPopulation(U.VAL); |
| 1731 | return countPopulationSlowCase(); |
| 1732 | } |
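| | |
| | // Illustrative sketch of the counting queries for the 8-bit value |
| | // 0b00101100, assuming the documented semantics: |
| | //   APInt V(8, 0x2C); |
| | //   V.countLeadingZeros();    // 2 |
| | //   V.countTrailingZeros();   // 2 |
| | //   V.countPopulation();      // 3 |
| | //   V.getActiveBits();        // 6, i.e. 8 - countLeadingZeros() |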
| 1733 | |
| 1734 | /// @} |
| 1735 | /// \name Conversion Functions |
| 1736 | /// @{ |
| 1737 | void print(raw_ostream &OS, bool isSigned) const; |
| 1738 | |
| 1739 | /// Converts an APInt to a string and appends it to Str. Str is commonly a |
| 1740 | /// SmallString. |
| 1741 | void toString(SmallVectorImpl<char> &Str, unsigned Radix, bool Signed, |
| 1742 | bool formatAsCLiteral = false) const; |
| 1743 | |
| 1744 | /// Considers the APInt to be unsigned and converts it into a string in the |
| 1745 | /// radix given. The radix can be 2, 8, 10, 16, or 36. |
| 1746 | void toStringUnsigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const { |
| 1747 | toString(Str, Radix, false, false); |
| 1748 | } |
| 1749 | |
| 1750 | /// Considers the APInt to be signed and converts it into a string in the |
| 1751 | /// radix given. The radix can be 2, 8, 10, 16, or 36. |
| 1752 | void toStringSigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const { |
| 1753 | toString(Str, Radix, true, false); |
| 1754 | } |
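| | |
| | // Illustrative sketch: toString appends to Str, so clear it between |
| | // conversions; the 8-bit value 0xFF rendered both ways: |
| | //   SmallString<16> S; |
| | //   APInt V(8, 0xFF); |
| | //   V.toStringUnsigned(S);   // S == "255" |
| | //   S.clear(); |
| | //   V.toStringSigned(S);     // S == "-1" |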
| 1755 | |
| 1756 | /// \returns a byte-swapped representation of this APInt Value. |
| 1757 | APInt byteSwap() const; |
| 1758 | |
| 1759 | /// \returns the value of this APInt with its bit representation |
| 1760 | /// reversed. |
| 1761 | APInt reverseBits() const; |
| 1762 | |
| 1763 | /// Converts this APInt to a double value. |
| 1764 | double roundToDouble(bool isSigned) const; |
| 1765 | |
| 1766 | /// Converts this unsigned APInt to a double value. |
| 1767 | double roundToDouble() const { return roundToDouble(false); } |
| 1768 | |
| 1769 | /// Converts this signed APInt to a double value. |
| 1770 | double signedRoundToDouble() const { return roundToDouble(true); } |
| 1771 | |
| 1772 | /// Converts APInt bits to a double |
| 1773 | /// |
| 1774 | /// The conversion does not do a translation from integer to double, it just |
| 1775 | /// re-interprets the bits as a double. Note that it is valid to do this on |
| 1776 | /// any bit width. Exactly 64 bits will be translated. |
| 1777 | double bitsToDouble() const { |
| 1778 | return BitsToDouble(getWord(0)); |
| 1779 | } |
| 1780 | |
| 1781 | /// Converts APInt bits to a float |
| 1782 | /// |
| 1783 | /// The conversion does not do a translation from integer to float, it just |
| 1784 | /// re-interprets the bits as a float. Note that it is valid to do this on |
| 1785 | /// any bit width. Exactly 32 bits will be translated. |
| 1786 | float bitsToFloat() const { |
| 1787 | return BitsToFloat(static_cast<uint32_t>(getWord(0))); |
| 1788 | } |
| 1789 | |
| 1790 | /// Converts a double to APInt bits. |
| 1791 | /// |
| 1792 | /// The conversion does not do a translation from double to integer, it just |
| 1793 | /// re-interprets the bits of the double. |
| 1794 | static APInt doubleToBits(double V) { |
| 1795 | return APInt(sizeof(double) * CHAR_BIT, DoubleToBits(V)); |
| 1796 | } |
| 1797 | |
| 1798 | /// Converts a float to APInt bits. |
| 1799 | /// |
| 1800 | /// The conversion does not do a translation from float to integer, it just |
| 1801 | /// re-interprets the bits of the float. |
| 1802 | static APInt floatToBits(float V) { |
| 1803 | return APInt(sizeof(float) * CHAR_BIT, FloatToBits(V)); |
| 1804 | } |
| 1805 | |
| 1806 | /// @} |
| 1807 | /// \name Mathematics Operations |
| 1808 | /// @{ |
| 1809 | |
| 1810 | /// \returns the floor log base 2 of this APInt. |
| 1811 | unsigned logBase2() const { return getActiveBits() - 1; } |
| 1812 | |
| 1813 | /// \returns the ceil log base 2 of this APInt. |
| 1814 | unsigned ceilLogBase2() const { |
| 1815 | APInt temp(*this); |
| 1816 | --temp; |
| 1817 | return temp.getActiveBits(); |
| 1818 | } |
| 1819 | |
| 1820 | /// \returns the nearest log base 2 of this APInt. Ties round up. |
| 1821 | /// |
| 1822 | /// NOTE: When we have a BitWidth of 1, we define: |
| 1823 | /// |
| 1824 | /// log2(0) = UINT32_MAX |
| 1825 | /// log2(1) = 0 |
| 1826 | /// |
| 1827 | /// to get around any mathematical concerns resulting from |
| 1828 | /// referencing 2 in a space where 2 does not exist. |
| 1829 | unsigned nearestLogBase2() const { |
| 1830 | // Special case when we have a bitwidth of 1. If VAL is 1, then we |
| 1831 | // get 0. If VAL is 0, we get WORDTYPE_MAX which gets truncated to |
| 1832 | // UINT32_MAX. |
| 1833 | if (BitWidth == 1) |
| 1834 | return U.VAL - 1; |
| 1835 | |
| 1836 | // Handle the zero case. |
| 1837 | if (isNullValue()) |
| 1838 | return UINT32_MAX; |
| 1839 | |
| 1840 | // The non-zero case is handled by computing: |
| 1841 | // |
| 1842 | // nearestLogBase2(x) = logBase2(x) + x[logBase2(x)-1]. |
| 1843 | // |
| 1844 | // where x[i] is referring to the value of the ith bit of x. |
| 1845 | unsigned lg = logBase2(); |
| 1846 | return lg + unsigned((*this)[lg - 1]); |
| 1847 | } |
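| | |
| | // Worked example: for x = 6 (0b110), logBase2() is 2 and bit x[1] is |
| | // 1, so nearestLogBase2() rounds up to 3; for x = 5 (0b101), bit x[1] |
| | // is 0, so the result stays at 2. |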
| 1848 | |
| 1849 | /// \returns the log base 2 of this APInt if it is an exact power of two, -1 |
| 1850 | /// otherwise |
| 1851 | int32_t exactLogBase2() const { |
| 1852 | if (!isPowerOf2()) |
| 1853 | return -1; |
| 1854 | return logBase2(); |
| 1855 | } |
| 1856 | |
| 1857 | /// Compute the square root |
| 1858 | APInt sqrt() const; |
| 1859 | |
| 1860 | /// Get the absolute value. |
| 1861 | /// |
| 1862 | /// If *this is < 0 then return -(*this), otherwise *this. |
| 1863 | APInt abs() const { |
| 1864 | if (isNegative()) |
| 1865 | return -(*this); |
| 1866 | return *this; |
| 1867 | } |
| 1868 | |
| 1869 | /// \returns the multiplicative inverse for a given modulo. |
| 1870 | APInt multiplicativeInverse(const APInt &modulo) const; |
| 1871 | |
| 1872 | /// @} |
| 1873 | /// \name Support for division by constant |
| 1874 | /// @{ |
| 1875 | |
| 1876 | /// Calculate the magic number for signed division by a constant. |
| 1877 | struct ms; |
| 1878 | ms magic() const; |
| 1879 | |
| 1880 | /// Calculate the magic number for unsigned division by a constant. |
| 1881 | struct mu; |
| 1882 | mu magicu(unsigned LeadingZeros = 0) const; |
| 1883 | |
| 1884 | /// @} |
| 1885 | /// \name Building-block Operations for APInt and APFloat |
| 1886 | /// @{ |
| 1887 | |
| 1888 | // These building block operations operate on a representation of arbitrary |
| 1889 | // precision, two's-complement, bignum integer values. They should be |
| 1890 | // sufficient to implement APInt and APFloat bignum requirements. Inputs are |
| 1891 | // generally a pointer to the base of an array of integer parts, representing |
| 1892 | // an unsigned bignum, and a count of how many parts there are. |
| 1893 | |
| 1894 | /// Sets the least significant part of a bignum to the input value, and zeroes |
| 1895 | /// out higher parts. |
| 1896 | static void tcSet(WordType *, WordType, unsigned); |
| 1897 | |
| 1898 | /// Assign one bignum to another. |
| 1899 | static void tcAssign(WordType *, const WordType *, unsigned); |
| 1900 | |
| 1901 | /// Returns true if a bignum is zero, false otherwise. |
| 1902 | static bool tcIsZero(const WordType *, unsigned); |
| 1903 | |
| 1904 | /// Extract the given bit of a bignum; returns 0 or 1. Zero-based. |
| 1905 | static int tcExtractBit(const WordType *, unsigned bit); |
| 1906 | |
| 1907 | /// Copy the bit vector of width srcBITS from SRC, starting at bit srcLSB, to |
| 1908 | /// DST, of dstCOUNT parts, such that the bit srcLSB becomes the least |
| 1909 | /// significant bit of DST. All high bits above srcBITS in DST are |
| 1910 | /// zero-filled. |
| 1911 | static void tcExtract(WordType *, unsigned dstCount, |
| 1912 | const WordType *, unsigned srcBits, |
| 1913 | unsigned srcLSB); |
| 1914 | |
| 1915 | /// Set the given bit of a bignum. Zero-based. |
| 1916 | static void tcSetBit(WordType *, unsigned bit); |
| 1917 | |
| 1918 | /// Clear the given bit of a bignum. Zero-based. |
| 1919 | static void tcClearBit(WordType *, unsigned bit); |
| 1920 | |
| 1921 | /// Returns the bit number of the least or most significant set bit of a |
| 1922 | /// number. If the input number has no bits set, -1U is returned. |
| 1923 | static unsigned tcLSB(const WordType *, unsigned n); |
| 1924 | static unsigned tcMSB(const WordType *parts, unsigned n); |
| 1925 | |
| 1926 | /// Negate a bignum in-place. |
| 1927 | static void tcNegate(WordType *, unsigned); |
| 1928 | |
| 1929 | /// DST += RHS + CARRY where CARRY is zero or one. Returns the carry flag. |
| 1930 | static WordType tcAdd(WordType *, const WordType *, |
| 1931 | WordType carry, unsigned); |
| 1932 | /// DST += RHS. Returns the carry flag. |
| 1933 | static WordType tcAddPart(WordType *, WordType, unsigned); |
| 1934 | |
| 1935 | /// DST -= RHS + CARRY where CARRY is zero or one. Returns the carry flag. |
| 1936 | static WordType tcSubtract(WordType *, const WordType *, |
| 1937 | WordType carry, unsigned); |
| 1938 | /// DST -= RHS. Returns the carry flag. |
| 1939 | static WordType tcSubtractPart(WordType *, WordType, unsigned); |
| 1940 | |
| 1941 | /// DST += SRC * MULTIPLIER + PART if add is true |
| 1942 | /// DST = SRC * MULTIPLIER + PART if add is false |
| 1943 | /// |
| 1944 | /// Requires 0 <= DSTPARTS <= SRCPARTS + 1. If DST overlaps SRC they must |
| 1945 | /// start at the same point, i.e. DST == SRC. |
| 1946 | /// |
| 1947 | /// If DSTPARTS == SRCPARTS + 1 no overflow occurs and zero is returned. |
| 1948 | /// Otherwise DST is filled with the least significant DSTPARTS parts of the |
| 1949 | /// result, and if all of the omitted higher parts were zero return zero, |
| 1950 | /// otherwise overflow occurred and return one. |
| 1951 | static int tcMultiplyPart(WordType *dst, const WordType *src, |
| 1952 | WordType multiplier, WordType carry, |
| 1953 | unsigned srcParts, unsigned dstParts, |
| 1954 | bool add); |
| 1955 | |
| 1956 | /// DST = LHS * RHS, where DST has the same width as the operands and is |
| 1957 | /// filled with the least significant parts of the result. Returns one if |
| 1958 | /// overflow occurred, otherwise zero. DST must be disjoint from both |
| 1959 | /// operands. |
| 1960 | static int tcMultiply(WordType *, const WordType *, const WordType *, |
| 1961 | unsigned); |
| 1962 | |
| 1963 | /// DST = LHS * RHS, where DST has width the sum of the widths of the |
| 1964 | /// operands. No overflow occurs. DST must be disjoint from both operands. |
| 1965 | static void tcFullMultiply(WordType *, const WordType *, |
| 1966 | const WordType *, unsigned, unsigned); |
| 1967 | |
| 1968 | /// If RHS is zero LHS and REMAINDER are left unchanged, return one. |
| 1969 | /// Otherwise set LHS to LHS / RHS with the fractional part discarded, set |
| 1970 | /// REMAINDER to the remainder, return zero. i.e. |
| 1971 | /// |
| 1972 | /// OLD_LHS = RHS * LHS + REMAINDER |
| 1973 | /// |
| 1974 | /// SCRATCH is a bignum of the same size as the operands and result for use by |
| 1975 | /// the routine; its contents need not be initialized and are destroyed. LHS, |
| 1976 | /// REMAINDER and SCRATCH must be distinct. |
| 1977 | static int tcDivide(WordType *lhs, const WordType *rhs, |
| 1978 | WordType *remainder, WordType *scratch, |
| 1979 | unsigned parts); |
| 1980 | |
| 1981 | /// Shift a bignum left Count bits. Shifted in bits are zero. There are no |
| 1982 | /// restrictions on Count. |
| 1983 | static void tcShiftLeft(WordType *, unsigned Words, unsigned Count); |
| 1984 | |
| 1985 | /// Shift a bignum right Count bits. Shifted in bits are zero. There are no |
| 1986 | /// restrictions on Count. |
| 1987 | static void tcShiftRight(WordType *, unsigned Words, unsigned Count); |
| 1988 | |
| 1989 | /// The obvious AND, OR and XOR and complement operations. |
| 1990 | static void tcAnd(WordType *, const WordType *, unsigned); |
| 1991 | static void tcOr(WordType *, const WordType *, unsigned); |
| 1992 | static void tcXor(WordType *, const WordType *, unsigned); |
| 1993 | static void tcComplement(WordType *, unsigned); |
| 1994 | |
| 1995 | /// Comparison (unsigned) of two bignums. |
| 1996 | static int tcCompare(const WordType *, const WordType *, unsigned); |
| 1997 | |
| 1998 | /// Increment a bignum in-place. Return the carry flag. |
| 1999 | static WordType tcIncrement(WordType *dst, unsigned parts) { |
| 2000 | return tcAddPart(dst, 1, parts); |
| 2001 | } |
| 2002 | |
| 2003 | /// Decrement a bignum in-place. Return the borrow flag. |
| 2004 | static WordType tcDecrement(WordType *dst, unsigned parts) { |
| 2005 | return tcSubtractPart(dst, 1, parts); |
| 2006 | } |
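| | |
| | // Illustrative sketch (not in the original header): the tc* routines |
| | // operate on raw word arrays, e.g. a two-part (128-bit) counter: |
| | //   APInt::WordType Big[2]; |
| | //   APInt::tcSet(Big, ~0ULL, 2);    // parts = {~0, 0} |
| | //   APInt::tcIncrement(Big, 2);     // carry ripples: parts = {0, 1} |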
| 2007 | |
| 2008 | /// Set the least significant BITS and clear the rest. |
| 2009 | static void tcSetLeastSignificantBits(WordType *, unsigned, unsigned bits); |
| 2010 | |
| 2011 | /// debug method |
| 2012 | void dump() const; |
| 2013 | |
| 2014 | /// @} |
| 2015 | }; |
| 2016 | |
| 2017 | /// Magic data for optimising signed division by a constant. |
| 2018 | struct APInt::ms { |
| 2019 | APInt m; ///< magic number |
| 2020 | unsigned s; ///< shift amount |
| 2021 | }; |
| 2022 | |
| 2023 | /// Magic data for optimising unsigned division by a constant. |
| 2024 | struct APInt::mu { |
| 2025 | APInt m; ///< magic number |
| 2026 | bool a; ///< add indicator |
| 2027 | unsigned s; ///< shift amount |
| 2028 | }; |
| 2029 | |
| 2030 | inline bool operator==(uint64_t V1, const APInt &V2) { return V2 == V1; } |
| 2031 | |
| 2032 | inline bool operator!=(uint64_t V1, const APInt &V2) { return V2 != V1; } |
| 2033 | |
| 2034 | /// Unary bitwise complement operator. |
| 2035 | /// |
| 2036 | /// \returns an APInt that is the bitwise complement of \p v. |
| 2037 | inline APInt operator~(APInt v) { |
| 2038 | v.flipAllBits(); |
| 2039 | return v; |
| 2040 | } |
| 2041 | |
| 2042 | inline APInt operator&(APInt a, const APInt &b) { |
| 2043 | a &= b; |
| 2044 | return a; |
| 2045 | } |
| 2046 | |
| 2047 | inline APInt operator&(const APInt &a, APInt &&b) { |
| 2048 | b &= a; |
| 2049 | return std::move(b); |
| 2050 | } |
| 2051 | |
| 2052 | inline APInt operator&(APInt a, uint64_t RHS) { |
| 2053 | a &= RHS; |
| 2054 | return a; |
| 2055 | } |
| 2056 | |
| 2057 | inline APInt operator&(uint64_t LHS, APInt b) { |
| 2058 | b &= LHS; |
| 2059 | return b; |
| 2060 | } |
| 2061 | |
| 2062 | inline APInt operator|(APInt a, const APInt &b) { |
| 2063 | a |= b; |
| 2064 | return a; |
| 2065 | } |
| 2066 | |
| 2067 | inline APInt operator|(const APInt &a, APInt &&b) { |
| 2068 | b |= a; |
| 2069 | return std::move(b); |
| 2070 | } |
| 2071 | |
| 2072 | inline APInt operator|(APInt a, uint64_t RHS) { |
| 2073 | a |= RHS; |
| 2074 | return a; |
| 2075 | } |
| 2076 | |
| 2077 | inline APInt operator|(uint64_t LHS, APInt b) { |
| 2078 | b |= LHS; |
| 2079 | return b; |
| 2080 | } |
| 2081 | |
| 2082 | inline APInt operator^(APInt a, const APInt &b) { |
| 2083 | a ^= b; |
| 2084 | return a; |
| 2085 | } |
| 2086 | |
| 2087 | inline APInt operator^(const APInt &a, APInt &&b) { |
| 2088 | b ^= a; |
| 2089 | return std::move(b); |
| 2090 | } |
| 2091 | |
| 2092 | inline APInt operator^(APInt a, uint64_t RHS) { |
| 2093 | a ^= RHS; |
| 2094 | return a; |
| 2095 | } |
| 2096 | |
| 2097 | inline APInt operator^(uint64_t LHS, APInt b) { |
| 2098 | b ^= LHS; |
| 2099 | return b; |
| 2100 | } |
| 2101 | |
| 2102 | inline raw_ostream &operator<<(raw_ostream &OS, const APInt &I) { |
| 2103 | I.print(OS, true); |
| 2104 | return OS; |
| 2105 | } |
| 2106 | |
| 2107 | inline APInt operator-(APInt v) { |
| 2108 | v.negate(); |
| 2109 | return v; |
| 2110 | } |
| 2111 | |
| 2112 | inline APInt operator+(APInt a, const APInt &b) { |
| 2113 | a += b; |
| 2114 | return a; |
| 2115 | } |
| 2116 | |
| 2117 | inline APInt operator+(const APInt &a, APInt &&b) { |
| 2118 | b += a; |
| 2119 | return std::move(b); |
| 2120 | } |
| 2121 | |
| 2122 | inline APInt operator+(APInt a, uint64_t RHS) { |
| 2123 | a += RHS; |
| 2124 | return a; |
| 2125 | } |
| 2126 | |
| 2127 | inline APInt operator+(uint64_t LHS, APInt b) { |
| 2128 | b += LHS; |
| 2129 | return b; |
| 2130 | } |
| 2131 | |
| 2132 | inline APInt operator-(APInt a, const APInt &b) { |
| 2133 | a -= b; |
| 2134 | return a; |
| 2135 | } |
| 2136 | |
| 2137 | inline APInt operator-(const APInt &a, APInt &&b) { |
| 2138 | b.negate(); |
| 2139 | b += a; |
| 2140 | return std::move(b); |
| 2141 | } |
| 2142 | |
| 2143 | inline APInt operator-(APInt a, uint64_t RHS) { |
| 2144 | a -= RHS; |
| 2145 | return a; |
| 2146 | } |
| 2147 | |
| 2148 | inline APInt operator-(uint64_t LHS, APInt b) { |
| 2149 | b.negate(); |
| 2150 | b += LHS; |
| 2151 | return b; |
| 2152 | } |
| 2153 | |
| 2154 | inline APInt operator*(APInt a, uint64_t RHS) { |
| 2155 | a *= RHS; |
| 2156 | return a; |
| 2157 | } |
| 2158 | |
| 2159 | inline APInt operator*(uint64_t LHS, APInt b) { |
| 2160 | b *= LHS; |
| 2161 | return b; |
| 2162 | } |
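| | |
| | // Illustrative note: the by-value/rvalue overload pairs above let |
| | // expression chains reuse a temporary's storage instead of copying; |
| | // given APInts A, B, C: |
| | //   APInt R = A + B + C;   // the (A + B) temporary feeds the second + |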
| 2163 | |
| 2164 | |
| 2165 | namespace APIntOps { |
| 2166 | |
| 2167 | /// Determine the smaller of two APInts considered to be signed. |
| 2168 | inline const APInt &smin(const APInt &A, const APInt &B) { |
| 2169 | return A.slt(B) ? A : B; |
| 2170 | } |
| 2171 | |
| 2172 | /// Determine the larger of two APInts considered to be signed. |
| 2173 | inline const APInt &smax(const APInt &A, const APInt &B) { |
| 2174 | return A.sgt(B) ? A : B; |
| 2175 | } |
| 2176 | |
| 2177 | /// Determine the smaller of two APInts considered to be unsigned. |
| 2178 | inline const APInt &umin(const APInt &A, const APInt &B) { |
| 2179 | return A.ult(B) ? A : B; |
| 2180 | } |
| 2181 | |
| 2182 | /// Determine the larger of two APInts considered to be unsigned. |
| 2183 | inline const APInt &umax(const APInt &A, const APInt &B) { |
| 2184 | return A.ugt(B) ? A : B; |
| 2185 | } |
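| | |
| | // Illustrative sketch: signedness changes the answer; for the 8-bit |
| | // values A = 0x80 (-128 as signed) and B = 0x01: |
| | //   smin(A, B) is A, while umin(A, B) is B. |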
| 2186 | |
| 2187 | /// Compute GCD of two unsigned APInt values. |
| 2188 | /// |
| 2189 | /// This function returns the greatest common divisor of the two APInt values |
| 2190 | /// using Stein's algorithm. |
| 2191 | /// |
| 2192 | /// \returns the greatest common divisor of A and B. |
| 2193 | APInt GreatestCommonDivisor(APInt A, APInt B); |
| 2194 | |
| 2195 | /// Converts the given APInt to a double value. |
| 2196 | /// |
| 2197 | /// Treats the APInt as an unsigned value for conversion purposes. |
| 2198 | inline double RoundAPIntToDouble(const APInt &APIVal) { |
| 2199 | return APIVal.roundToDouble(); |
| 2200 | } |
| 2201 | |
| 2202 | /// Converts the given APInt to a double value. |
| 2203 | /// |
| 2204 | /// Treats the APInt as a signed value for conversion purposes. |
| 2205 | inline double RoundSignedAPIntToDouble(const APInt &APIVal) { |
| 2206 | return APIVal.signedRoundToDouble(); |
| 2207 | } |
| 2208 | |
| 2209 | /// Converts the given APInt to a float value. |
| 2210 | inline float RoundAPIntToFloat(const APInt &APIVal) { |
| 2211 | return float(RoundAPIntToDouble(APIVal)); |
| 2212 | } |
| 2213 | |
| 2214 | /// Converts the given APInt to a float value. |
| 2215 | /// |
| 2216 | /// Treats the APInt as a signed value for conversion purposes. |
| 2217 | inline float RoundSignedAPIntToFloat(const APInt &APIVal) { |
| 2218 | return float(APIVal.signedRoundToDouble()); |
| 2219 | } |
| 2220 | |
| 2221 | /// Converts the given double value into an APInt. |
| 2222 | /// |
| 2223 | /// This function converts a double value to an APInt value. |
| 2224 | APInt RoundDoubleToAPInt(double Double, unsigned width); |
| 2225 | |
| 2226 | /// Converts a float value into an APInt. |
| 2227 | /// |
| 2228 | /// Converts a float value into an APInt value. |
| 2229 | inline APInt RoundFloatToAPInt(float Float, unsigned width) { |
| 2230 | return RoundDoubleToAPInt(double(Float), width); |
| 2231 | } |
| 2232 | |
| 2233 | /// Return A unsign-divided by B, rounded by the given rounding mode. |
| 2234 | APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM); |
| 2235 | |
| 2236 | /// Return A sign-divided by B, rounded by the given rounding mode. |
| 2237 | APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM); |
| 2238 | |
| 2239 | /// Let q(n) = An^2 + Bn + C, and BW = bit width of the value range |
| 2240 | /// (e.g. 32 for i32). |
| 2241 | /// This function finds the smallest number n, such that |
| 2242 | /// (a) n >= 0 and q(n) = 0, or |
| 2243 | /// (b) n >= 1 and q(n-1) and q(n), when evaluated in the set of all |
| 2244 | /// integers, belong to two different intervals [Rk, Rk+R), |
| 2245 | /// where R = 2^BW, and k is an integer. |
| 2246 | /// The idea here is to find when q(n) "overflows" 2^BW, while at the |
| 2247 | /// same time "allowing" subtraction. In unsigned modulo arithmetic a |
| 2248 | /// subtraction (treated as addition of negated numbers) would always |
| 2249 | /// count as an overflow, but here we want to allow values to decrease |
| 2250 | /// and increase as long as they are within the same interval. |
| 2251 | /// Specifically, adding of two negative numbers should not cause an |
| 2252 | /// overflow (as long as the magnitude does not exceed the bit width). |
| 2253 | /// On the other hand, given a positive number, adding a negative |
| 2254 | /// number to it can give a negative result, which would cause the |
| 2255 | /// value to go from [-2^BW, 0) to [0, 2^BW). In that sense, zero is |
| 2256 | /// treated as a special case of an overflow. |
| 2257 | /// |
| 2258 | /// This function returns None if after finding k that minimizes the |
| 2259 | /// positive solution to q(n) = kR, both solutions are contained between |
| 2260 | /// two consecutive integers. |
| 2261 | /// |
| 2262 | /// There are cases where q(n) > T, and q(n+1) < T (assuming evaluation |
| 2263 | /// in arithmetic modulo 2^BW, and treating the values as signed) by the |
| 2264 | /// virtue of *signed* overflow. This function will *not* find such an n, |
| 2265 | /// however it may find a value of n satisfying the inequalities due to |
| 2266 | /// an *unsigned* overflow (if the values are treated as unsigned). |
| 2267 | /// To find a solution for a signed overflow, treat it as a problem of |
| 2268 | /// finding an unsigned overflow with a range width of BW-1. |
| 2269 | /// |
| 2270 | /// The returned value may have a different bit width from the input |
| 2271 | /// coefficients. |
| 2272 | Optional<APInt> SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, |
| 2273 | unsigned RangeWidth); |
| 2274 | |
| 2275 | /// Compare two values, and if they are different, return the position of the |
| 2276 | /// most significant bit that is different in the values. |
| 2277 | Optional<unsigned> GetMostSignificantDifferentBit(const APInt &A, |
| 2278 | const APInt &B); |
| 2279 | |
| 2280 | } // End of APIntOps namespace |
| 2281 | |
| 2282 | // See friend declaration above. This additional declaration is required in |
| 2283 | // order to compile LLVM with IBM xlC compiler. |
| 2284 | hash_code hash_value(const APInt &Arg); |
| 2285 | |
| 2286 | /// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst |
| 2287 | /// with the integer held in IntVal. |
| 2288 | void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst, unsigned StoreBytes); |
| 2289 | |
| 2290 | /// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting |
| 2291 | /// from Src into IntVal, which is assumed to be wide enough and to hold zero. |
| 2292 | void LoadIntFromMemory(APInt &IntVal, const uint8_t *Src, unsigned LoadBytes); |
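| | |
| | // Illustrative sketch (not in the original header): a round trip |
| | // through raw memory; IntVal must be pre-zeroed and wide enough: |
| | //   uint8_t Buf[4]; |
| | //   StoreIntToMemory(APInt(32, 0x12345678), Buf, 4); |
| | //   APInt Back(32, 0); |
| | //   LoadIntFromMemory(Back, Buf, 4);   // Back == 0x12345678 |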
| 2293 | |
| 2294 | /// Provide DenseMapInfo for APInt. |
| 2295 | template <> struct DenseMapInfo<APInt> { |
| 2296 | static inline APInt getEmptyKey() { |
| 2297 | APInt V(nullptr, 0); |
| 2298 | V.U.VAL = 0; |
| 2299 | return V; |
| 2300 | } |
| 2301 | |
| 2302 | static inline APInt getTombstoneKey() { |
| 2303 | APInt V(nullptr, 0); |
| 2304 | V.U.VAL = 1; |
| 2305 | return V; |
| 2306 | } |
| 2307 | |
| 2308 | static unsigned getHashValue(const APInt &Key); |
| 2309 | |
| 2310 | static bool isEqual(const APInt &LHS, const APInt &RHS) { |
| 2311 | return LHS.getBitWidth() == RHS.getBitWidth() && LHS == RHS; |
| 2312 | } |
| 2313 | }; |
| 2314 | |
| 2315 | } // namespace llvm |
| 2316 | |
| 2317 | #endif |